From: Tianon Gravi Date: Fri, 30 Oct 2015 02:40:51 +0000 (+0000) Subject: Import docker.io_1.8.3~ds1.orig.tar.gz X-Git-Tag: archive/raspbian/18.09.1+dfsg1-7+rpi1~1^2^2^2~11 X-Git-Url: https://dgit.raspbian.org/?a=commitdiff_plain;h=a9047661aa76f75fe03aa6d1316780a1ab66f578;p=docker.io.git Import docker.io_1.8.3~ds1.orig.tar.gz [dgit import orig docker.io_1.8.3~ds1.orig.tar.gz] --- a9047661aa76f75fe03aa6d1316780a1ab66f578 diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..37abdef4 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,2 @@ +bundles +.gopath diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..0171e4cc --- /dev/null +++ b/.gitignore @@ -0,0 +1,37 @@ +# Docker project generated files to ignore +# if you want to ignore files created by your editor/tools, +# please consider a global .gitignore https://help.github.com/articles/ignoring-files +*.exe +*.exe~ +*.orig +*.rej +*.test +.*.swp +.DS_Store +.bashrc +.dotcloud +.flymake* +.git/ +.gopath/ +.hg/ +.vagrant* +Vagrantfile +a.out +autogen/ +bin +build_src +bundles/ +docker/docker +docs/AWS_S3_BUCKET +docs/GITCOMMIT +docs/GIT_BRANCH +docs/VERSION +docs/_build +docs/_static +docs/_templates +docs/changed-files +# generated by man/man/md2man-all.sh +man/man1 +man/man5 +pyenv +vendor/pkg/ diff --git a/.mailmap b/.mailmap new file mode 100644 index 00000000..8348b4a8 --- /dev/null +++ b/.mailmap @@ -0,0 +1,171 @@ +# Generate AUTHORS: hack/generate-authors.sh + +# Tip for finding duplicates (besides scanning the output of AUTHORS for name +# duplicates that aren't also email duplicates): scan the output of: +# git log --format='%aE - %aN' | sort -uf +# +# For explanation on this file format: man git-shortlog + +Patrick Stapleton +Shishir Mahajan +Erwin van der Koogh +Ahmed Kamal +Tejesh Mehta +Cristian Staretu +Cristian Staretu +Cristian Staretu +Marcus Linke +Aleksandrs Fadins +Christopher Latham +Hu Keping +Wayne Chang +Chen Chao +Daehyeok Mun + + + + + + +Guillaume J. Charmes + + + + + +Thatcher Peskens +Thatcher Peskens +Thatcher Peskens dhrp +Jérôme Petazzoni jpetazzo +Jérôme Petazzoni +Joffrey F +Joffrey F +Joffrey F +Tim Terhorst +Andy Smith + + + + + + + + + +Walter Stanish + +Roberto Hashioka +Konstantin Pelykh +David Sissitka +Nolan Darilek + +Benoit Chesneau +Jordan Arentsen +Daniel Garcia +Miguel Angel Fernández +Bhiraj Butala +Faiz Khan +Victor Lyuboslavsky +Jean-Baptiste Barth +Matthew Mueller + +Shih-Yuan Lee +Daniel Mizyrycki root +Jean-Baptiste Dalido + + + + + + + + + + + + + + +Sven Dowideit +Sven Dowideit +Sven Dowideit +Sven Dowideit <¨SvenDowideit@home.org.au¨> +Sven Dowideit +Sven Dowideit + +Alexandr Morozov + +O.S. Tezer + +Roberto G. 
Hashioka + + + + + +Sridhar Ratnakumar +Sridhar Ratnakumar +Liang-Chi Hsieh +Aleksa Sarai +Will Weaver +Timothy Hobbs +Nathan LeClaire +Nathan LeClaire + + + + +Matthew Heon + + + +Francisco Carriedo + + + + + +Brian Goff + + + +Hollie Teal + + + +Jessica Frazelle Jessie Frazelle + + + + + +Thomas LEVEIL Thomas LÉVEIL + + +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Darren Shepherd +Deshi Xiao +Deshi Xiao +Doug Davis +Jacob Atzen +Jeff Nickoloff + +John Howard (VM) John Howard +Madhu Venugopal +Mary Anthony +Mary Anthony moxiegirl +Mary Anthony +mattyw +resouer +AJ Bowen soulshake +AJ Bowen soulshake +Tibor Vass +Tibor Vass +Vincent Bernat +Yestin Sun +bin liu +John Howard (VM) jhowardmsft +Ankush Agarwal +Tangi COLIN tangicolin diff --git a/AUTHORS b/AUTHORS new file mode 100644 index 00000000..cfc67e32 --- /dev/null +++ b/AUTHORS @@ -0,0 +1,1033 @@ +# This file lists all individuals having contributed content to the repository. +# For how it is generated, see `hack/generate-authors.sh`. + +Aanand Prasad +Aaron Davidson +Aaron Feng +Aaron Huslage +Aaron Welch +Abel Muiño +Abhinav Ajgaonkar +Abhishek Chanda +Abin Shahab +Adam Miller +Adam Singer +Aditya +Adria Casas +Adrian Mouat +Adrien Folie +Ahmed Kamal +Ahmet Alp Balkan +Aidan Hobson Sayers +AJ Bowen +Al Tobey +alambike +Alan Thompson +Albert Callarisa +Albert Zhang +Aleksa Sarai +Aleksandrs Fadins +Alena Prokharchyk +Alessandro Boch +Alessio Biancalana +Alex Gaynor +Alex Warhawk +Alexander Boyd +Alexander Larsson +Alexander Morozov +Alexander Shopov +Alexandr Morozov +Alexey Guskov +Alexey Kotlyarov +Alexey Shamrin +Alexis THOMAS +Allen Madsen +almoehi +Alvin Richards +amangoel +Amit Bakshi +Amy Lindburg +Anand Patil +AnandkumarPatel +Anchal Agrawal +Anders Janmyr +Andre Dublin <81dublin@gmail.com> +Andrea Luzzardi +Andrea Turli +Andreas Köhler +Andreas Savvides +Andreas Tiefenthaler +Andrew C. Bodine +Andrew Clay Shafer +Andrew Duckworth +Andrew France +Andrew Kuklewicz +Andrew Macgregor +Andrew Martin +Andrew Munsell +Andrew Weiss +Andrew Williams +Andrews Medina +Andrey Petrov +Andrey Stolbovsky +André Martins +Andy Chambers +andy diller +Andy Goldstein +Andy Kipp +Andy Rothfusz +Andy Smith +Andy Wilson +Anes Hasicic +Ankush Agarwal +Anthony Baire +Anthony Bishopric +Anton Löfgren +Anton Nikitin +Anton Tiurin +Antonio Murdaca +Antony Messerli +apocas +ArikaChen +Arnaud Porterie +Arthur Barr +Arthur Gautier +Asbjørn Enge +averagehuman +Avi Das +Avi Miller +Barnaby Gray +Barry Allard +Bartłomiej Piotrowski +bdevloed +Ben Firshman +Ben Sargent +Ben Severson +Ben Toews +Ben Wiklund +Benjamin Atkin +Benoit Chesneau +Bernerd Schaefer +Bert Goethals +Bharath Thiruveedula +Bhiraj Butala +bin liu +Blake Geno +bobby abbott +boucher +Bouke Haarsma +Boyd Hemphill +Bradley Cicenas +Bradley Wright +Brandon Liu +Brandon Philips +Brandon Rhodes +Brendan Dixon +Brent Salisbury +Brett Kochendorfer +Brian (bex) Exelbierd +Brian DeHamer +Brian Dorsey +Brian Flad +Brian Goff +Brian McCallister +Brian Olsen +Brian Shumate +Brice Jaglin +Briehan Lombaard +Bruno Bigras +Bruno Binet +Bruno Gazzera +Bruno Renié +Bryan Bess +Bryan Boreham +Bryan Matsuo +Bryan Murphy +buddhamagnet +Burke Libbey +Byung Kang +Caleb Spare +Calen Pennington +Cameron Boehmer +Carl X. Su +Cary +Casey Bisson +Charles Hooper +Charles Lindsay +Charles Merriam +Charlie Lewis +Chen Chao +Chen Hanxiao +cheney90 +Chewey +Chia-liang Kao +chli +Chris Alfonso +Chris Armstrong +Chris Khoo +Chris Snow +Chris St. 
Pierre +Chris Stivers +Chris Wahl +chrismckinnel +Christian Berendt +Christian Simon +Christian Stefanescu +ChristoperBiscardi +Christophe Troestler +Christopher Currie +Christopher Latham +Christopher Rigor +Christy Perez +Chun Chen +Ciro S. Costa +Clayton Coleman +Coenraad Loubser +Colin Dunklau +Colin Rice +Colin Walters +Colm Hally +Cory Forsyth +cressie176 +Cristian Staretu +Cruceru Calin-Cristian +Cyril F +Daan van Berkel +Daehyeok Mun +Dafydd Crosby +dalanlan +Damjan Georgievski +Dan Anolik +Dan Buch +Dan Cotora +Dan Griffin +Dan Hirsch +Dan Keder +Dan McPherson +Dan Stine +Dan Walsh +Dan Williams +Daniel Antlinger +Daniel Exner +Daniel Farrell +Daniel Garcia +Daniel Gasienica +Daniel Menet +Daniel Mizyrycki +Daniel Nephin +Daniel Norberg +Daniel Nordberg +Daniel Robinson +Daniel S +Daniel Von Fange +Daniel YC Lin +Daniel Zhang +Daniel, Dao Quang Minh +Danny Berger +Danny Yates +Darren Coxall +Darren Shepherd +Dave Henderson +David Anderson +David Calavera +David Corking +David Davis +David Gageot +David Gebler +David Mackey +David Mat +David Mcanulty +David Pelaez +David R. Jenni +David Röthlisberger +David Sissitka +David Xia +David Young +Davide Ceretti +Dawn Chen +decadent +Deng Guangxing +Deni Bertovic +Derek +Derek +Derek McGowan +Deric Crago +Deshi Xiao +Dinesh Subhraveti +DiuDiugirl +Djibril Koné +dkumor +Dmitry Demeshchuk +Dmitry Gusev +Dmitry V. Krivenok +Dolph Mathews +Dominik Finkbeiner +Dominik Honnef +Don Kirkby +Don Kjer +Don Spaulding +Doug Davis +Doug MacEachern +doug tangren +Dr Nic Williams +dragon788 +Dražen Lučanin +Dustin Sallings +Ed Costello +Edmund Wagner +Eiichi Tsukata +Eike Herzbach +Eivind Uggedal +Elias Probst +Elijah Zupancic +eluck +Emil Hernvall +Emily Maier +Emily Rose +Emir Ozer +Enguerran +Eohyung Lee +Eric Hanchrow +Eric Lee +Eric Myhre +Eric Paris +Eric Rafaloff +Eric Windisch +Eric-Olivier Lamey +Erik Dubbelboer +Erik Hollensbe +Erik Inge Bolsø +Erik Kristensen +Erno Hopearuoho +Erwin van der Koogh +Euan +Eugene Yakubovich +eugenkrizo +Evan Carmi +Evan Hazlett +Evan Krall +Evan Phoenix +Evan Wies +Evgeny Vereshchagin +Eystein Måløy Stenberg +ezbercih +Fabiano Rosas +Fabio Falci +Fabio Rehm +Fabrizio Regini +Faiz Khan +falmp +Fareed Dudhia +Felix Rabe +Felix Schindler +Ferenc Szabo +Fernando +Filipe Brandenburger +Flavio Castelli +FLGMwt +Florian Weingarten +Francisco Carriedo +Francisco Souza +Frank Herrmann +Frank Macreery +Frank Rosquin +Fred Lifton +Frederick F. Kautz IV +Frederik Loeffert +Freek Kalter +Félix Baylac-Jacqué +Gabe Rosenhouse +Gabor Nagy +Gabriel Monroy +Galen Sampson +Gareth Rushgrove +Gaurav +gautam, prasanna +GennadySpb +Geoffrey Bachelet +George MacRorie +George Xie +Gereon Frey +German DZ +Gert van Valkenhoef +Gianluca Borello +Giuseppe Mazzotta +Gleb Fotengauer-Malinovskiy +Gleb M Borisov +Glyn Normington +Goffert van Gool +golubbe +Gosuke Miyashita +Graydon Hoare +Greg Fausak +Greg Thornton +grossws +grunny +Guilherme Salgado +Guillaume Dufour +Guillaume J. Charmes +guoxiuyan +Gurjeet Singh +Guruprasad +Günter Zöchbauer +Hans Rødtang +Harald Albers +Harley Laue +Harry Zhang +He Simei +Hector Castro +Henning Sprang +Hobofan +Hollie Teal +Hong Xu +Hu Keping +Hu Tao +Huayi Zhang +Hugo Duncan +Hunter Blanks +Huu Nguyen +hyeongkyu.lee +hyp3rdino +Ian Babrou +Ian Bishop +Ian Bull +Ian Calvert +Ian Main +Ian Truslove +Iavael +Igor Dolzhikov +ILYA Khlopotov +imre Fitos +inglesp +Isaac Dupree +Isabel Jimenez +Isao Jonas +Ivan Fraixedes +J Bruni +J. 
Nunn +Jack Danger Canty +Jacob Atzen +Jacob Edelman +Jake Champlin +Jake Moshenko +jakedt +James Allen +James Carr +James DeFelice +James Harrison Fisher +James Kyle +James Lal +James Mills +James Turnbull +Jamie Hannaford +Jamshid Afshar +Jan Keromnes +Jan Koprowski +Jan Pazdziora +Jan Toebes +Jan-Jaap Driessen +Jana Radhakrishnan +Jared Biel +Jaroslaw Zabiello +jaseg +Jason Divock +Jason Giedymin +Jason Hall +Jason Livesay +Jason McVetta +Jason Plum +Jason Shepherd +Jason Smith +Jason Sommer +Jason Stangroome +Jay +Jean-Baptiste Barth +Jean-Baptiste Dalido +Jean-Paul Calderone +Jean-Tiare Le Bigot +Jeff Anderson +Jeff Lindsay +Jeff Nickoloff +Jeff Welch +Jeffrey Bolle +Jeffrey Morgan +Jeffrey van Gogh +Jeremy Grosser +Jesse Dearing +Jesse Dubay +Jessica Frazelle +Jezeniel Zapanta +jianbosun +Jilles Oldenbeuving +Jim Alateras +Jim Perrin +Jimmy Cuadra +Jimmy Puckett +jimmyxian +Jinsoo Park +Jiri Popelka +Jiří Župka +jjy +jmzwcn +Joe Beda +Joe Ferguson +Joe Gordon +Joe Shaw +Joe Van Dyk +Joel Friedly +Joel Handwell +Joey Gibson +Joffrey F +Johan Euphrosine +Johan Rydberg +Johannes 'fish' Ziemke +John Costa +John Feminella +John Gardiner Myers +John Gossman +John Howard (VM) +John OBrien III +John Tims +John Warwick +John Willis +Jon Wedaman +Jonas Pfenniger +Jonathan A. Sternberg +Jonathan Boulle +Jonathan Camp +Jonathan Dowland +Jonathan McCrohan +Jonathan Mueller +Jonathan Pares +Jonathan Rudenberg +Joost Cassee +Jordan Arentsen +Jordan Sissel +Joseph Anthony Pasquale Holsten +Joseph Hager +Joseph Kern +Josh +Josh Hawn +Josh Poimboeuf +Josiah Kiehl +José Tomás Albornoz +JP +Julian Taylor +Julien Barbier +Julien Bordellier +Julien Dubois +Jun-Ru Chang +Justin Force +Justin Plock +Justin Simonelis +Jyrki Puttonen +Jérôme Petazzoni +Jörg Thalheim +Kamil Domanski +Karan Lyons +kargakis +Karl Grzeszczak +Katie McLaughlin +Kato Kazuyoshi +Katrina Owen +Kawsar Saiyeed +Keli Hu +Ken Cochrane +Ken ICHIKAWA +Kent Johnson +Kevin "qwazerty" Houdebert +Kevin Clark +Kevin J. Lynagh +Kevin Menard +Kevin Wallace +Kevin Yap +Keyvan Fatehi +kies +Kim BKC Carlbacker +Kimbro Staken +Kiran Gangadharan +Kirill SIbirev +knappe +Kohei Tsuruta +Konrad Kleine +Konstantin Pelykh +Krasimir Georgiev +krrg +Kyle Conroy +kyu +Lachlan Coote +Lajos Papp +Lakshan Perera +lalyos +Lance Chen +Lance Kinley +Lars Kellogg-Stedman +Lars R. 
Damerow +Laurie Voss +leeplay +Lei Jitang +Len Weincier +Leszek Kowalski +Levi Gross +Lewis Marshall +Lewis Peckover +Liana Lo +Liang-Chi Hsieh +limsy +Liu Hua +Lloyd Dewolf +Lokesh Mandvekar +Lorenz Leutgeb +Lorenzo Fontana +Louis Opter +Luis Martínez de Bartolomé Izquierdo +lukaspustina +lukemarsden +Lénaïc Huard +Ma Shimiao +Mabin +Madhu Venugopal +Mahesh Tiyyagura +malnick +Malte Janduda +Manfred Touron +Manfred Zabarauskas +Manuel Meurer +Manuel Woelker +Marc Abramowitz +Marc Kuo +Marc Tamsky +Marco Hennings +Marcus Farkas +Marcus Linke +Marcus Ramberg +Marek Goldmann +Marian Marinov +Marianna +Marius Voila +Mark Allen +Mark McGranaghan +Mark West +Marko Mikulicic +Marko Tibold +Markus Fix +Martijn Dwars +Martijn van Oosterhout +Martin Honermeyer +Martin Redmond +Mary Anthony +Masahito Zembutsu +Mason Malone +Mateusz Sulima +Mathias Monnerville +Mathieu Le Marec - Pasquet +Matt Apperson +Matt Bachmann +Matt Bentley +Matt Haggard +Matt McCormick +Matthew Heon +Matthew Mayer +Matthew Mueller +Matthew Riley +Matthias Klumpp +Matthias Kühnle +mattymo +mattyw +mauriyouth +Max Shytikov +Maxim Kulkin +Maxim Treskin +Maxime Petazzoni +Meaglith Ma +meejah +Megan Kostick +Mehul Kar +Mengdi Gao +Mert Yazıcıoğlu +Michael A. Smith +Michael Brown +Michael Chiang +Michael Crosby +Michael Gorsuch +Michael Hudson-Doyle +Michael Neale +Michael Prokop +Michael Scharf +Michael Stapelberg +Michael Steinert +Michael Thies +Michael West +Michal Fojtik +Michal Jemala +Michal Minar +Michaël Pailloncy +Michiel@unhosted +Miguel Angel Fernández +Mihai Borobocea +Mike Chelen +Mike Dillon +Mike Gaffney +Mike Leone +Mike MacCana +Mike Naberezny +Mike Snitzer +Mikhail Sobolev +Mingzhen Feng +Mitch Capper +Mohit Soni +Morgante Pell +Morten Siebuhr +Moysés Borges +Mrunal Patel +mschurenko +Mustafa Akın +Médi-Rémi Hashim +Nan Monnand Deng +Naoki Orii +Natalie Parker +Nate Eagleson +Nate Jones +Nathan Hsieh +Nathan Kleyn +Nathan LeClaire +Neal McBurnett +Nelson Chen +Nghia Tran +Niall O'Higgins +Nicholas E. Rabenau +Nick Irvine +Nick Parker +Nick Payne +Nick Stenning +Nick Stinemates +Nicolas De loof +Nicolas Dudebout +Nicolas Goy +Nicolas Kaiser +NikolaMandic +nikolas +noducks +Nolan Darilek +nponeccop +Nuutti Kotivuori +nzwsch +O.S. Tezer +OddBloke +odk- +Oguz Bilgic +Oh Jinkyun +Ole Reifschneider +Olivier Gambier +pandrew +panticz +Pascal Borreli +Pascal Hartig +Patrick Devine +Patrick Hemmer +Patrick Stapleton +pattichen +Paul +paul +Paul Annesley +Paul Bellamy +Paul Bowsher +Paul Hammond +Paul Jimenez +Paul Lietar +Paul Morie +Paul Nasrat +Paul Weaver +Pavel Lobashov +Pavel Tikhomirov +Pavlos Ratis +Peggy Li +Peter Bourgon +Peter Braden +Peter Choi +Peter Dave Hello +Peter Ericson +Peter Esbensen +Peter Salvatore +Peter Volpe +Peter Waller +Phil +Phil Estes +Phil Spitler +Philipp Weissensteiner +Phillip Alexander +Piergiuliano Bossi +Pierre +Pierre Wacrenier +Pierre-Alain RIVIERE +Piotr Bogdan +pixelistik +Porjo +Pradeep Chhetri +Prasanna Gautam +Przemek Hejman +pysqz +Qiang Huang +Quentin Brossard +r0n22 +Rafal Jeczalik +Rafe Colton +Raghuram Devarakonda +Rajat Pandit +Rajdeep Dua +Ralph Bean +Ramkumar Ramachandra +Ramon van Alteren +Recursive Madman +Remi Rampin +Renato Riccieri Santos Zannon +resouer +rgstephens +Rhys Hiltner +Rich Seymour +Richard +Richard Burnison +Richard Harvey +Richard Metzler +Richo Healey +Rick Bradley +Rick van de Loo +Rick Wieman +Rik Nijessen +Robert Bachmann +Robert Bittle +Robert Obryk +Roberto G. 
Hashioka +Robin Speekenbrink +robpc +Rodrigo Vaz +Roel Van Nyen +Roger Peppe +Rohit Jnagal +Roland Huß +Roland Moriz +Ron Smits +root +Rovanion Luckey +Rudolph Gottesheim +Ryan Anderson +Ryan Aslett +Ryan Detzel +Ryan Fowler +Ryan O'Donnell +Ryan Seto +Ryan Thomas +Rémy Greinhofer +s. rannou +s00318865 +Sabin Basyal +Sachin Joshi +Sam Abed +Sam Alba +Sam Bailey +Sam J Sharpe +Sam Reis +Sam Rijs +Sami Wagiaalla +Samuel Andaya +Samuel PHAN +Sankar சங்கர் +Sanket Saurav +sapphiredev +Satnam Singh +satoru +Satoshi Amemiya +Scott Bessler +Scott Collier +Scott Johnston +Scott Stamp +Scott Walls +sdreyesg +Sean Cronin +Sean P. Kane +Sebastiaan van Steenis +Sebastiaan van Stijn +Senthil Kumar Selvaraj +SeongJae Park +Seongyeol Lim +Sergey Alekseev +Sergey Evstifeev +Shane Canon +shaunol +Shawn Landden +Shawn Siefkas +Shih-Yuan Lee +Shijiang Wei +Shishir Mahajan +shuai-z +sidharthamani +Silas Sewell +Simei He +Simon Eskildsen +Simon Leinen +Simon Taranto +Sindhu S +Sjoerd Langkemper +Solomon Hykes +Song Gao +Soulou +Sridatta Thatipamala +Sridhar Ratnakumar +Srini Brahmaroutu +Srini Brahmaroutu +Steeve Morin +Stefan Praszalowicz +Stephen Crosby +Stephen J Day +Steve Francia +Steve Koch +Steven Burgess +Steven Merrill +Steven Richards +Steven Taylor +Sven Dowideit +Swapnil Daingade +Sylvain Baubeau +Sylvain Bellemare +Sébastien +Sébastien Luttringer +Sébastien Stormacq +tang0th +Tangi COLIN +Tatsuki Sugiura +Tatsushi Inagaki +Ted M. Young +Tehmasp Chaudhri +Tejesh Mehta +Thatcher Peskens +theadactyl +Thell 'Bo' Fowler +Thermionix +Thijs Terlouw +Thomas Bikeev +Thomas Frössman +Thomas Hansen +Thomas LEVEIL +Thomas Orozco +Thomas Schroeter +Thomas Sjögren +Thomas Texier +Tianon Gravi +Tibor Vass +Tiffany Low +Tim Bosse +Tim Hockin +Tim Ruffles +Tim Smith +Tim Terhorst +Timothy Hobbs +tjwebb123 +tobe +Tobias Bieniek +Tobias Gesellchen +Tobias Schmidt +Tobias Schwab +Todd Lunter +Todd Whiteman +Tom Fotherby +Tom Hulihan +Tom Maaswinkel +Tomas Tomecek +Tomasz Lipinski +Tomasz Nurkiewicz +Tommaso Visconti +Tomáš Hrčka +Tonis Tiigi +Tonny Xu +Tony Daws +Tony Miller +Torstein Husebø +tpng +Travis Cline +Travis Thieman +Trent Ogren +Tristan Carel +Tyler Brock +Tzu-Jung Lee +Ulysse Carion +unknown +vagrant +Vaidas Jablonskis +vgeta +Victor Coisne +Victor Lyuboslavsky +Victor Marmol +Victor Vieux +Viktor Vojnovski +Vincent Batts +Vincent Bernat +Vincent Bernat +Vincent Demeester +Vincent Giersch +Vincent Mayers +Vincent Woo +Vinod Kulkarni +Vishal Doshi +Vishnu Kannan +Vitor Monteiro +Vivek Agarwal +Vivek Dasgupta +Vivek Goyal +Vladimir Bulyga +Vladimir Kirillov +Vladimir Rutsky +VladimirAus +Vojtech Vitek (V-Teq) +waitingkuo +Walter Leibbrandt +Walter Stanish +Ward Vandewege +WarheadsSE +Wayne Chang +Wei-Ting Kuo +Wes Morgan +Will Dietz +Will Rouesnel +Will Weaver +willhf +William Delanoue +William Henry +William Riancho +William Thurston +WiseTrem +wlan0 +Wolfgang Powisch +wonderflow +xamyzhao +XiaoBing Jiang +Xinzi Zhou +Xiuming Chen +xuzhaokui +y00277921 +Yahya +YAMADA Tsuyoshi +Yan Feng +Yang Bai +Yasunori Mahata +Yestin Sun +Yihang Ho +Yohei Ueda +Yongzhi Pan +Yuan Sun +Yurii Rashkovskii +Zac Dover +Zach Borboa +Zain Memon +Zaiste! 
+Zane DeGraffenried +Zefan Li +Zen Lin(Zhinan Lin) +Zhang Wei +Zhang Wentao +Zilin Du +zimbatm +Zoltan Tombol +zqh +Álex González +Álvaro Lázaro +尹吉峰 diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000..d60f7a0e --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,1848 @@ +# Changelog + +## 1.8.3 (2015-10-12) + +### Distribution + +- Fix layer IDs leading to local graph poisoning (CVE-2014-8178) +- Fix manifest validation and parsing logic errors that allowed pull-by-digest validation bypass (CVE-2014-8179) ++ Add `--disable-legacy-registry` to prevent a daemon from using a v1 registry + +## 1.8.2 (2015-09-10) + +### Distribution + +- Fixes rare edge case of handling GNU LongLink and LongName entries. +- Fix ^C on docker pull. +- Fix docker pull issues on client disconnection. +- Fix issue that caused the daemon to panic when loggers weren't configured properly. +- Fix goroutine leak pulling images from registry V2. + +### Runtime + +- Fix a bug mounting cgroups for docker daemons running inside docker containers. +- Initialize log configuration properly. + +### Client + +- Handle `-q` flag in `docker ps` properly when there is a default format. + +### Networking + +- Fix several corner cases with netlink. + +### Contrib + +- Fix several issues with bash completion. + +## 1.8.1 (2015-08-12) + +### Distribution + +- Fix a bug where pushing multiple tags would result in invalid images + +## 1.8.0 (2015-08-11) + +### Distribution + ++ Trusted pull, push and build, disabled by default +* Make tar layers deterministic between registries +* Don't allow deleting the image of running containers +* Check if a tag name to load is a valid digest +* Allow one character repository names +* Add a more accurate error description for invalid tag name +* Make build cache ignore mtime + +### Cli + ++ Add support for DOCKER_CONFIG/--config to specify config file dir ++ Add --type flag for docker inspect command ++ Add formatting options to `docker ps` with `--format` (see the example after the 1.8.0 notes below) ++ Replace `docker -d` with new subcommand `docker daemon` +* Zsh completion updates and improvements +* Add some missing events to bash completion +* Support daemon urls with base paths in `docker -H` +* Validate status= filter to docker ps +* Display when a container is in --net=host in docker ps +* Extend docker inspect to export image metadata related to graph driver +* Restore --default-gateway{,-v6} daemon options +* Add missing unpublished ports in docker ps +* Allow duration strings in `docker events` as --since/--until +* Expose more mounts information in `docker inspect` + +### Runtime + ++ Add new Fluentd logging driver ++ Allow `docker import` to load from local files ++ Add logging driver for GELF via UDP ++ Allow copying files from host to containers with `docker cp` ++ Promote volume drivers from experimental to master ++ Add rollover log driver, and --log-driver-opts flag ++ Add memory swappiness tuning options +* Remove cgroup read-only flag when privileged +* Make /proc, /sys, & /dev readonly for readonly containers +* Add cgroup bind mount by default +* Overlay: Export metadata for container and image in `docker inspect` +* Devicemapper: external device activation +* Devicemapper: Compare uuid of base device on startup +* Remove RC4 from the list of registry cipher suites +* Add syslog-facility option +* LXC execdriver compatibility with recent LXC versions +* Mark LXC execdriver as deprecated (to be removed with the migration to runc) + +### Plugins + +* Separate plugin sockets and specs locations +* Allow TLS 
connections to plugins + +### Bug fixes + +- Add missing 'Names' field to /containers/json API output +- Make `docker rmi --dangling` safe when pulling +- Devicemapper: Change default basesize to 100G +- Fix Go scheduler issue with sync.Mutex and gcc +- Fix issue where Search API endpoint would panic due to empty AuthConfig +- Set image canonical names correctly +- Check dockerinit only if lxc driver is used +- Fix ulimit usage of nproc +- Always attach STDIN if -i,--interactive is specified +- Show error messages when saving container state fails +- Fix incorrect assumption of --bridge=none being treated as disabling the network +- Check for invalid port specifications in host configuration +- Fix endpoint leave failure for --net=host mode +- Fix goroutine leak in the stats API if the container is not running +- Check for apparmor file before reading it +- Fix DOCKER_TLS_VERIFY being ignored +- Set umask to the default on startup +- Correct the message for pausing and unpausing a non-running container +- Adjust disallowed CpuShares in container creation +- ZFS: correctly apply selinux context +- Display empty string instead of `<nil>` when IP opt is nil +- `docker kill` returns error when container is not running +- Fix COPY/ADD quoted/json form +- Fix goroutine leak on logs -f with no output +- Remove panic in nat package on invalid hostport +- Fix container linking in Fedora 22 +- Fix error caused by using default gateways outside of the allocated range +- Format times in inspect command with a template as RFC3339Nano +- Make the registry client accept 2xx and 3xx http status responses as successful +- Fix race issue that caused the daemon to crash when certain layer downloads failed in a specific order. +- Fix error when the docker ps format was not valid. +- Remove redundant ip forward check. +- Fix issue trying to push images to repository mirrors. +- Fix error cleaning up network entrypoints when there is an initialization issue. 
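The 1.8.x CLI changes above are easiest to see side by side. A minimal sketch, assuming a Docker 1.8 daemon and an arbitrary `busybox` image; the template fields shown and the ampersand backgrounding are illustrative:

```bash
# `docker daemon` (1.8.0) replaces `docker -d`; --disable-legacy-registry is the new 1.8.3 flag
docker daemon --disable-legacy-registry &

# --format (1.8.0) takes a Go template over the container fields
docker ps --format '{{.ID}}\t{{.Names}}\t{{.Status}}'

# --type (1.8.0) restricts `docker inspect` to an image or container lookup
docker inspect --type image busybox
```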
+ +## 1.7.1 (2015-07-14) + +#### Runtime + +- Fix default user spawning exec process with `docker exec` +- Make `--bridge=none` not configure the network bridge +- Publish networking stats properly +- Fix implicit devicemapper selection with static binaries +- Fix socket connections that hung intermittently +- Fix bridge interface creation on CentOS/RHEL 6.6 +- Fix local dns lookups added to resolv.conf +- Fix copy command mounting volumes +- Fix read/write privileges in volumes mounted with --volumes-from + +#### Remote API + +- Fix unmarshalling of Command and Entrypoint +- Set limit for minimum client version supported +- Validate port specification +- Return proper errors when attach/reattach fail + +#### Distribution + +- Fix pulling private images +- Fix fallback between registry V2 and V1 + +## 1.7.0 (2015-06-16) + +#### Runtime ++ Experimental feature: support for out-of-process volume plugins +* The userland proxy can be disabled in favor of hairpin NAT using the daemon’s `--userland-proxy=false` flag +* The `exec` command supports the `-u|--user` flag to specify the new process owner ++ Default gateway for containers can be specified daemon-wide using the `--default-gateway` and `--default-gateway-v6` flags ++ The CPU CFS (Completely Fair Scheduler) quota can be set in `docker run` using `--cpu-quota` ++ Container block IO can be controlled in `docker run` using `--blkio-weight` ++ ZFS support ++ The `docker logs` command supports a `--since` argument ++ UTS namespace can be shared with the host with `docker run --uts=host` + +#### Quality +* Networking stack was entirely rewritten as part of the libnetwork effort +* Engine internals refactoring +* Volumes code was entirely rewritten to support the plugins effort ++ Sending SIGUSR1 to a daemon will dump all goroutine stacks without exiting + +#### Build ++ Support ${variable:-value} and ${variable:+value} syntax for environment variables ++ Support resource management flags `--cgroup-parent`, `--cpu-period`, `--cpu-quota`, `--cpuset-cpus`, `--cpuset-mems` ++ git context changes with branches and directories +* The .dockerignore file supports exclusion rules + +#### Distribution ++ Client support for v2 mirroring for the official registry + +#### Bugfixes +* Firewalld is now supported and will automatically be used when available +* mounting --device recursively + +## 1.6.2 (2015-05-13) + +#### Runtime +- Revert change prohibiting mounting into /sys + +## 1.6.1 (2015-05-07) + +#### Security +- Fix read/write /proc paths (CVE-2015-3630) +- Prohibit VOLUME /proc and VOLUME / (CVE-2015-3631) +- Fix opening of file-descriptor 1 (CVE-2015-3627) +- Fix symlink traversal on container respawn allowing local privilege escalation (CVE-2015-3629) +- Prohibit mount of /sys + +#### Runtime +- Update AppArmor policy to not allow mounts + +## 1.6.0 (2015-04-07) + +#### Builder ++ Building images from an image ID ++ Build containers with resource constraints, e.g. `docker build --cpu-shares=100 --memory=1024m...` ++ `commit --change` to apply specified Dockerfile instructions while committing the image ++ `import --change` to apply specified Dockerfile instructions while importing the image ++ Builds no longer continue in the background when canceled with CTRL-C + +#### Client ++ Windows Support + +#### Runtime ++ Container and image Labels ++ `--cgroup-parent` for specifying a parent cgroup to place container cgroup within ++ Logging drivers, `json-file`, `syslog`, or `none` ++ Pulling images by ID ++ `--ulimit` to set the ulimit on a 
container ++ `--default-ulimit` option on the daemon which applies to all created containers (and can be overridden by `--ulimit` on run) + +## 1.5.0 (2015-02-10) + +#### Builder ++ Dockerfile to use for a given `docker build` can be specified with the `-f` flag +* Dockerfile and .dockerignore files can be themselves excluded as part of the .dockerignore file, thus preventing modifications to these files invalidating ADD or COPY instructions cache +* ADD and COPY instructions accept relative paths +* Dockerfile `FROM scratch` instruction is now interpreted as a no-base specifier +* Improve performance when exposing a large number of ports + +#### Hack ++ Allow client-side only integration tests for Windows +* Include docker-py integration tests against Docker daemon as part of our test suites + +#### Packaging ++ Support for the new version of the registry HTTP API +* Speed up `docker push` for images with a majority of already existing layers +- Fixed contacting a private registry through a proxy + +#### Remote API ++ A new endpoint will stream live container resource metrics and can be accessed with the `docker stats` command ++ Containers can be renamed using the new `rename` endpoint and the associated `docker rename` command +* Container `inspect` endpoint shows the ID of `exec` commands running in this container +* Container `inspect` endpoint shows the number of times Docker auto-restarted the container +* New types of event can be streamed by the `events` endpoint: ‘OOM’ (container died with out of memory), ‘exec_create’, and ‘exec_start’ +- Fixed returned string fields which hold numeric characters incorrectly omitting surrounding double quotes + +#### Runtime ++ Docker daemon has full IPv6 support ++ The `docker run` command can take the `--pid=host` flag to use the host PID namespace, which makes it possible, for example, to debug host processes using containerized debugging tools ++ The `docker run` command can take the `--read-only` flag to make the container’s root filesystem mounted as readonly, which can be used in combination with volumes to force a container’s processes to only write to locations that will be persisted (see the sketch after the 1.4.0 notes below) ++ Container total memory usage can be limited for `docker run` using the `--memory-swap` flag +* Major stability improvements for devicemapper storage driver +* Better integration with host system: containers will reflect changes to the host's `/etc/resolv.conf` file when restarted +* Better integration with host system: per-container iptable rules are moved to the DOCKER chain +- Fixed container exit on out of memory returning an invalid exit code + +#### Other +* The HTTP_PROXY, HTTPS_PROXY, and NO_PROXY environment variables are properly taken into account by the client when connecting to the Docker daemon + +## 1.4.1 (2014-12-15) + +#### Runtime +- Fix issue with volumes-from and bind mounts not being honored after create + +## 1.4.0 (2014-12-11) + +#### Notable Features since 1.3.0 ++ Set key=value labels to the daemon (displayed in `docker info`), applied with + new `-label` daemon flag ++ Add support for `ENV` in Dockerfile of the form: + `ENV name=value name2=value2...` ++ New Overlayfs Storage Driver ++ `docker info` now returns an `ID` and `Name` field ++ Filter events by event name, container, or image ++ `docker cp` now supports copying from container volumes +- Fixed `docker tag`, so it honors `--force` when overriding a tag for an existing + image. 
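As referenced above, the 1.5.0 run flags compose naturally with the new `rename` and `stats` commands. A minimal sketch, assuming an `nginx` image; the container names are made up for illustration:

```bash
# Read-only rootfs (1.5.0): processes may only write to the mounted volume
docker run -d --name web --read-only -v /data nginx

# Share the host PID namespace (1.5.0) to inspect host processes from a container
docker run --rm --pid=host busybox ps

# rename and stats are the new 1.5.0 endpoints and commands
docker rename web frontend
docker stats frontend
```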
+ +## 1.3.3 (2014-12-11) + +#### Security +- Fix path traversal vulnerability in processing of absolute symbolic links (CVE-2014-9356) +- Fix decompression of xz image archives, preventing privilege escalation (CVE-2014-9357) +- Validate image IDs (CVE-2014-9358) + +#### Runtime +- Fix an issue when image archives are being read slowly + +#### Client +- Fix a regression related to stdin redirection +- Fix a regression with `docker cp` when destination is the current directory + +## 1.3.2 (2014-11-20) + +#### Security +- Fix tar breakout vulnerability +* Extractions are now sandboxed in a chroot +- Security options are no longer committed to images + +#### Runtime +- Fix deadlock in `docker ps -f exited=1` +- Fix a bug when `--volumes-from` references a container that failed to start + +#### Registry ++ `--insecure-registry` now accepts CIDR notation such as 10.1.0.0/16 +* Private registries whose IPs fall in the 127.0.0.0/8 range do not need the `--insecure-registry` flag +- Skip the experimental registry v2 API when mirroring is enabled + +## 1.3.1 (2014-10-28) + +#### Security +* Prevent fallback to SSL protocols < TLS 1.0 for client, daemon and registry ++ Secure HTTPS connection to registries with certificate verification and without HTTP fallback unless `--insecure-registry` is specified + +#### Runtime +- Fix issue where volumes would not be shared + +#### Client +- Fix issue with `--iptables=false` not automatically setting `--ip-masq=false` +- Fix docker run output to non-TTY stdout + +#### Builder +- Fix escaping `$` for environment variables +- Fix issue with lowercase `onbuild` Dockerfile instruction +- Restrict environment variable expansion to `ENV`, `ADD`, `COPY`, `WORKDIR`, `EXPOSE`, `VOLUME` and `USER` + +## 1.3.0 (2014-10-14) + +#### Notable features since 1.2.0 ++ Docker `exec` allows you to run additional processes inside existing containers ++ Docker `create` gives you the ability to create a container via the CLI without executing a process ++ `--security-opts` options to allow user to customize container labels and apparmor profiles ++ Docker `ps` filters +- Wildcard support for COPY/ADD ++ Move production URLs to get.docker.com from get.docker.io ++ Allocate IP address on the bridge inside a valid CIDR ++ Use drone.io for PR and CI testing ++ Ability to set up an official registry mirror ++ Ability to save multiple images with docker `save` + +## 1.2.0 (2014-08-20) + +#### Runtime ++ Make /etc/hosts, /etc/resolv.conf and /etc/hostname editable at runtime ++ Auto-restart containers using policies ++ Use /var/lib/docker/tmp for large temporary files ++ `--cap-add` and `--cap-drop` to tweak which Linux capabilities you want ++ `--device` to use devices in containers + +#### Client ++ `docker search` on private registries ++ Add `exited` filter to `docker ps --filter` +* `docker rm -f` now kills instead of stopping ++ Support for IPv6 addresses in `--dns` flag + +#### Proxy ++ Proxy instances in separate processes +* Small bug fix on UDP proxy + +## 1.1.2 (2014-07-23) + +#### Runtime ++ Fix port allocation for existing containers ++ Fix containers restart on daemon restart + +#### Packaging ++ Fix /etc/init.d/docker issue on Debian + +## 1.1.1 (2014-07-09) + +#### Builder +* Fix issue with ADD + +## 1.1.0 (2014-07-03) + +#### Notable features since 1.0.1 ++ Add `.dockerignore` support ++ Pause containers during `docker commit` ++ Add `--tail` to `docker logs` + +#### Builder ++ Allow a tar file as context for `docker build` (see the sketch below) +* Fix issue with white-spaces and multi-lines in `Dockerfiles` 
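A sketch of the tar-file build context mentioned just above; the file names are illustrative. `docker build -` reads the context from STDIN:

```bash
# Pack a directory (including its Dockerfile) into a tar context
tar -cf context.tar Dockerfile app/

# Feed the tarball to the builder on stdin
docker build -t demo - < context.tar
```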
+ +#### Runtime +* Overall performance improvements +* Allow `/` as source of `docker run -v` +* Fix port allocation +* Fix bug in `docker save` +* Add links information to `docker inspect` + +#### Client +* Improve command line parsing for `docker commit` + +#### Remote API +* Improve status code for the `start` and `stop` endpoints + +## 1.0.1 (2014-06-19) + +#### Notable features since 1.0.0 +* Enhance security for the LXC driver + +#### Builder +* Fix `ONBUILD` instruction passed to grandchildren + +#### Runtime +* Fix events subscription +* Fix /etc/hostname file with host networking +* Allow `-h` and `--net=none` +* Fix issue with hotplug devices in `--privileged` + +#### Client +* Fix artifacts with events +* Fix a panic with empty flags +* Fix `docker cp` on Mac OS X + +#### Miscellaneous +* Fix compilation on Mac OS X +* Fix several races + +## 1.0.0 (2014-06-09) + +#### Notable features since 0.12.0 +* Production support + +## 0.12.0 (2014-06-05) + +#### Notable features since 0.11.0 +* 40+ various improvements to stability, performance and usability +* New `COPY` Dockerfile instruction to allow copying a local file from the context into the container without ever extracting if the file is a tar file +* Inherit file permissions from the host on `ADD` +* New `pause` and `unpause` commands to allow pausing and unpausing of containers using the cgroup freezer +* The `images` command has a `-f`/`--filter` option to filter the list of images +* Add `--force-rm` to clean up after a failed build +* Standardize JSON keys in Remote API to CamelCase +* A pull triggered by `docker run` now assumes the `latest` tag if none is specified +* Enhance security on Linux capabilities and device nodes + +## 0.11.1 (2014-05-07) + +#### Registry +- Fix push and pull to private registry + +## 0.11.0 (2014-05-07) + +#### Notable features since 0.10.0 + +* SELinux support for mount and process labels +* Linked containers can be accessed by hostname +* Use the new `--net` flag to allow advanced network configuration such as host networking so that containers can use the host's network interfaces +* Add a ping endpoint to the Remote API to do healthchecks of your docker daemon +* Logs can now be returned with an optional timestamp +* Docker now works with registries that support SHA-512 +* Multiple registry endpoints are supported to allow registry mirrors + +## 0.10.0 (2014-04-08) + +#### Builder +- Fix printing multiple messages on a single line. Fixes broken output during builds. +- Follow symlinks inside container's root for ADD build instructions. +- Fix EXPOSE caching. + +#### Documentation +- Add the new options of `docker ps` to the documentation. +- Add the options of `docker restart` to the documentation. +- Update daemon docs and help messages for --iptables and --ip-forward. +- Updated apt-cacher-ng docs example. +- Remove duplicate description of --mtu from docs. +- Add missing -t and -v for `docker images` to the docs. +- Add fixes to the cli docs. +- Update libcontainer docs. +- Update images in docs to remove references to AUFS and LXC. +- Update the nodejs_web_app in the docs to use the new epel RPM address. +- Fix external link on security of containers. +- Update remote API docs. +- Add image size to history docs. +- Be explicit about binding to all interfaces in redis example. +- Document DisableNetwork flag in the 1.10 remote api. +- Document that `--lxc-conf` is lxc only. +- Add chef usage documentation. +- Add example for an image with multiple tags for `docker load`. 
+- Explain what `docker run -a` does in the docs. + +#### Contrib +- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile. +- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly. +- Remove inotifywait hack from the upstart host-integration example because it's not necessary any more. +- Add check-config script to contrib. +- Fix fish shell completion. + +#### Hack +* Clean up "go test" output from "make test" to be much more readable/scannable. +* Exclude more "definitely not unit tested Go source code" directories from hack/make/test. ++ Generate md5 and sha256 hashes when building, and upload them via hack/release.sh. +- Include contributed completions in Ubuntu PPA. ++ Add cli integration tests. +* Add tweaks to the hack scripts to make them simpler. + +#### Remote API ++ Add TLS auth support for API. +* Move git clone from daemon to client. +- Fix content-type detection in docker cp. +* Split API into 2 go packages. + +#### Runtime +* Support hairpin NAT without going through Docker server. +- devicemapper: succeed immediately when removing non-existing devices. +- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time and unlock while sleeping). +- devicemapper: increase timeout in waitClose to 10 seconds. +- devicemapper: ensure we shut down thin pool cleanly. +- devicemapper: pass info, rather than hash to activateDeviceIfNeeded, deactivateDevice, setInitialized, deleteDevice. +- devicemapper: avoid AB-BA deadlock. +- devicemapper: make shutdown better/faster. +- Improve alpha sorting in mflag. +- Remove manual http cookie management because the cookiejar is being used. +- Use BSD raw mode on Darwin. Fixes nano, tmux and others. +- Add FreeBSD support for the client. +- Merge auth package into registry. +- Add deprecation warning for -t on `docker pull`. +- Remove goroutine leak on error. +- Update parseLxcInfo to comply with new lxc1.0 format. +- Fix attach exit on darwin. +- Improve deprecation message. +- Retry retrieving the layer metadata up to 5 times for `docker pull`. +- Only unshare the mount namespace for execin. +- Merge existing config when committing. +- Disable daemon startup timeout. +- Fix issue #4681: add loopback interface when networking is disabled. +- Add failing test case for issue #4681. +- Send SIGTERM to child, instead of SIGKILL. +- Show the driver and the kernel version in `docker info` even when not in debug mode. +- Always symlink /dev/ptmx for libcontainer. This fixes console-related problems. +- Fix issue caused by the absence of /etc/apparmor.d. +- Don't leave empty cidFile behind when failing to create the container. +- Mount cgroups automatically if they're not mounted already. +- Use mock for search tests. +- Update to double-dash everywhere. +- Move .dockerenv parsing to lxc driver. +- Move all bind-mounts in the container inside the namespace. +- Don't use separate bind mount for container. +- Always symlink /dev/ptmx for libcontainer. +- Don't kill by pid for other drivers. +- Add initial logging to libcontainer. +* Sort by port in `docker ps`. +- Move networking drivers into runtime top level package. ++ Add --no-prune to `docker rmi`. ++ Add time since exit in `docker ps`. +- graphdriver: add build tags. +- Prevent allocation of previously allocated ports and improve port allocation. +* Add support for --since/--before in `docker ps`. +- Clean up container stop. 
++ Add support for configurable dns search domains. +- Add support for relative WORKDIR instructions. +- Add --output flag for docker save. +- Remove duplication of DNS entries in config merging. +- Add cpuset.cpus to cgroups and native driver options. +- Remove docker-ci. +- Promote btrfs. btrfs is no longer considered experimental. +- Add --input flag to `docker load`. +- Return error when existing bridge doesn't match IP address. +- Strip comments before parsing line continuations to avoid interpreting instructions as comments. +- Fix TestOnlyLoopbackExistsWhenUsingDisableNetworkOption to ignore "DOWN" interfaces. +- Add systemd implementation of cgroups and make containers show up as systemd units. +- Fix commit and import when no repository is specified. +- Remount /var/lib/docker as --private to fix scaling issue. +- Use the environment's proxy when pinging the remote registry. +- Reduce error level from harmless errors. +* Allow --volumes-from to be individual files. +- Fix expanding buffer in StdCopy. +- Set error regardless of attach or stdin. This fixes #3364. +- Add support for --env-file to load environment variables from files. +- Symlink /etc/mtab and /proc/mounts. +- Allow pushing a single tag. +- Shut down containers cleanly at shutdown and wait forever for the containers to shut down. This makes container shutdown on daemon shutdown work properly via SIGTERM. +- Don't throw error when starting an already running container. +- Fix dynamic port allocation limit. +- remove setupDev from libcontainer. +- Add API version to `docker version`. +- Return correct exit code when receiving signal and make SIGQUIT quit without cleanup. +- Fix --volumes-from mount failure. +- Allow non-privileged containers to create device nodes. +- Skip login tests because of external dependency on a hosted service. +- Deprecate `docker images --tree` and `docker images --viz`. +- Deprecate `docker insert`. +- Include base abstraction for apparmor. This fixes some apparmor related problems on Ubuntu 14.04. +- Add specific error message when hitting 401 over HTTP on push. +- Fix absolute volume check. +- Remove volumes-from from the config. +- Move DNS options to hostconfig. +- Update the apparmor profile for libcontainer. +- Add deprecation notice for `docker commit -run`. + +## 0.9.1 (2014-03-24) + +#### Builder +- Fix printing multiple messages on a single line. Fixes broken output during builds. + +#### Documentation +- Fix external link on security of containers. + +#### Contrib +- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly. +- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile. + +#### Hack +- Generate md5 and sha256 hashes when building, and upload them via hack/release.sh. + +#### Remote API +- Fix content-type detection in `docker cp`. + +#### Runtime +- Use BSD raw mode on Darwin. Fixes nano, tmux and others. +- Only unshare the mount namespace for execin. +- Retry to retrieve the layer metadata up to 5 times for `docker pull`. +- Merge existing config when committing. +- Fix panic in monitor. +- Disable daemon startup timeout. +- Fix issue #4681: add loopback interface when networking is disabled. +- Add failing test case for issue #4681. +- Send SIGTERM to child, instead of SIGKILL. +- Show the driver and the kernel version in `docker info` even when not in debug mode. +- Always symlink /dev/ptmx for libcontainer. This fixes console related problems. 
+- Fix issue caused by the absence of /etc/apparmor.d. +- Don't leave empty cidFile behind when failing to create the container. +- Improve deprecation message. +- Fix attach exit on darwin. +- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time, unlock while sleeping). +- devicemapper: succeed immediately when removing non-existing devices. +- devicemapper: increase timeout in waitClose to 10 seconds. +- Remove goroutine leak on error. +- Update parseLxcInfo to comply with new lxc1.0 format. + +## 0.9.0 (2014-03-10) + +#### Builder +- Avoid extra mount/unmount during build. This fixes mount/unmount related errors during build. +- Add error to docker build --rm. This adds missing error handling. +- Forbid chained onbuild, `onbuild from` and `onbuild maintainer` triggers. +- Make `--rm` the default for `docker build`. + +#### Documentation +- Download the docker client binary for Mac over https. +- Update the titles of the install instructions & descriptions. +* Add instructions for upgrading boot2docker. +* Add port forwarding example in OS X install docs. +- Attempt to disentangle repository and registry. +- Update docs to explain more about `docker ps`. +- Update sshd example to use a Dockerfile. +- Rework some examples, including the Python examples. +- Update docs to include instructions for a container's lifecycle. +- Update docs documentation to discuss the docs branch. +- Don't skip cert check for an example & use HTTPS. +- Bring back the memory and swap accounting section which was lost when the kernel page was removed. +- Explain DNS warnings and how to fix them on systems running and using a local nameserver. + +#### Contrib +- Add Tanglu support for mkimage-debootstrap. +- Add SteamOS support for mkimage-debootstrap. + +#### Hack +- Get package coverage when running integration tests. +- Remove the Vagrantfile. This is being replaced with boot2docker. +- Fix tests on systems where aufs isn't available. +- Update packaging instructions and remove the dependency on lxc. + +#### Remote API +* Move code specific to the API to the api package. +- Fix header content type for the API. Makes all endpoints use proper content type. +- Fix registry auth & remove ping calls from CmdPush and CmdPull. +- Add newlines to the JSON stream functions. + +#### Runtime +* Do not ping the registry from the CLI. All requests to registries flow through the daemon. +- Check for nil information return in the lxc driver. This fixes panics with older lxc versions. +- Devicemapper: cleanups and fix for unmount. Fixes two problems which were causing unmount to fail intermittently. +- Devicemapper: remove directory when removing device. Directories don't get left behind when removing the device. +* Devicemapper: enable skip_block_zeroing. Improves performance by not zeroing blocks. +- Devicemapper: fix shutdown warnings. Fixes shutdown warnings concerning pool device removal. +- Ensure docker cp stream is closed properly. Fixes problems with files not being copied by `docker cp`. +- Stop making `tcp://` default to `127.0.0.1:4243` and remove the default port for tcp. +- Fix `--run` in `docker commit`. This makes `docker commit --run` work again. +- Fix custom bridge related options. This makes custom bridges work again. ++ Mount-bind the PTY as container console. This allows tmux/screen to run. ++ Add the pure Go libcontainer library to make it possible to run containers using only features of the Linux kernel. 
++ Add native exec driver which uses libcontainer and make it the default exec driver. +- Add support for handling extended attributes in archives. +* Set the container MTU to be the same as the host MTU. ++ Add simple sha256 checksums for layers to speed up `docker push`. +* Improve kernel version parsing. +* Allow flag grouping (`docker run -it`). +- Remove chroot exec driver. +- Fix divide-by-zero panic. +- Rewrite `docker rmi`. +- Fix docker info with lxc 1.0.0. +- Fix fedora tty with apparmor. +* Don't always append env vars, replace defaults with vars from config. +* Fix a goroutine leak. +* Switch to Go 1.2.1. +- Fix unique constraint error checks. +* Handle symlinks for Docker's data directory and for TMPDIR. +- Add deprecation warnings for flags (-flag is deprecated in favor of --flag). +- Add apparmor profile for the native execution driver. +* Move system specific code from archive to pkg/system. +- Fix duplicate signal for `docker run -i -t` (issue #3336). +- Return correct process pid for lxc. +- Add a -G option to specify the group which unix sockets belong to. ++ Add `-f` flag to `docker rm` to force removal of running containers. ++ Kill ghost containers and restart all ghost containers when the docker daemon restarts. ++ Add `DOCKER_RAMDISK` environment variable to make Docker work when the root is on a ramdisk. + +## 0.8.1 (2014-02-18) + +#### Builder + +- Avoid extra mount/unmount during build. This removes an unneeded mount/unmount operation which was causing problems with devicemapper +- Fix regression with ADD of tar files. This stops Docker from decompressing tarballs added via ADD from the local file system +- Add error to `docker build --rm`. This adds a missing error check to ensure failures to remove containers are detected and reported + +#### Documentation + +* Update issue filing instructions +* Warn against the use of symlinks for Docker's storage folder +* Replace the Firefox example with an IceWeasel example +* Rewrite the PostgreSQL example using a Dockerfile and add more details to it +* Improve the OS X documentation + +#### Remote API + +- Fix broken images API for versions less than 1.7 +- Use the right encoding for all API endpoints which return JSON +- Move remote api client to api/ +- Queue calls to the API using generic socket wait + +#### Runtime + +- Fix the use of custom settings for bridges and custom bridges +- Refactor the devicemapper code to avoid many mount/unmount race conditions and failures +- Remove two panics which could make Docker crash in some situations +- Don't ping registry from the CLI client +- Enable skip_block_zeroing for devicemapper. This stops devicemapper from always zeroing entire blocks +- Fix --run in `docker commit`. This makes docker commit store `--run` in the image configuration +- Remove directory when removing devicemapper device. This cleans up leftover mount directories +- Drop NET_ADMIN capability for non-privileged containers. Unprivileged containers can't change their network configuration +- Ensure `docker cp` stream is closed properly +- Avoid extra mount/unmount during container registration. This removes an unneeded mount/unmount operation which was causing problems with devicemapper +- Stop allowing tcp:// as a default tcp bind address which binds to 127.0.0.1:4243 and remove the default port ++ Mount-bind the PTY as container console. This allows tmux and screen to run in a container +- Clean up archive closing. 
This fixes and improves archive handling +- Fix engine tests on systems where temp directories are symlinked +- Add test methods for save and load +- Avoid temporarily unmounting the container when restarting it. This fixes a race for devicemapper during restart +- Support submodules when building from a GitHub repository +- Quote volume path to allow spaces +- Fix remote tar ADD behavior. This fixes a regression which was causing Docker to extract tarballs + +## 0.8.0 (2014-02-04) + +#### Notable features since 0.7.0 + +* Images and containers can be removed much faster +* Building an image from source with docker build is now much faster +* The Docker daemon starts and stops much faster +* The memory footprint of many common operations has been reduced, by streaming files instead of buffering them in memory, fixing memory leaks, and fixing various suboptimal memory allocations +* Several race conditions were fixed, making Docker more stable under very high concurrency load, less likely to crash, and reducing the memory footprint of many common operations +* All packaging operations are now built on the Go language’s standard tar implementation, which is bundled with Docker itself. This makes packaging more portable across host distributions, and solves several issues caused by quirks and incompatibilities between different distributions of tar +* Docker can now create, remove and modify larger numbers of containers and images gracefully thanks to more aggressive releasing of system resources. For example the storage driver API now allows Docker to do reference counting on mounts created by the drivers +With the ongoing changes to the networking and execution subsystems of Docker, testing these areas has been a focus of the refactoring. By moving these subsystems into separate packages we can test, analyze, and monitor coverage and quality of these packages +* Many components have been separated into smaller sub-packages, each with a dedicated test suite. As a result the code is better-tested, more readable and easier to change + +* The ADD instruction now supports caching, which avoids unnecessarily re-uploading the same source content again and again when it hasn’t changed +* The new ONBUILD instruction adds to your image a “trigger” instruction to be executed at a later time, when the image is used as the base for another build +* Docker now ships with an experimental storage driver which uses the BTRFS filesystem for copy-on-write +* Docker is officially supported on Mac OS X +* The Docker daemon supports systemd socket activation + +## 0.7.6 (2014-01-14) + +#### Builder + +* Do not follow symlink outside of build context + +#### Runtime + +- Remount bind mounts when ro is specified +* Use https for fetching docker version + +#### Other + +* Inline the test.docker.io fingerprint +* Add ca-certificates to packaging documentation + +## 0.7.5 (2014-01-09) + +#### Builder + +* Disable compression for build. 
More space usage but a much faster upload +- Fix ADD caching for certain paths +- Do not compress archive from git build + +#### Documentation + +- Fix error in GROUP add example +* Make sure the GPG fingerprint is inline in the documentation +* Give more specific advice on setting up signing of commits for DCO + +#### Runtime + +- Fix misspelled container names +- Do not add hostname when networking is disabled +* Return most recent image from the cache by date +- Return all errors from docker wait +* Add Content-Type Header "application/json" to GET /version and /info responses + +#### Other + +* Update DCO to version 1.1 ++ Update Makefile to use "docker:GIT_BRANCH" as the generated image name +* Update Travis to check for new 1.1 DCO version + +## 0.7.4 (2014-01-07) + +#### Builder + +- Fix ADD caching issue with . prefixed path +- Fix docker build on devicemapper by reverting sparse file tar option +- Fix issue with file caching and prevent wrong cache hit +* Use same error handling while unmarshalling CMD and ENTRYPOINT + +#### Documentation + +* Simplify and streamline Amazon Quickstart +* Install instructions use unprefixed Fedora image +* Update instructions for mtu flag for Docker on GCE ++ Add Ubuntu Saucy to installation +- Fix for wrong version warning on master instead of latest + +#### Runtime + +- Only get the image's rootfs when we need to calculate the image size +- Correctly handle unmapping UDP ports +* Make CopyFileWithTar use a pipe instead of a buffer to save memory on docker build +- Fix login message to say pull instead of push +- Fix "docker load" help by removing "SOURCE" prompt and mentioning STDIN +* Make blank -H option default to the same as no -H was sent +* Extract cgroups utilities to own submodule + +#### Other + ++ Add Travis CI configuration to validate DCO and gofmt requirements ++ Add Developer Certificate of Origin Text +* Upgrade VBox Guest Additions +* Check standalone header when pinging a registry server + +## 0.7.3 (2014-01-02) + +#### Builder + ++ Update ADD to use the image cache, based on a hash of the added content +* Add error message for empty Dockerfile + +#### Documentation + +- Fix outdated link to the "Introduction" on www.docker.io ++ Update the docs to get wider when the screen does +- Add information about needing to install LXC when using raw binaries +* Update Fedora documentation to disentangle the docker and docker.io conflict +* Add a note about using the new `-mtu` flag in several GCE zones ++ Add FrugalWare installation instructions ++ Add a more complete example of `docker run` +- Fix API documentation for creating and starting Privileged containers +- Add missing "name" parameter documentation on "/containers/create" +* Add a mention of `lxc-checkconfig` as a way to check for some of the necessary kernel configuration +- Update the 1.8 API documentation with some additions that were added to the docs for 1.7 + +#### Hack + +- Add missing libdevmapper dependency to the packagers documentation +* Update minimum Go requirement to a hard line at Go 1.2+ +* Many minor improvements to the Vagrantfile ++ Add ability to customize dockerinit search locations when compiling (to be used very sparingly only by packagers of platforms who require a nonstandard location) ++ Add coverprofile generation reporting +- Add `-a` to our Go build flags, removing the need for recompiling the stdlib manually +* Update Dockerfile to be more canonical and have less spurious warnings during build +- Fix some miscellaneous `docker pull` progress bar 
+* Migrate more miscellaneous packages under the "pkg" folder
+* Update TextMate highlighting to automatically be enabled for files named "Dockerfile"
+* Reorganize syntax highlighting files under a common "contrib/syntax" directory
+* Update install.sh script (https://get.docker.io/) to not fail if busybox fails to download or run at the end of the Ubuntu/Debian installation
+* Add support for container names in bash completion
+
+#### Packaging
+
++ Add an official Docker client binary for Darwin (Mac OS X)
+* Remove empty "Vendor" string and add "License" on deb package
++ Add a stubbed version of "/etc/default/docker" in the deb package
+
+#### Runtime
+
+* Update layer application to extract tars in place, avoiding file churn while handling whiteouts
+- Fix permissiveness of mtime comparisons in tar handling (since GNU tar and Go tar do not yet support sub-second mtime precision)
+* Reimplement `docker top` in pure Go to work more consistently, and even inside Docker-in-Docker (thus removing the shell injection vulnerability present in some versions of `lxc-ps`)
++ Update `-H unix://` to work similarly to `-H tcp://` by inserting the default values for missing portions
+- Fix more edge cases regarding dockerinit and deleted or replaced docker or dockerinit files
+* Update container name validation to include '.'
+- Fix use of a symlink or non-absolute path as the argument to `-g` to work as expected
+* Update to handle external mounts outside of LXC, fixing many small mounting quirks and making future execution backends and other features simpler
+* Update to use proper box-drawing characters everywhere in `docker images -tree`
+* Move MTU setting from LXC configuration to directly use netlink
+* Add `-S` option to external tar invocation for more efficient sparse file handling
++ Add arch/os info to User-Agent string, especially for registry requests
++ Add `-mtu` option to Docker daemon for configuring MTU
+- Fix `docker build` to exit with a non-zero exit code on error
++ Add `DOCKER_HOST` environment variable to configure the client `-H` flag without specifying it manually for every invocation
+
+## 0.7.2 (2013-12-16)
+
+#### Runtime
+
++ Validate container names on creation with standard regex
+* Increase maximum image depth to 127 from 42
+* Continue to move API endpoints to the job API
++ Add -bip flag to allow specification of dynamic bridge IP via CIDR
+- Allow bridge creation when IPv6 is not enabled on certain systems
+* Set hostname and IP address from within dockerinit
+* Drop capabilities from within dockerinit
+- Fix volumes on host when a symlink is present in the image
+- Prevent deletion of an image if ANY container depends on it, even if the container is not running
+* Update docker push to use new progress display
+* Use os.Lstat to allow mounting unix sockets when inspecting volumes
+- Adjust handling of inactive user login
+- Add missing defines in devicemapper for older kernels
+- Allow untag operations with no container validation
+- Add auth config to docker build
+
+#### Documentation
+
+* Add more information about Docker logging
++ Add RHEL documentation
+* Add a direct example for changing the CMD that is run in a container
+* Update Arch installation documentation
++ Add section on Trusted Builds
++ Add Network documentation page
+
+#### Other
+
++ Add new cover bundle for providing code coverage reporting
+* Separate integration tests in bundles
+* Make Tianon the hack maintainer
+* Update mkimage-debootstrap with more tweaks for keeping images small
+* Use https to get the install script
+* Remove vendored dotcloud/tar now that Go 1.2 has been released
+
+## 0.7.1 (2013-12-05)
+
+#### Documentation
+
++ Add @SvenDowideit as documentation maintainer
++ Add links example
++ Add documentation regarding ambassador pattern
++ Add Google Cloud Platform docs
++ Add Dockerfile best practices
+* Update doc for RHEL
+* Update doc for registry
+* Update Postgres examples
+* Update doc for Ubuntu install
+* Improve remote API doc
+
+#### Runtime
+
++ Add hostconfig to docker inspect
++ Implement `docker logs -f` to stream logs
++ Add env variable to disable kernel version warning
++ Add -format to `docker inspect`
++ Support bind-mount for files
+- Fix bridge creation on RHEL
+- Fix image size calculation
+- Make sure iptables is called even if the bridge already exists
+- Fix issue with stderr-only attach
+- Remove init layer when destroying a container
+- Fix same port binding on different interfaces
+- `docker build` now returns the correct exit code
+- Fix `docker port` to display the correct port
+- `docker build` now checks that the Dockerfile exists client-side
+- `docker attach` now returns the correct exit code
+- Remove the name entry when the container does not exist
+
+#### Registry
+
+* Improve progress bars, add ETA for downloads
+* Simultaneous pulls now wait for the first to finish instead of failing
+- Tag only the top-layer image when pushing to registry
+- Fix issue with offline image transfer
+- Fix issue preventing using ':' in password for registry
+
+#### Other
+
++ Add pprof handler for debug
++ Create a Makefile
+* Use stdlib tar that now includes fix
+* Improve make.sh test script
+* Handle SIGQUIT on the daemon
+* Disable verbose during tests
+* Upgrade to go1.2 for official build
+* Improve unit tests
+* The test suite now runs all tests even if one fails
+* Refactor C in Go (Devmapper)
+- Fix OS X compilation
+
+## 0.7.0 (2013-11-25)
+
+#### Notable features since 0.6.0
+
+* Storage drivers: choose from aufs, device-mapper, or vfs.
+* Standard Linux support: docker now runs on unmodified Linux kernels and all major distributions.
+* Links: compose complex software stacks by connecting containers to each other (see the example after this list).
+* Container naming: organize your containers by giving them memorable names.
+* Advanced port redirects: specify port redirects per interface, or keep sensitive ports private.
+* Offline transfer: push and pull images to the filesystem without losing information.
+* Quality: numerous bugfixes and small usability improvements. Significant increase in test coverage.
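+
+Naming and links work together: a named container can be exposed to another
+container under an alias. A minimal sketch using the single-dash flags of the
+0.7-era CLI (the image and names are made-up examples):
+
+```
+docker run -d -name redis crosbymichael/redis
+# "redis" is now reachable from other containers under the alias "db":
+docker run -t -i -link redis:db ubuntu /bin/bash
+```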
+
+## 0.6.7 (2013-11-21)
+
+#### Runtime
+
+* Improve stability, fix some race conditions
+* Skip volumes that are mounted when deleting a container's volumes
+* Fix layer size computation: handle hard links correctly
+* Use the work path for docker cp CONTAINER:PATH
+* Fix tmp dir never being cleaned up
+* Speed up docker ps
+* More informative error message on name collisions
+* Fix nameserver regex
+* Always return long IDs
+* Fix container restart race condition
+* Keep published ports on docker stop; docker start
+* Fix container networking on Fedora
+* Correctly express "any address" to iptables
+* Fix network setup when reconnecting to ghost container
+* Prevent deletion if image is used by a running container
+* Lock around read operations in graph
+
+#### Remote API
+
+* Return full ID on docker rmi
+
+#### Client
+
++ Add -tree option to images
++ Offline image transfer
+* Exit with status 2 on usage error and display usage on stderr
+* Do not forward SIGCHLD to container
+* Use string timestamp for docker events -since
+
+#### Other
+
+* Update to go 1.2rc5
++ Add /etc/default/docker support to upstart
+
+## 0.6.6 (2013-11-06)
+
+#### Runtime
+
+* Ensure container name on register
+* Fix regression in /etc/hosts
++ Add lock around write operations in graph
+* Check if port is valid
+* Fix restart runtime error with ghost container networking
++ Add some more colors and animals to increase the pool of generated names
+* Fix issues in docker inspect
++ Escape apparmor confinement
++ Set environment variables using a file
+* Prevent docker insert from erasing data
++ Prevent DNS server conflicts in CreateBridgeIface
++ Validate bind mounts on the server side
++ Use parent image config in docker build
+
+#### Client
+
++ Add -P flag to publish all exposed ports
++ Add -notrunc and -q flags to docker history
+* Fix docker commit, tag and import usage
++ Add stars, trusted builds and library flags in docker search
+* Fix docker logs with tty
+
+#### Remote API
+
+* Make /events API send headers immediately
+* Do not split last column docker top
++ Add size to history
+
+#### Other
+
++ Contrib: Desktop integration. Firefox use case.
++ Dockerfile: bump to go1.2rc3
+
+## 0.6.5 (2013-10-29)
+
+#### Runtime
+
++ Containers can now be named
++ Containers can now be linked together for service discovery
++ 'run -a', 'start -a' and 'attach' can forward signals to the container for better integration with process supervisors
++ Automatically start crashed containers after a reboot
++ Expose IP, port, and proto as separate environment vars for container links
+* Allow ports to be published to specific IPs
+* Prohibit inter-container communication by default
+- Ignore ErrClosedPipe for stdin in Container.Attach
+- Remove unused field kernelVersion
+* Fix issue when mounting subdirectories of /mnt in container
+- Fix untag during removal of images
+* Check return value of syscall.Chdir when changing working directory inside dockerinit
+
+#### Client
+
+- Only pass stdin to hijack when needed to avoid closed pipe errors
+* Use less reflection in command-line method invocation
+- Monitor the TTY size after starting the container, not prior
+- Remove useless os.Exit() calls after log.Fatal
+
+#### Hack
+
++ Add initial init scripts library and a safer Ubuntu packaging script that works for Debian
+* Add -p option to invoke debootstrap with http_proxy
+- Update install.sh with $sh_c to get sudo/su for modprobe
+* Update all the mkimage scripts to use --numeric-owner as a tar argument
+* Update hack/release.sh process to automatically invoke hack/make.sh and bail on build and test issues
+
+#### Other
+
+* Documentation: Fix the flags for nc in example
+* Testing: Remove warnings and prevent mount issues
+- Testing: Change logic for TTY resize to avoid warning in tests
+- Builder: Fix race condition in docker build with verbose output
+- Registry: Fix content-type for PushImageJSONIndex method
+* Contrib: Improve helper tools to generate Debian and Arch Linux server images
+
+## 0.6.4 (2013-10-16)
+
+#### Runtime
+
+- Add cleanup of container when Start() fails
+* Add better comments to utils/stdcopy.go
+* Add utils.Errorf for error logging
++ Add -rm to docker run for removing a container on exit
+- Remove error messages which are not actually errors
+- Fix `docker rm` with volumes
+- Fix some error cases where an HTTP body might not be closed
+- Fix panic with wrong dockercfg file
+- Fix the attach behavior with -i
+* Record termination time in state.
+- Use empty string so TempDir uses the OS's temp dir automatically
+- Make sure to close the network allocators
++ Autorestart containers by default
+* Bump vendor kr/pty to commit 3b1f6487b `(syscall.O_NOCTTY)`
+* lxc: Allow set_file_cap capability in container
+- Move run -rm to the CLI only
+* Split stdout and stderr
+* Always create a new session for the container
+
+#### Testing
+
+- Add aggregated docker-ci email report
+- Add cleanup to remove leftover containers
+* Add nightly release to docker-ci
+* Add more tests around auth.ResolveAuthConfig
+- Remove a few errors in tests
+- Catch errClosing error when TCP and UDP proxies are terminated
+* Only run certain tests with TESTFLAGS='-run TestName' make.sh
+* Prevent docker-ci from testing closing PRs
+* Replace panic with log.Fatal in tests
+- Increase TestRunDetach timeout
+
+#### Documentation
+
+* Add initial draft of the Docker infrastructure doc
+* Add devenvironment link to CONTRIBUTING.md
+* Add `apt-get install curl` to Ubuntu docs
+* Add explanation for export restrictions
+* Add .dockercfg doc
+* Remove Gentoo install notes about #1422 workaround
+* Fix help text for -v option
+* Fix Ping endpoint documentation
+- Fix parameter names in docs for ADD command
+- Fix ironic typo in changelog
+* Various command fixes in Postgres example
+* Document how to edit and release docs
+- Minor updates to `postgresql_service.rst`
+* Clarify LGTM process to contributors
+- Correct an error in the package name
+* Document what `vagrant up` is actually doing
++ Improve doc search results
+* Clean up whitespace in API 1.5 docs
+* Use angle brackets in MAINTAINER example email
+* Update archlinux.rst
++ Change to a new style for the docs, including a version switcher
+* Formatting, add information about multiline JSON
+* Improve registry and index REST API documentation
+- Replace deprecated upgrading reference to docker-latest.tgz, which hasn't been updated since 0.5.3
+* Update Gentoo installation documentation now that we're in the portage tree proper
+* Clean up and reorganize docs and tooling for contributors and maintainers
+- Minor spelling correction of protocoll -> protocol
+
+#### Contrib
+
+* Add vim syntax highlighting for Dockerfiles from @honza
+* Add mkimage-arch.sh
+* Reorganize contributed completion scripts to add zsh completion
+
+#### Hack
+
+* Add vagrant user to the docker group
+* Add proper bash completion for "docker push"
+* Add xz utils as a runtime dep
+* Add cleanup/refactor portion of #2010 for hack and Dockerfile updates
++ Add contrib/mkimage-centos.sh back (from #1621), and associated documentation link
+* Add several of the small make.sh fixes from #1920, and make the output more consistent and contributor-friendly
++ Add @tianon to hack/MAINTAINERS
+* Improve network performance for VirtualBox
+* Revamp install.sh to be usable by more people, and to use official install methods whenever possible (apt repo, portage tree, etc.)
+- Fix contrib/mkimage-debian.sh apt caching prevention
++ Add Dockerfile.tmLanguage to contrib
+* Configure FPM to make /etc/init/docker.conf a config file
+* Enable SSH Agent forwarding in Vagrant VM
+* Several small tweaks/fixes for contrib/mkimage-debian.sh
+
+#### Other
+
+- Builder: Abort build if mergeConfig returns an error and fix duplicate error message
+- Packaging: Remove deprecated packaging directory
+- Registry: Use correct auth config when logging in.
+- Registry: Fix the error message so it is the same as the regex
+
+## 0.6.3 (2013-09-23)
+
+#### Packaging
+
+* Add 'docker' group on install for Ubuntu package
+* Update tar vendor dependency
+* Download apt key over HTTPS
+
+#### Runtime
+
+- Only copy and change permissions on non-bindmount volumes
+* Allow multiple volumes-from
+- Fix HTTP imports from STDIN
+
+#### Documentation
+
+* Update section on extracting the docker binary after build
+* Update development environment docs for new build process
+* Remove 'base' image from documentation
+
+#### Other
+
+- Client: Fix detach issue
+- Registry: Update regular expression to match index
+
+## 0.6.2 (2013-09-17)
+
+#### Runtime
+
++ Add domainname support
++ Implement image filtering with path.Match
+* Remove unnecessary warnings
+* Remove os/user dependency
+* Only mount the hostname file when the config exists
+* Handle signals within the `docker login` command
+- UID and GID are now also applied to volumes
+- `docker start` now sets an error code upon error
+- `docker run` now sets the same error code as the process started
+
+#### Builder
+
++ Add -rm option in order to remove intermediate containers
+* Allow multiline for the RUN instruction
+
+#### Registry
+
+* Implement login with private registry
+- Fix push issues
+
+#### Other
+
++ Hack: Vendor all dependencies
+* Remote API: Bump to v1.5
+* Packaging: Break down hack/make.sh into small scripts, one per 'bundle': test, binary, ubuntu etc.
+* Documentation: General improvements
+
+## 0.6.1 (2013-08-23)
+
+#### Registry
+
+* Pass "meta" headers in API calls to the registry
+
+#### Packaging
+
+- Use correct upstart script with new build tool
+- Use libffi-dev, don't build it from sources
+- Remove duplicate mercurial install command
+
+## 0.6.0 (2013-08-22)
+
+#### Runtime
+
++ Add lxc-conf flag to allow custom lxc options
++ Add an option to set the working directory
+* Add Image name to LogEvent tests
++ Add -privileged flag and relevant tests, docs, and examples
+* Add websocket support to /container/<id>/attach/ws
+* Add warning when net.ipv4.ip_forwarding = 0
+* Add hostname to environment
+* Add last stable version in `docker version`
+- Fix race conditions in parallel pull
+- Fix Graph ByParent() to generate list of child images per parent image
+- Fix typo: fmt.Sprint -> fmt.Sprintf
+- Fix small \n error in docker build
+* Fix to "Inject dockerinit at /.dockerinit"
+* Fix #910: print user name to docker info output
+* Use Go 1.1.2 for dockerbuilder
+* Use ranged for loop on channels
+- Use utils.ParseRepositoryTag instead of strings.Split(name, ":") in server.ImageDelete
+- Improve CMD, ENTRYPOINT, and attach docs
+- Improve connect message with socket error
+- Load authConfig only when needed and fix useless WARNING
+- Show tag used when image is missing
+* Apply volumes-from before creating volumes
+- Make docker run handle SIGINT/SIGTERM
+- Prevent crash when .dockercfg not readable
+- Install script should be fetched over https, not http
+* API, issue 1471: Use groups for socket permissions
+- Correctly detect IPv4 forwarding
+* Mount /dev/shm as a tmpfs
+- Switch from http to https for get.docker.io
+* Let userland proxy handle container-bound traffic
+* Update the Docker CLI to specify a value for the "Host" header.
+- Change network range to avoid conflict with EC2 DNS
+- Reduce connect and read timeout when pinging the registry
+* Parallel pull
+- Handle ip route showing mask-less IP addresses
+* Allow ENTRYPOINT without CMD
+- Always consider localhost as a domain name when parsing the FQN repo name
+* Refactor checksum
+
+#### Documentation
+
+* Add MongoDB image example
+* Add instructions for creating and using the docker group
+* Add sudo to examples and installation to documentation
+* Add ufw doc
+* Add a reference to ps -a
+* Add information about Docker's high-level tools over LXC
+* Fix typo in docs for docker run -dns
+* Fix a typo in the Ubuntu installation guide
+* Fix to docs regarding adding docker groups
+* Update default -H docs
+* Update readme with dependencies for building
+* Update amazon.rst to explain that Vagrant is not necessary for running Docker on EC2
+* PostgreSQL service example in documentation
+* Suggest installing linux-headers by default
+* Change the Twitter handle
+* Clarify Amazon EC2 installation
+* 'Base' image is deprecated and should no longer be referenced in the docs
+* Move note about officially supported kernel
+- Fix the logo being squished in Safari
+
+#### Builder
+
++ Add USER instruction to Dockerfile
++ Add workdir support for the Buildfile
+* Add no cache for docker build
+- Fix docker build and docker events output
+- Only count known instructions as build steps
+- Make sure the ENV instruction within a build performs a commit each time
+- Forbid certain paths within docker build ADD
+- Repository name (and optionally a tag) in build usage
+- Make sure ADD will create everything in 0755
+
+#### Remote API
+
+* Sort images by most recent creation date
+* Reworking opaque requests in registry module
+* Add image name in /events
+* Use mime pkg to parse Content-Type
+* 650 http utils and user agent field
+
+#### Hack
+
++ Bash Completion: Limit commands to containers of a relevant state
+* Add docker dependencies coverage testing into docker-ci
+
+#### Packaging
+
++ Docker-brew 0.5.2 support and memory footprint reduction
+* Add new docker dependencies into docker-ci
+- Revert "docker.upstart: avoid spawning a `sh` process"
++ Docker-brew and Docker standard library
++ Release docker with docker
+* Fix the upstart script generated by get.docker.io
+* Enable the docs to generate man pages
+* Revert Bind daemon to 0.0.0.0 in Vagrant.
+
+#### Registry
+
+* Improve auth push
+* Registry unit tests + mock registry
+
+#### Tests
+
+* Improve TestKillDifferentUser to prevent timeout on buildbot
+- Fix typo in TestBindMounts (runContainer called without image)
+* Improve TestGetContainersTop so it does not rely on sleep
+* Relax the lo interface test to allow iface index != 1
+* Add registry functional test to docker-ci
+* Add some tests in server and utils
+
+#### Other
+
+* Contrib: bash completion script
+* Client: Add docker cp command and copy API endpoint to copy container files/folders to the host
+* Don't read from stdout when only attached to stdin
+
+## 0.5.3 (2013-08-13)
+
+#### Runtime
+
+* Use docker group for socket permissions
+- Spawn shell within upstart script
+- Handle ip route showing mask-less IP addresses
+- Add hostname to environment
+
+#### Builder
+
+- Make sure the ENV instruction within a build performs a commit each time
+
+## 0.5.2 (2013-08-08)
+
+* Builder: Forbid certain paths within docker build ADD
+- Runtime: Change network range to avoid conflict with EC2 DNS
+* API: Change daemon to listen on unix socket by default
+
+## 0.5.1 (2013-07-30)
+
+#### Runtime
+
++ Add `ps` args to `docker top`
++ Add support for container ID files (pidfile-like)
++ Add container=lxc in default env
++ Support networkless containers with `docker run -n` and `docker -d -b=none`
+* Stdout/stderr logs are now stored in the same file as JSON
+* Allocate a /16 IP range by default, with fallback to /24. Try 12 ranges instead of 3.
+* Change .dockercfg format to JSON and support multiple auth remotes
+- Do not override volumes from config
+- Fix issue with EXPOSE override
+
+#### API
+
++ Docker client now sets useragent (RFC 2616)
++ Add /events endpoint
+
+#### Builder
+
++ ADD command now understands URLs
++ CmdAdd and CmdEnv now respect Dockerfile-set ENV variables
+- Create directories with 755 instead of 700 within ADD instruction
+
+#### Hack
+
+* Simplify unit tests with helpers
+* Improve docker.upstart event
+* Add coverage testing into docker-ci
+
+## 0.5.0 (2013-07-17)
+
+#### Runtime
+
++ List all processes running inside a container with 'docker top'
++ Host directories can be mounted as volumes with 'docker run -v'
++ Containers can expose public UDP ports (e.g., '-p 123/udp')
++ Optionally specify an exact public port (e.g., '-p 80:4500')
+* 'docker login' supports additional options
+- Don't save a container's hostname when committing an image
+
+#### Registry
+
++ New image naming scheme inspired by Go packaging convention allows arbitrary combinations of registries
+- Fix issues when uploading images to a private registry
+
+#### Builder
+
++ ENTRYPOINT instruction sets a default binary entry point to a container
++ VOLUME instruction marks a part of the container as persistent data
+* 'docker build' displays the full output of a build by default
+
+## 0.4.8 (2013-07-01)
+
++ Builder: New build operation ENTRYPOINT adds an executable entry point to the container.
+- Runtime: Fix a bug which caused 'docker run -d' to no longer print the container ID.
+- Tests: Fix issues in the test suite
+
+## 0.4.7 (2013-06-28)
+
+#### Remote API
+
+* The progress bar updates faster when downloading and uploading large files
+- Fix a bug in the optional unix socket transport
+
+#### Runtime
+
+* Improve detection of kernel version
++ Host directories can be mounted as volumes with 'docker run -b'
+- Fix an issue when only attaching to stdin
+* Use 'tar --numeric-owner' to avoid UID mismatch across multiple hosts
+
+#### Hack
+
+* Improve test suite and dev environment
+* Remove dependency of unit tests on 'os/user'
+
+#### Other
+
+* Registry: easier push/pull to a custom registry
++ Documentation: add terminology section
+
+## 0.4.6 (2013-06-22)
+
+- Runtime: fix a bug which caused creation of empty images (and volumes) to crash
+
+## 0.4.5 (2013-06-21)
+
++ Builder: 'docker build git://URL' fetches and builds a remote git repository
+* Runtime: 'docker ps -s' optionally prints container size
+* Tests: improved and simplified
+- Runtime: fix a regression introduced in 0.4.3 which caused the logs command to fail
+- Builder: fix a regression when using ADD with a single regular file
+
+## 0.4.4 (2013-06-19)
+
+- Builder: fix a regression introduced in 0.4.3 which caused builds to fail on new clients
+
+## 0.4.3 (2013-06-19)
+
+#### Builder
+
++ ADD of a local file will detect tar archives and unpack them
+* ADD improvements: use tar for copy + automatically unpack local archives
+* ADD uses tar/untar for copies instead of calling 'cp -ar'
+* Fix the behavior of ADD to be (mostly) reverse-compatible, predictable and well-documented
+- Fix a bug which caused builds to fail if ADD was the first command
+* Nicer output for 'docker build'
+
+#### Runtime
+
+* Remove bsdtar dependency
+* Add unix socket and multiple -H support
+* Prevent rm of running containers
+* Use go1.1 cookiejar
+- Fix issue detaching from running TTY container
+- Forbid parallel push/pull for a single image/repo. Fixes #311
+- Fix race condition within Run command when attaching
+
+#### Client
+
+* HumanReadable ProgressBar sizes in pull
+* Fix docker version's git commit output
+
+#### API
+
+* Send all tags on History API call
+* Add tag lookup to history command. Fixes #882
+
+#### Documentation
+
+- Fix missing command in IRC bouncer example
+
+## 0.4.2 (2013-06-17)
+
+- Packaging: Bump version to work around an Ubuntu bug
+
+## 0.4.1 (2013-06-17)
+
+#### Remote API
+
++ Add flag to enable cross-domain requests
++ Add image and container sizes in docker ps and docker images
+
+#### Runtime
+
++ Configure DNS host-wide with 'docker -d -dns'
++ Detect faulty DNS configuration and replace it with a public default
++ Allow docker run <name>:<id>
++ You can now specify a public port (e.g., -p 80:4500)
+* Improve image removal to garbage-collect unreferenced parents
+
+#### Client
+
+* Allow multiple params in inspect
+* Print the container ID before the hijack in `docker run`
+
+#### Registry
+
+* Add regexp check on repo's name
+* Move auth to the client
+- Remove login check on pull
+
+#### Other
+
+* Vagrantfile: Add the REST API port to Vagrantfile's port_forward
+* Upgrade to Go 1.1
+- Builder: don't ignore last line in Dockerfile when it doesn't end with \n
+
+## 0.4.0 (2013-06-03)
+
+#### Builder
+
++ Introducing Builder
++ 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile
+
+#### Remote API
+
++ Introducing Remote API
++ Control Docker programmatically using a simple HTTP/JSON API
+
+#### Runtime
+
+* Various reliability and usability improvements
+
+## 0.3.4 (2013-05-30)
+
+#### Builder
+
++ 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile
++ 'docker build -t FOO' applies the tag FOO to the newly built container
+
+#### Runtime
+
++ Interactive TTYs correctly handle window resize
+* Fix how configuration is merged between layers
+
+#### Remote API
+
++ Split stdout and stderr on 'docker run'
++ Optionally listen on a different IP and port (use at your own risk)
+
+#### Documentation
+
+* Improve install instructions
+
+## 0.3.3 (2013-05-23)
+
+- Registry: Fix push regression
+- Various bugfixes
+
+## 0.3.2 (2013-05-09)
+
+#### Registry
+
+* Improve the checksum process
+* Use the size to have a good progress bar while pushing
+* Use the actual archive if it exists in order to speed up the push
+- Fix error 400 on push
+
+#### Runtime
+
+* Store the actual archive on commit
+
+## 0.3.1 (2013-05-08)
+
+#### Builder
+
++ Implement the autorun capability within docker builder
++ Add caching to docker builder
++ Add support for docker builder with native API as top-level command
++ Implement ENV within docker builder
+- Check for command existence prior to create, and add unit tests for the case
+* Use any whitespace instead of tabs
+
+#### Runtime
+
++ Add go version to debug info
+* Kernel version - don't show the dash if flavor is empty
+
+#### Registry
+
++ Add docker search top-level command in order to search a repository
+- Fix pull for official images with a specific tag
+- Fix issue when logging in with a different user and trying to push
+* Improve checksum - async calculation
+
+#### Images
+
++ Output graph of images to dot (graphviz)
+- Fix ByParent function
+
+#### Documentation
+
++ New introduction and high-level overview
++ Add the documentation for docker builder
+- CSS fix for docker documentation to make REST API docs look better
+- Fix CouchDB example page header mistake
+- Fix README formatting
+* Update www.docker.io website.
+
+#### Other
+
++ Website: new high-level overview
+- Makefile: Swap "go get" for "go get -d", especially to compile on go1.1rc
+* Packaging: packaging for Ubuntu; issue #510: use golang-stable PPA package to build docker
+
+## 0.3.0 (2013-05-06)
+
+#### Runtime
+
+- Fix the command existence check
+- strings.Split may return an empty string on no match
+- Fix an index out of range crash if cgroup memory is not set
+
+#### Documentation
+
+* Various improvements
++ New example: sharing data between two CouchDB databases
+
+#### Other
+
+* Vagrant: Use only one deb line in /etc/apt
++ Registry: Implement the new registry
+
+## 0.2.2 (2013-05-03)
+
++ Support for data volumes ('docker run -v=PATH')
++ Share data volumes between containers ('docker run -volumes-from')
++ Improve documentation
+* Upgrade to Go 1.0.3
+* Various upgrades to the dev environment for contributors
+
+## 0.2.1 (2013-05-01)
+
++ 'docker commit -run' bundles a layer with default runtime options: command, ports etc.
+* Improve install process on Vagrant
++ New Dockerfile operation: "maintainer"
++ New Dockerfile operation: "expose"
++ New Dockerfile operation: "cmd"
++ Contrib script to build a Debian base layer
++ 'docker -d -r': restart crashed containers at daemon startup
+* Runtime: improve test coverage
+
+## 0.2.0 (2013-04-23)
+
+- Runtime: ghost containers can be killed and waited for
+* Documentation: update install instructions
+- Packaging: fix Vagrantfile
+- Development: automate releasing binaries and Ubuntu packages
++ Add a changelog
+- Various bugfixes
+
+## 0.1.8 (2013-04-22)
+
+- Dynamically detect cgroup capabilities
+- Issue stability warning on kernels <3.8
+- 'docker push' buffers on disk instead of memory
+- Fix 'docker diff' for removed files
+- Fix 'docker stop' for ghost containers
+- Fix handling of pidfile
+- Various bugfixes and stability improvements
+
+## 0.1.7 (2013-04-18)
+
+- Container ports are available on localhost
+- 'docker ps' shows allocated TCP ports
+- Contributors can run 'make hack' to start a continuous integration VM
+- Streamline Ubuntu packaging & uploading
+- Various bugfixes and stability improvements
+
+## 0.1.6 (2013-04-17)
+
+- Record the author of an image with 'docker commit -author'
+
+## 0.1.5 (2013-04-17)
+
+- Disable standalone mode
+- Use a custom DNS resolver with 'docker -d -dns'
+- Detect ghost containers
+- Improve diagnosis of missing system capabilities
+- Allow disabling memory limits at compile time
+- Add debian packaging
+- Documentation: installing on Arch Linux
+- Documentation: running Redis on docker
+- Fix lxc 0.9 compatibility
+- Automatically load aufs module
+- Various bugfixes and stability improvements
+
+## 0.1.4 (2013-04-09)
+
+- Full support for TTY emulation
+- Detach from a TTY session with the escape sequence `C-p C-q`
+- Various bugfixes and stability improvements
+- Minor UI improvements
+- Automatically create our own bridge interface 'docker0'
+
+## 0.1.3 (2013-04-04)
+
+- Choose TCP frontend port with '-p :PORT'
+- Layer format is versioned
+- Major reliability improvements to the process manager
+- Various bugfixes and stability improvements
+
+## 0.1.2 (2013-04-03)
+
+- Set container hostname with 'docker run -h'
+- Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]'
+- Various bugfixes and stability improvements
+- UI polish
+- Progress bar on push/pull
+- Use XZ compression by default
+- Make IP allocator lazy
+
+## 0.1.1 (2013-03-31)
+
+- Display shorthand IDs for convenience
+- Stabilize process management
+- Layers can include a commit message
+- Simplified 'docker attach'
+- Fix support for re-attaching
+- Various bugfixes and stability improvements
+- Auto-download at run
+- Auto-login on push
+- Beefed up documentation
+
+## 0.1.0 (2013-03-23)
+
+Initial public release
+
+- Implement registry in order to push/pull images
+- TCP port allocation
+- Fix termcaps on Linux
+- Add documentation
+- Add Vagrant support with Vagrantfile
+- Add unit tests
+- Add repository/tags to ease image management
+- Improve the layer implementation
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 00000000..824d1821
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,435 @@
+# Contributing to Docker
+
+Want to hack on Docker? Awesome! We have a contributor's guide that explains
+[setting up a Docker development environment and the contribution
+process](https://docs.docker.com/project/who-written-for/).
+
+![Contributors guide](docs/static_files/contributors.png)
+
+This page contains information about reporting issues as well as some tips and
+guidelines useful to experienced open source contributors. Finally, make sure
+you read our [community guidelines](#docker-community-guidelines) before you
+start participating.
+
+## Topics
+
+* [Reporting Security Issues](#reporting-security-issues)
+* [Design and Cleanup Proposals](#design-and-cleanup-proposals)
+* [Reporting Issues](#reporting-other-issues)
+* [Quick Contribution Tips and Guidelines](#quick-contribution-tips-and-guidelines)
+* [Community Guidelines](#docker-community-guidelines)
+
+## Reporting security issues
+
+The Docker maintainers take security seriously. If you discover a security
+issue, please bring it to their attention right away!
+
+Please **DO NOT** file a public issue; instead, send your report privately to
+[security@docker.com](mailto:security@docker.com).
+
+Security reports are greatly appreciated and we will publicly thank you for them.
+We also like to send gifts; if you're into Docker schwag, make sure to let
+us know. We currently do not offer a paid security bounty program, but are not
+ruling it out in the future.
+
+
+## Reporting other issues
+
+A great way to contribute to the project is to send a detailed report when you
+encounter an issue. We always appreciate a well-written, thorough bug report,
+and will thank you for it!
+
+Check that [our issue database](https://github.com/docker/docker/issues)
+doesn't already include that problem or suggestion before submitting an issue.
+If you find a match, add a quick "+1" or "I have this problem too." Doing this
+helps prioritize the most common problems and requests.
+
+When reporting issues, please include your host OS (Ubuntu 12.04, Fedora 19,
+etc.). Please include:
+
+* The output of `uname -a`.
+* The output of `docker version`.
+* The output of `docker -D info`.
+
+Please also include the steps required to reproduce the problem, if possible
+and applicable. This information will help us review and fix your issue faster.
+
+**Issue Report Template**:
+
+```
+Description of problem:
+
+
+`docker version`:
+
+
+`docker info`:
+
+
+`uname -a`:
+
+
+Environment details (AWS, VirtualBox, physical, etc.):
+
+
+How reproducible:
+
+
+Steps to Reproduce:
+1.
+2.
+3.
+
+
+Actual Results:
+
+
+Expected Results:
+
+
+Additional info:
+
+
+
+```
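+
+To make gathering this information easier, a small shell snippet along these
+lines (purely illustrative, not part of the repository) collects everything
+the template asks for into one file:
+
+```
+{
+  echo '== docker version =='; docker version; echo
+  echo '== docker info ==';    docker -D info; echo
+  echo '== uname -a ==';       uname -a
+} > docker-issue.txt
+```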
+
+## Quick contribution tips and guidelines
+
+This section gives the experienced contributor some tips and guidelines.
+
+### Pull requests are always welcome
+
+Not sure if that typo is worth a pull request? Found a bug and know how to fix
+it? Do it! We will appreciate it. Any significant improvement should be
+documented as [a GitHub issue](https://github.com/docker/docker/issues) before
+anybody starts working on it.
+
+We are always thrilled to receive pull requests. We do our best to process them
+quickly. If your pull request is not accepted on the first try,
+don't get discouraged! Our contributor's guide explains [the review process we
+use for simple changes](https://docs.docker.com/project/make-a-contribution/).
+
+### Design and cleanup proposals
+
+You can propose new designs for existing Docker features. You can also design
+entirely new features. We really appreciate contributors who want to refactor or
+otherwise clean up our project. For information on making these types of
+contributions, see [the advanced contribution
+section](https://docs.docker.com/project/advanced-contributing/) in the
+contributors guide.
+
+We try hard to keep Docker lean and focused. Docker can't do everything for
+everybody. This means that we might decide against incorporating a new feature.
+However, there might be a way to implement that feature *on top of* Docker.
+
+### Talking to other Docker users and contributors
+
+<table>
+  <tr>
+    <td>Internet Relay Chat (IRC)</td>
+    <td>
+      IRC is a direct line to our most knowledgeable Docker users; we have
+      both the #docker and #docker-dev channels on irc.freenode.net.
+      IRC is a rich chat protocol but it can overwhelm new users. You can search
+      our chat archives.
+      Read our IRC quickstart guide for an easy way to get started.
+    </td>
+  </tr>
+  <tr>
+    <td>Google Groups</td>
+    <td>
+      There are two groups. Docker-user is for people using Docker containers.
+      The docker-dev group is for contributors and other people contributing to
+      the Docker project.
+    </td>
+  </tr>
+  <tr>
+    <td>Twitter</td>
+    <td>
+      You can follow Docker's Twitter feed to get updates on our products. You
+      can also tweet us questions or just share blogs or stories.
+    </td>
+  </tr>
+  <tr>
+    <td>Stack Overflow</td>
+    <td>
+      Stack Overflow has over 7,000 Docker questions listed. We regularly
+      monitor Docker questions, and so do many other knowledgeable Docker users.
+    </td>
+  </tr>
+</table>
+
+### Conventions
+
+Fork the repository and make changes on your fork in a feature branch:
+
+- If it's a bug fix branch, name it XXXX-something where XXXX is the number of
+  the issue.
+- If it's a feature branch, create an enhancement issue to announce
+  your intentions, and name it XXXX-something where XXXX is the number of the
+  issue.
+
+Submit unit tests for your changes. Go has a great test framework built in; use
+it! Take a look at existing tests for inspiration. [Run the full test
+suite](https://docs.docker.com/project/test-and-docs/) on your branch before
+submitting a pull request.
+
+Update the documentation when creating or modifying features. Test your
+documentation changes for clarity, concision, and correctness, as well as a
+clean documentation build. See our contributors guide for [our style
+guide](https://docs.docker.com/project/doc-style) and instructions on [building
+the documentation](https://docs.docker.com/project/test-and-docs/#build-and-test-the-documentation).
+
+Write clean code. Universally formatted code promotes ease of writing, reading,
+and maintenance. Always run `gofmt -s -w file.go` on each changed file before
+committing your changes. Most editors have plug-ins that do this automatically.
+
+Pull request descriptions should be as clear as possible and include a reference
+to all the issues that they address.
+
+Commit messages must start with a capitalized and short summary (max. 50 chars)
+written in the imperative, followed by an optional, more detailed explanatory
+text which is separated from the summary by an empty line.
+
+Code review comments may be added to your pull request. Discuss, then make the
+suggested modifications and push additional commits to your feature branch. Post
+a comment after pushing. New commits show up in the pull request automatically,
+but the reviewers are notified only when you comment.
+
+Pull requests must be cleanly rebased on top of master without multiple branches
+mixed into the PR.
+
+**Git tip**: If your PR no longer merges cleanly, use `git rebase master` in your
+feature branch to update your pull request rather than `git merge master`.
+
+Before you make a pull request, squash your commits into logical units of work
+using `git rebase -i` and `git push -f`. A logical unit of work is a consistent
+set of patches that should be reviewed together: for example, upgrading the
+version of a vendored dependency and taking advantage of its now available new
+feature constitute two separate units of work. Implementing a new function and
+calling it in another file constitute a single logical unit of work. The vast
+majority of submissions should have a single commit, so if in doubt: squash
+down to one (a sketch of the whole workflow appears at the end of this section).
+
+After every commit, [make sure the test suite
+passes](https://docs.docker.com/project/test-and-docs/). Include documentation
+changes in the same pull request so that a revert would remove all traces of
+the feature or fix.
+
+Include an issue reference like `Closes #XXXX` or `Fixes #XXXX` in commits that
+close an issue. Including references automatically closes the issue on a merge.
+
+Please do not add yourself to the `AUTHORS` file, as it is regenerated regularly
+from the Git history.
+
+Please see the [Coding Style](#coding-style) for further guidelines.
+
+### Merge approval
+
+Docker maintainers use LGTM (Looks Good To Me) in comments on the code review to
+indicate acceptance.
+
+A change requires LGTMs from an absolute majority of the maintainers of each
+component affected.
+For example, if a change affects `docs/` and `registry/`, it needs an absolute
+majority from the maintainers of `docs/` AND, separately, an absolute majority
+of the maintainers of `registry/`.
+
+For more details, see the [MAINTAINERS](MAINTAINERS) page.
+
+### Sign your work
+
+The sign-off is a simple line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are pretty simple: if you can certify
+the below (from [developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+    Signed-off-by: Joe Smith <joe.smith@email.com>
+
+Use your real name (sorry, no pseudonyms or anonymous contributions).
+
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
+
+Note that the old-style `Docker-DCO-1.1-Signed-off-by: ...` format is still
+accepted, so there is no need to update outstanding pull requests to the new
+format right away, but please do adjust your processes for future contributions.
+
+### How can I become a maintainer?
+
+* Step 1: Learn the component inside out
+* Step 2: Make yourself useful by contributing code, bug fixes, support etc.
+* Step 3: Volunteer on the IRC channel (#docker at Freenode)
+* Step 4: Propose yourself at a scheduled docker meeting in #docker-dev
+
+Don't forget: being a maintainer is a time investment. Make sure you
+will have time to make yourself available. You don't have to be a
+maintainer to make a difference on the project!
+
+### IRC meetings
+
+There are two monthly meetings taking place on #docker-dev IRC to accommodate all
+timezones. Anybody can propose a topic for discussion prior to the meeting.
+
+If you feel the conversation is going off-topic, feel free to point it out.
+
+For the exact dates and times, have a look at [the irc-minutes
+repo](https://github.com/docker/irc-minutes). The minutes also contain all the
+notes from previous meetings.
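+
+Putting the pieces above together, a typical contribution might look something
+like the following (a minimal sketch; the branch name, file name, and commit
+message are made-up examples):
+
+```
+git checkout -b 1234-fix-frobnicator   # issue number + short description
+# ...edit code...
+gofmt -s -w file.go                    # format every file you changed
+go test ./...                          # make sure the tests pass
+git commit -s -m "Fix frobnicator"     # -s adds the Signed-off-by line
+git rebase -i master                   # squash to logical units of work
+git push -f origin 1234-fix-frobnicator
+```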
+ +## Docker community guidelines + +We want to keep the Docker community awesome, growing and collaborative. We need +your help to keep it that way. To help with this we've come up with some general +guidelines for the community as a whole: + +* Be nice: Be courteous, respectful and polite to fellow community members: + no regional, racial, gender, or other abuse will be tolerated. We like + nice people way better than mean ones! + +* Encourage diversity and participation: Make everyone in our community feel + welcome, regardless of their background and the extent of their + contributions, and do everything possible to encourage participation in + our community. + +* Keep it legal: Basically, don't get us in trouble. Share only content that + you own, do not share private or sensitive information, and don't break + the law. + +* Stay on topic: Make sure that you are posting to the correct channel and + avoid off-topic discussions. Remember when you update an issue or respond + to an email you are potentially sending to a large number of people. Please + consider this before you update. Also remember that nobody likes spam. + +### Guideline violations — 3 strikes method + +The point of this section is not to find opportunities to punish people, but we +do need a fair way to deal with people who are making our community suck. + +1. First occurrence: We'll give you a friendly, but public reminder that the + behavior is inappropriate according to our guidelines. + +2. Second occurrence: We will send you a private message with a warning that + any additional violations will result in removal from the community. + +3. Third occurrence: Depending on the violation, we may need to delete or ban + your account. + +**Notes:** + +* Obvious spammers are banned on first occurrence. If we don't do this, we'll + have spam all over the place. + +* Violations are forgiven after 6 months of good behavior, and we won't hold a + grudge. + +* People who commit minor infractions will get some education, rather than + hammering them in the 3 strikes process. + +* The rules apply equally to everyone in the community, no matter how much + you've contributed. + +* Extreme violations of a threatening, abusive, destructive or illegal nature + will be addressed immediately and are not subject to 3 strikes or forgiveness. + +* Contact abuse@docker.com to report abuse or appeal violations. In the case of + appeals, we know that mistakes happen, and we'll work with you to come up with a + fair solution if there has been a misunderstanding. + +## Coding Style + +Unless explicitly stated, we follow all coding guidelines from the Go +community. While some of these standards may seem arbitrary, they somehow seem +to result in a solid, consistent codebase. + +It is possible that the code base does not currently comply with these +guidelines. We are not looking for a massive PR that fixes this, since that +goes against the spirit of the guidelines. All new contributions should make a +best effort to clean up and make the code base better than they left it. +Obviously, apply your best judgement. Remember, the goal here is to make the +code base easier for humans to navigate and understand. Always keep that in +mind when nudging others to comply. + +The rules: + +1. All code should be formatted with `gofmt -s`. +2. All code should pass the default levels of + [`golint`](https://github.com/golang/lint). +3. 
All code should follow the guidelines covered in [Effective
+   Go](http://golang.org/doc/effective_go.html) and [Go Code Review
+   Comments](https://github.com/golang/go/wiki/CodeReviewComments).
+4. Comment the code. Tell us the why, the history and the context.
+5. Document _all_ declarations and methods, even private ones. Declare
+   expectations, caveats and anything else that may be important. If a type
+   gets exported, having the comments already there will ensure it's ready.
+6. Variable name length should be proportional to its context and no longer.
+   `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`.
+   In practice, short methods will have short variable names and globals will
+   have longer names.
+7. No underscores in package names. If you need a compound name, step back,
+   and re-examine why you need a compound name. If you still think you need a
+   compound name, lose the underscore.
+8. No utils or helpers packages. If a function is not general enough to
+   warrant its own package, it has not been written generally enough to be a
+   part of a util package. Just leave it unexported and well-documented.
+9. All tests should run with `go test` and outside tooling should not be
+   required. No, we don't need another unit testing framework. Assertion
+   packages are acceptable if they provide _real_ incremental value.
+10. Even though we call these "rules" above, they are actually just
+    guidelines. Since you've read all the rules, you now know that.
+
+If you are having trouble getting into the mood of idiomatic Go, we recommend
+reading through [Effective Go](http://golang.org/doc/effective_go.html). The
+[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the
+kool-aid is a lot easier than going thirsty.
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000..fdb58ad7
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,217 @@
+# This file describes the standard way to build Docker, using docker
+#
+# Usage:
+#
+# # Assemble the full dev environment. This is slow the first time.
+# docker build -t docker .
+#
+# # Mount your source in an interactive container for quick testing:
+# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
+#
+# # Run the test suite:
+# docker run --privileged docker hack/make.sh test
+#
+# # Publish a release:
+# docker run --privileged \
+#  -e AWS_S3_BUCKET=baz \
+#  -e AWS_ACCESS_KEY=foo \
+#  -e AWS_SECRET_KEY=bar \
+#  -e GPG_PASSPHRASE=gloubiboulga \
+#  docker hack/release.sh
+#
+# Note: AppArmor used to mess with privileged mode, but this is no longer
+# the case. Therefore, you don't have to disable it anymore.
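+#
+# # Run only the integration CLI tests (test-integration-cli is one of the
+# # hack/make.sh bundles; Dockerfile.simple later in this tree uses it too):
+# docker run --privileged docker hack/make.sh test-integration-cli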
+# + +FROM ubuntu:14.04 +MAINTAINER Tianon Gravi (@tianon) + +RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys E871F18B51E0147C77796AC81196BA81F6B0FC61 +RUN echo deb http://ppa.launchpad.net/zfs-native/stable/ubuntu trusty main > /etc/apt/sources.list.d/zfs.list + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + aufs-tools \ + automake \ + bash-completion \ + btrfs-tools \ + build-essential \ + createrepo \ + curl \ + dpkg-sig \ + git \ + iptables \ + libapparmor-dev \ + libcap-dev \ + libsqlite3-dev \ + mercurial \ + parallel \ + python-mock \ + python-pip \ + python-websocket \ + reprepro \ + ruby1.9.1 \ + ruby1.9.1-dev \ + s3cmd=1.1.0* \ + ubuntu-zfs \ + libzfs-dev \ + --no-install-recommends + +# Get lvm2 source for compiling statically +RUN git clone -b v2_02_103 https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 +# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install lxc +ENV LXC_VERSION 1.1.2 +RUN mkdir -p /usr/src/lxc \ + && curl -sSL https://linuxcontainers.org/downloads/lxc/lxc-${LXC_VERSION}.tar.gz | tar -v -C /usr/src/lxc/ -xz --strip-components=1 +RUN cd /usr/src/lxc \ + && ./configure \ + && make \ + && make install \ + && ldconfig + +# Install Go +ENV GO_VERSION 1.4.2 +RUN curl -sSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/local -xz \ + && mkdir -p /go/bin +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go:/go/src/github.com/docker/docker/vendor +RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 + +# Compile Go for cross compilation +ENV DOCKER_CROSSPLATFORMS \ + linux/386 linux/arm \ + darwin/amd64 darwin/386 \ + freebsd/amd64 freebsd/386 freebsd/arm \ + windows/amd64 windows/386 + +# (set an explicit GOARM of 5 for maximum compatibility) +ENV GOARM 5 +RUN cd /usr/local/go/src \ + && set -x \ + && for platform in $DOCKER_CROSSPLATFORMS; do \ + GOOS=${platform%/*} \ + GOARCH=${platform##*/} \ + ./make.bash --no-clean 2>&1; \ + done + +# This has been commented out and kept as reference because we don't support compiling with older Go anymore. 
+# ENV GOFMT_VERSION 1.3.3 +# RUN curl -sSL https://storage.googleapis.com/golang/go${GOFMT_VERSION}.$(go env GOOS)-$(go env GOARCH).tar.gz | tar -C /go/bin -xz --strip-components=2 go/bin/gofmt + +# Update this sha when we upgrade to go 1.5.0 +ENV GO_TOOLS_COMMIT 069d2f3bcb68257b627205f0486d6cc69a231ff9 +# Grab Go's cover tool for dead-simple code coverage testing +# Grab Go's vet tool for examining go code to find suspicious constructs +# and help prevent errors that the compiler might not catch +RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ + && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) \ + && go install -v golang.org/x/tools/cmd/cover \ + && go install -v golang.org/x/tools/cmd/vet +# Grab Go's lint tool +ENV GO_LINT_COMMIT f42f5c1c440621302702cb0741e9d2ca547ae80f +RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ + && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ + && go install -v github.com/golang/lint/golint + +# TODO replace FPM with some very minimal debhelper stuff +RUN gem install --no-rdoc --no-ri fpm --version 1.3.2 + +# Install registry +ENV REGISTRY_COMMIT ec87e9b6971d831f0eff752ddb54fb64693e51cd +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary server +ENV NOTARY_COMMIT 8e8122eb5528f621afcd4e2854c47302f17392f7 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT 8a87001d09852058f08a807ab6e8491d57ca1e88 +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT + +# Setup s3cmd config +RUN { \ + echo '[default]'; \ + echo 'access_key=$AWS_ACCESS_KEY'; \ + echo 'secret_key=$AWS_SECRET_KEY'; \ + } > ~/.s3cfg + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor selinux + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc + +# Register Docker's bash completion. 
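+# (ln -sv below symlinks the in-tree completion script so docker sub-commands
+# tab-complete in shells inside this development container)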
+RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image.sh /docker-frozen-images \ + busybox:latest@8c2e06607696bd4afb3d03b687e361cc43cf8ec1a4a725bc96e39f05ba97dd55 \ + hello-world:frozen@91c95931e552b11604fea91c2f537284149ec32fff0f700a4769cfd31d7696ae \ + jess/unshare@5c9f6ea50341a2a8eb6677527f2bdedbf331ae894a41714fda770fb130f3314d +# see also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) + +# Download man page generator +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone -b v1.0.3 https://github.com/cpuguy83/go-md2man.git "$GOPATH/src/github.com/cpuguy83/go-md2man" \ + && git clone -b v1.2 https://github.com/russross/blackfriday.git "$GOPATH/src/github.com/russross/blackfriday" \ + && go get -v -d github.com/cpuguy83/go-md2man \ + && go build -v -o /usr/local/bin/go-md2man github.com/cpuguy83/go-md2man \ + && rm -rf "$GOPATH" + +# Download toml validator +ENV TOMLV_COMMIT 9baf8a8a9f2ed20a8e54160840c492f937eeaf9a +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/BurntSushi/toml.git "$GOPATH/src/github.com/BurntSushi/toml" \ + && (cd "$GOPATH/src/github.com/BurntSushi/toml" && git checkout -q "$TOMLV_COMMIT") \ + && go build -v -o /usr/local/bin/tomlv github.com/BurntSushi/toml/cmd/tomlv \ + && rm -rf "$GOPATH" + +# Build/install the tool for embedding resources in Windows binaries +ENV RSRC_COMMIT e48dbf1b7fc464a9e85fcec450dddf80816b76e0 +RUN set -x \ + && git clone https://github.com/akavel/rsrc.git /go/src/github.com/akavel/rsrc \ + && cd /go/src/github.com/akavel/rsrc \ + && git checkout -q $RSRC_COMMIT \ + && go install -v + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/Dockerfile.simple b/Dockerfile.simple new file mode 100644 index 00000000..12ee7dde --- /dev/null +++ b/Dockerfile.simple @@ -0,0 +1,34 @@ +# docker build -t docker:simple -f Dockerfile.simple . +# docker run --rm docker:simple hack/make.sh dynbinary +# docker run --rm --privileged docker:simple hack/dind hack/make.sh test-unit +# docker run --rm --privileged -v /var/lib/docker docker:simple hack/dind hack/make.sh dynbinary test-integration-cli + +# This represents the bare minimum required to build and test Docker. + +FROM debian:jessie + +# compile and runtime deps +# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#build-dependencies +# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + btrfs-tools \ + curl \ + gcc \ + git \ + golang \ + libdevmapper-dev \ + libsqlite3-dev \ + \ + ca-certificates \ + e2fsprogs \ + iptables \ + procps \ + xz-utils \ + \ + aufs-tools \ + lxc \ + && rm -rf /var/lib/apt/lists/* + +ENV AUTO_GOPATH 1 +WORKDIR /usr/src/docker +COPY . /usr/src/docker diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..c7a3f0cf --- /dev/null +++ b/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/MAINTAINERS b/MAINTAINERS new file mode 100644 index 00000000..e3bcb581 --- /dev/null +++ b/MAINTAINERS @@ -0,0 +1,618 @@ +# Docker maintainers file +# +# This file describes who runs the Docker project and how. +# This is a living document - if you see something out of date or missing, +# speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant +# parser. + +[Rules] + + [Rules.maintainers] + + title = "What is a maintainer?" 
+ + text = """ +There are different types of maintainers, with different responsibilities, but +all maintainers have 3 things in common: + +1) They share responsibility in the project's success. +2) They have made a long-term, recurring time investment to improve the project. +3) They spend that time doing whatever needs to be done, not necessarily what +is the most interesting or fun. + +Maintainers are often under-appreciated, because their work is harder to appreciate. +It's easy to appreciate a really cool and technically advanced feature. It's harder +to appreciate the absence of bugs, the slow but steady improvement in stability, +or the reliability of a release process. But those things distinguish a good +project from a great one. +""" + + [Rules.bdfl] + + title = "The Benevolent dictator for life (BDFL)" + + text = """ +Docker follows the timeless, highly efficient and totally unfair system +known as [Benevolent dictator for +life](https://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life), with +yours truly, Solomon Hykes, in the role of BDFL. This means that all +decisions are made, by default, by Solomon. Since making every decision +myself would be highly un-scalable, in practice decisions are spread +across multiple maintainers. + +Ideally, the BDFL role is like the Queen of England: awesome crown, but not +an actual operational role day-to-day. The real job of a BDFL is to NEVER GO AWAY. +Every other rule can change, perhaps drastically so, but the BDFL will always +be there, preserving the philosophy and principles of the project, and keeping +ultimate authority over its fate. This gives us great flexibility in experimenting +with various governance models, knowing that we can always press the "reset" button +without fear of fragmentation or deadlock. See the US congress for a counter-example. + +BDFL daily routine: + +* Is the project governance stuck in a deadlock or irreversibly fragmented? + * If yes: refactor the project governance +* Are there issues or conflicts escalated by core? + * If yes: resolve them +* Go back to polishing that crown. +""" + + [Rules.decisions] + + title = "How are decisions made?" + + text = """ +Short answer: EVERYTHING IS A PULL REQUEST. + +Docker is an open-source project with an open design philosophy. This +means that the repository is the source of truth for EVERY aspect of the +project, including its philosophy, design, road map, and APIs. *If it's +part of the project, it's in the repo. If it's in the repo, it's part of +the project.* + +As a result, all decisions can be expressed as changes to the +repository. An implementation change is a change to the source code. An +API change is a change to the API specification. A philosophy change is +a change to the philosophy manifesto, and so on. + +All decisions affecting Docker, big and small, follow the same 3 steps: + +* Step 1: Open a pull request. Anyone can do this. + +* Step 2: Discuss the pull request. Anyone can do this. + +* Step 3: Merge or refuse the pull request. Who does this depends on the nature +of the pull request and which areas of the project it affects. See *review flow* +for details. + +Because Docker is such a large and active project, it's important for everyone to know +who is responsible for deciding what. That is determined by a precise set of rules. + +* For every *decision* in the project, the rules should designate, in a deterministic way, +who should *decide*. 
+
+* For every *problem* in the project, the rules should designate, in a deterministic way,
+who should be responsible for *fixing* it.
+
+* For every *question* in the project, the rules should designate, in a deterministic way,
+who should be expected to have the *answer*.
+"""
+
+	[Rules.review]
+
+		title = "Review flow"
+
+		text = """
+Pull requests should be processed according to the following flow:
+
+* For each subsystem affected by the change, the maintainers of the subsystem must approve or refuse it.
+It is the responsibility of the subsystem maintainers to process patches affecting them in a timely
+manner.
+
+* If the change affects areas of the code which are not part of a subsystem,
+or if subsystem maintainers are unable to reach a timely decision, it must be approved by
+the core maintainers.
+
+* If the change affects the UI or public APIs, or if it represents a major change in architecture,
+the architects must approve or refuse it.
+
+* If the change affects the operations of the project, it must be approved or rejected by
+the relevant operators.
+
+* If the change affects the governance, philosophy, goals or principles of the project,
+it must be approved by the BDFL.
+"""
+
+	[Rules.DCO]
+
+		title = "Helping contributors with the DCO"
+
+		text = """
+The [DCO or `Sign your work`](
+https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work)
+requirement is not intended as a roadblock or speed bump.
+
+Some Docker contributors are not as familiar with `git`, or have used a web-based
+editor, and thus asking them to `git commit --amend -s` is not the best way forward.
+
+In this case, maintainers can update the commits based on clause (c) of the DCO. The
+most trivial way for a contributor to allow the maintainer to do this is to add
+a DCO signature in a pull request's comment, or a maintainer can simply note that
+the change is sufficiently trivial that it does not substantively change the existing
+contribution - i.e., a spelling change.
+
+When you add someone's DCO, please also add your own to keep a log.
+"""
+
+	[Rules.holiday]
+
+		title = "I'm a maintainer, and I'm going on holiday"
+
+		text = """
+Please let your co-maintainers and other contributors know by raising a pull
+request that comments out your `MAINTAINERS` file entry using a `#`.
+"""
+
+	[Rules."no direct push"]
+
+		title = "I'm a maintainer. Should I make pull requests too?"
+
+		text = """
+Yes. Nobody should ever push to master directly. All changes should be
+made through a pull request.
+"""
+
+	[Rules.meta]
+
+		title = "How is this process changed?"
+
+		text = "Just like everything else: by making a pull request :)"
+
+# Current project organization
+[Org]
+
+	bdfl = "shykes"
+
+	# The chief architect is responsible for the overall integrity of the technical architecture
+	# across all subsystems, and the consistency of APIs and UI.
+	#
+	# Changes to UI, public APIs and overall architecture (for example a plugin system) must
+	# be approved by the chief architect.
+	"Chief Architect" = "shykes"
+
+	# The Chief Operator is responsible for the day-to-day operations of the project, including:
+	# - facilitating communications amongst all the contributors;
+	# - tracking release schedules;
+	# - managing the relationship with downstream distributions and upstream dependencies;
+	# - helping new contributors to get involved and become successful contributors and maintainers.
+	#
+	# The role is also responsible for managing and measuring the success of the overall project
+	# and ensuring it is governed properly, working in concert with the Docker Governance Advisory Board (DGAB).
+	"Chief Operator" = "spf13"
+
+	[Org.Operators]
+
+	# The operators make sure the trains run on time. They are responsible for overall operations
+	# of the project. This includes facilitating communication between all the participants; helping
+	# newcomers get involved and become successful contributors and maintainers; tracking the schedule
+	# of releases; managing the relationship with downstream distributions and upstream dependencies;
+	# defining measures of success for the project and measuring progress; and devising and implementing
+	# tools and processes which make contributors and maintainers happier and more efficient.
+
+
+		[Org.Operators.security]
+
+			people = [
+				"erw",
+				"diogomonica",
+				"nathanmccauley"
+			]
+
+		[Org.Operators."monthly meetings"]
+
+			people = [
+				"sven",
+				"tianon"
+			]
+
+		[Org.Operators.infrastructure]
+
+			people = [
+				"jfrazelle",
+				"crosbymichael"
+			]
+
+		[Org.Operators.community]
+			people = [
+				"theadactyl"
+			]
+
+	# The chief maintainer is responsible for all aspects of quality for the project, including
+	# code reviews, usability, stability, security, performance, etc.
+	# The most important function of the chief maintainer is to lead by example. On the first
+	# day of a new maintainer, the best advice should be "follow the C.M.'s example and you'll
+	# be fine".
+	"Chief Maintainer" = "crosbymichael"
+
+	# The community manager is responsible for serving the project community, including users,
+	# contributors and partners. This involves:
+	# - facilitating communication between maintainers, contributors and users
+	# - organizing contributor and maintainer events
+	# - helping new contributors get involved
+	# - anything the project community needs to be successful
+	#
+	# The community manager is a point of contact for any contributor who has questions, concerns
+	# or feedback about project operations.
+	"Community Manager" = "theadactyl"
+
+	[Org."Core maintainers"]
+
+	# The Core maintainers are the ghostbusters of the project: when there's a problem others
+	# can't solve, they show up and fix it with bizarre devices and weaponry.
+	# They have final say on technical implementation and coding style.
+	# They are ultimately responsible for quality in all its forms: usability polish,
+	# bugfixes, performance, stability, etc. When ownership can cleanly be passed to
+	# a subsystem, they are responsible for doing so and holding the
+	# subsystem maintainers accountable. If ownership is unclear, they are the de facto owners.
+
+	# For each release (including minor releases), a "release captain" is assigned from the
+	# pool of core maintainers. Rotation is encouraged across all maintainers, to ensure
+	# the release process is clear and up-to-date.
+	#
+	# It is common for core maintainers to "branch out" to join or start a subsystem.
+
+
+		people = [
+			"calavera",
+			"crosbymichael",
+			"erikh",
+			"estesp",
+			"icecrime",
+			"jfrazelle",
+			"lk4d4",
+			"runcom",
+			"tibor",
+			"unclejack",
+			"vbatts",
+			"vieux",
+			"vishh"
+		]
+
+
+	[Org.Subsystems]
+
+	# As the project grows, it gets separated into well-defined subsystems. Each subsystem
+	# has a dedicated group of maintainers, who are dedicated to that subsystem and responsible
+	# for its quality.
+	# This "cellular division" is the primary mechanism for scaling maintenance of the project as it grows.
+	#
+	# The maintainers of each subsystem are responsible for:
+	#
+	# 1. Exposing a clear road map for improving their subsystem.
+	# 2. Delivering prompt feedback and decisions on pull requests affecting their subsystem.
+	# 3. Being available to anyone with questions, bug reports, criticism etc.
+	#    on their component. This includes IRC, GitHub requests and the mailing
+	#    list.
+	# 4. Making sure their subsystem respects the philosophy, design and
+	#    road map of the project.
+	#
+	# #### How to review patches to your subsystem
+	#
+	# Accepting pull requests:
+	#
+	# - If the pull request appears to be ready to merge, give it a `LGTM`, which
+	#   stands for "Looks Good To Me".
+	# - If the pull request has some small problems that need to be changed, make
+	#   a comment addressing the issues.
+	# - If the changes needed to a PR are small, you can add a "LGTM once the
+	#   following comments are addressed..."; this will reduce needless back and
+	#   forth.
+	# - If the PR only needs a few changes before being merged, any MAINTAINER can
+	#   make a replacement PR that incorporates the existing commits and fixes the
+	#   problems before a fast track merge.
+	#
+	# Closing pull requests:
+	#
+	# - If a PR appears to be abandoned, after having attempted to contact the
+	#   original contributor, then a replacement PR may be made. Once the
+	#   replacement PR is made, any contributor may close the original one.
+	# - If you are not sure if the pull request implements a good feature or you
+	#   do not understand the purpose of the PR, ask the contributor to provide
+	#   more documentation. If the contributor is not able to adequately explain
+	#   the purpose of the PR, the PR may be closed by any MAINTAINER.
+	# - If a MAINTAINER feels that the pull request is sufficiently architecturally
+	#   flawed, or if the pull request needs significantly more design discussion
+	#   before being considered, the MAINTAINER should close the pull request with
+	#   a short explanation of what discussion still needs to be had. It is
+	#   important not to leave such pull requests open, as this will waste both the
+	#   MAINTAINER's time and the contributor's time. It is not good to string a
+	#   contributor along for weeks or months, having them make many changes to a
+	#   PR that will eventually be rejected.
+ + [Org.Subsystems.Documentation] + + people = [ + "fredlf", + "james", + "moxiegirl", + "thaJeztah", + "jamtur01", + "spf13", + "sven" + ] + + [Org.Subsystems.libcontainer] + + people = [ + "crosbymichael", + "jnagal", + "lk4d4", + "mpatel", + "vmarmol" + ] + + [Org.Subsystems.registry] + + people = [ + "dmcg", + "dmp42", + "jlhawn", + "samalba", + "sday", + "vbatts" + ] + + [Org.Subsystems."build tools"] + + people = [ + "shykes", + "tianon" + ] + + [Org.Subsystem."remote api"] + + people = [ + "vieux" + ] + + [Org.Subsystem.swarm] + + people = [ + "aluzzardi", + "vieux" + ] + + [Org.Subsystem.machine] + + people = [ + "bfirsh", + "ehazlett" + ] + + [Org.Subsystem.compose] + + people = [ + "aanand" + ] + + [Org.Subsystem.builder] + + people = [ + "duglin", + "erikh", + "tibor" + ] + + [Org.Curators] + + # The curators help ensure that incoming issues and pull requests are properly triaged and + # that our various contribution and reviewing processes are respected. With their knowledge of + # the repository activity, they can also guide contributors to relevant material or + # discussions. + # + # They are neither code nor docs reviewers, so they are never expected to merge. They can + # however: + # - close an issue or pull request when it's an exact duplicate + # - close an issue or pull request when it's inappropriate or off-topic + + people = [ + "thajeztah" + ] + + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.aanand] + Name = "Aanand Prasad" + Email = "aanand@docker.com" + GitHub = "aanand" + + [people.aluzzardi] + Name = "Andrea Luzzardi" + Email = "aluzzardi@docker.com" + GitHub = "aluzzardi" + + [people.bfirsh] + Name = "Ben Firshman" + Email = "ben@firshman.co.uk" + GitHub = "bfirsh" + + [people.calavera] + Name = "David Calavera" + Email = "david.calavera@gmail.com" + GitHub = "calavera" + + [people.cpuguy83] + Name = "Brian Goff" + Email = "cpuguy83@gmail.com" + Github = "cpuguy83" + + [people.crosbymichael] + Name = "Michael Crosby" + Email = "crosbymichael@gmail.com" + GitHub = "crosbymichael" + + [people.diogomonica] + Name = "Diogo Monica" + Email = "diogo@docker.com" + GitHub = "diogomonica" + + [people.duglin] + Name = "Doug Davis" + Email = "dug@us.ibm.com" + GitHub = "duglin" + + [people.dmcg] + Name = "Derek McGowan" + Email = "derek@docker.com" + Github = "dmcgowan" + + [people.dmp42] + Name = "Olivier Gambier" + Email = "olivier@docker.com" + Github = "dmp42" + + [people.ehazlett] + Name = "Evan Hazlett" + Email = "ejhazlett@gmail.com" + GitHub = "ehazlett" + + [people.erikh] + Name = "Erik Hollensbe" + Email = "erik@docker.com" + GitHub = "erikh" + + [people.erw] + Name = "Eric Windisch" + Email = "eric@windisch.us" + GitHub = "ewindisch" + + [people.estesp] + Name = "Phil Estes" + Email = "estesp@linux.vnet.ibm.com" + GitHub = "estesp" + + [people.fredlf] + Name = "Fred Lifton" + Email = "fred.lifton@docker.com" + GitHub = "fredlf" + + [people.icecrime] + Name = "Arnaud Porterie" + Email = "arnaud@docker.com" + GitHub = "icecrime" + + [people.jfrazelle] + Name = "Jessie Frazelle" + Email = "j@docker.com" + GitHub = "jfrazelle" + + [people.jlhawn] + Name = "Josh Hawn" + Email = "josh.hawn@docker.com" + Github = "jlhawn" + + [people.lk4d4] + Name = "Alexander Morozov" + Email = "lk4d4@docker.com" + GitHub = "lk4d4" + + [people.moxiegirl] + Name = "Mary Anthony" + Email = 
"mary.anthony@docker.com" + GitHub = "moxiegirl" + + [people.nathanmccauley] + Name = "Nathan McCauley" + Email = "nathan.mccauley@docker.com" + GitHub = "nathanmccauley" + + [people.runcom] + Name = "Antonio Murdaca" + Email = "me@runcom.ninja" + GitHub = "runcom" + + [people.sday] + Name = "Stephen Day" + Email = "stephen.day@docker.com" + Github = "stevvooe" + + [people.shykes] + Name = "Solomon Hykes" + Email = "solomon@docker.com" + GitHub = "shykes" + + [people.spf13] + Name = "Steve Francia" + Email = "steve.francia@gmail.com" + GitHub = "spf13" + + [people.sven] + Name = "Sven Dowideit" + Email = "SvenDowideit@home.org.au" + GitHub = "SvenDowideit" + + [people.thajeztah] + Name = "Sebastiaan van Stijn" + Email = "github@gone.nl" + GitHub = "thaJeztah" + + [people.theadactyl] + Name = "Thea Lamkin" + Email = "thea@docker.com" + GitHub = "theadactyl" + + [people.tianon] + Name = "Tianon Gravi" + Email = "admwiggin@gmail.com" + GitHub = "tianon" + + [people.tibor] + Name = "Tibor Vass" + Email = "tibor@docker.com" + GitHub = "tiborvass" + + [people.vbatts] + Name = "Vincent Batts" + Email = "vbatts@redhat.com" + GitHub = "vbatts" + + [people.vieux] + Name = "Victor Vieux" + Email = "vieux@docker.com" + GitHub = "vieux" + + [people.vmarmol] + Name = "Victor Marmol" + Email = "vmarmol@google.com" + GitHub = "vmarmol" + + [people.jnagal] + Name = "Rohit Jnagal" + Email = "jnagal@google.com" + GitHub = "rjnagal" + + [people.mpatel] + Name = "Mrunal Patel" + Email = "mpatel@redhat.com" + GitHub = "mrunalp" + + [people.unclejack] + Name = "Cristian Staretu" + Email = "cristian.staretu@gmail.com" + GitHub = "unclejack" + + [people.vishh] + Name = "Vishnu Kannan" + Email = "vishnuk@google.com" + GitHub = "vishh" diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..a3d3a0c8 --- /dev/null +++ b/Makefile @@ -0,0 +1,80 @@ +.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration-cli test-docker-py validate + +# env vars passed through directly to Docker's build scripts +# to allow things like `make DOCKER_CLIENTONLY=1 binary` easily +# `docs/sources/contributing/devenvironment.md ` and `project/PACKAGERS.md` have some limited documentation of some of these +DOCKER_ENVS := \ + -e BUILDFLAGS \ + -e DOCKER_CLIENTONLY \ + -e DOCKER_DEBUG \ + -e DOCKER_EXECDRIVER \ + -e DOCKER_EXPERIMENTAL \ + -e DOCKER_GRAPHDRIVER \ + -e DOCKER_STORAGE_OPTS \ + -e DOCKER_USERLANDPROXY \ + -e TESTDIRS \ + -e TESTFLAGS \ + -e TIMEOUT +# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds + +# to allow `make BIND_DIR=. 
shell` or `make BIND_DIR= test` +# (default to no bind mount if DOCKER_HOST is set) +# note: BINDDIR is supported for backwards-compatibility here +BIND_DIR := $(if $(BINDDIR),$(BINDDIR),$(if $(DOCKER_HOST),,bundles)) +DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/docker/docker/$(BIND_DIR)") + + +GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) +DOCKER_IMAGE := docker-dev$(if $(GIT_BRANCH),:$(GIT_BRANCH)) +DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH)) + +DOCKER_RUN_DOCKER := docker run --rm -it --privileged $(DOCKER_ENVS) $(DOCKER_MOUNT) "$(DOCKER_IMAGE)" + +DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE + +# for some docs workarounds (see below in "docs-build" target) +GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null) + +default: binary + +all: build + $(DOCKER_RUN_DOCKER) hack/make.sh + +binary: build + $(DOCKER_RUN_DOCKER) hack/make.sh binary + +cross: build + $(DOCKER_RUN_DOCKER) hack/make.sh binary cross + +deb: build + $(DOCKER_RUN_DOCKER) hack/make.sh binary build-deb + +rpm: build + $(DOCKER_RUN_DOCKER) hack/make.sh binary build-rpm + +test: build + $(DOCKER_RUN_DOCKER) hack/make.sh binary cross test-unit test-integration-cli test-docker-py + +test-unit: build + $(DOCKER_RUN_DOCKER) hack/make.sh test-unit + +test-integration-cli: build + $(DOCKER_RUN_DOCKER) hack/make.sh binary test-integration-cli + +test-docker-py: build + $(DOCKER_RUN_DOCKER) hack/make.sh binary test-docker-py + +validate: build + $(DOCKER_RUN_DOCKER) hack/make.sh validate-dco validate-gofmt validate-pkg validate-lint validate-test validate-toml validate-vet + +shell: build + $(DOCKER_RUN_DOCKER) bash + +build: bundles + docker build -t "$(DOCKER_IMAGE)" . + +bundles: + mkdir bundles + +docs: + $(MAKE) -C docs docs diff --git a/NOTICE b/NOTICE new file mode 100644 index 00000000..6e6f469a --- /dev/null +++ b/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/kr/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/README.md b/README.md new file mode 100644 index 00000000..86f45a3d --- /dev/null +++ b/README.md @@ -0,0 +1,295 @@ +Docker: the Linux container engine +================================== + +Docker is an open source project to pack, ship and run any application +as a lightweight container. + +Docker containers are both *hardware-agnostic* and *platform-agnostic*. +This means they can run anywhere, from your laptop to the largest +EC2 compute instance and everything in between - and they don't require +you to use a particular language, framework or packaging system. That +makes them great building blocks for deploying and scaling web apps, +databases, and backend services without depending on a particular stack +or provider. + +Docker began as an open-source implementation of the deployment engine which +powers [dotCloud](https://www.dotcloud.com), a popular Platform-as-a-Service. 
+It benefits directly from the experience accumulated over several years
+of large-scale operation and support of hundreds of thousands of
+applications and databases.
+
+![Docker L](docs/static_files/docker-logo-compressed.png "Docker")
+
+## Security Disclosure
+
+Security is very important to us. If you have any issue regarding security,
+please disclose the information responsibly by sending an email to
+security@docker.com and not by creating a GitHub issue.
+
+## Better than VMs
+
+A common method for distributing applications and sandboxing their
+execution is to use virtual machines, or VMs. Typical VM formats are
+VMware's vmdk, Oracle VirtualBox's vdi, and Amazon EC2's ami. In theory
+these formats should allow every developer to automatically package
+their application into a "machine" for easy distribution and deployment.
+In practice, that almost never happens, for a few reasons:
+
+  * *Size*: VMs are very large which makes them impractical to store
+    and transfer.
+  * *Performance*: running VMs consumes significant CPU and memory,
+    which makes them impractical in many scenarios, for example local
+    development of multi-tier applications, and large-scale deployment
+    of CPU- and memory-intensive applications on large numbers of
+    machines.
+  * *Portability*: competing VM environments don't play well with each
+    other. Although conversion tools do exist, they are limited and
+    add even more overhead.
+  * *Hardware-centric*: VMs were designed with machine operators in
+    mind, not software developers. As a result, they offer very
+    limited tooling for what developers need most: building, testing
+    and running their software. For example, VMs offer no facilities
+    for application versioning, monitoring, configuration, logging or
+    service discovery.
+
+By contrast, Docker relies on a different sandboxing method known as
+*containerization*. Unlike traditional virtualization, containerization
+takes place at the kernel level. Most modern operating system kernels
+now support the primitives necessary for containerization, including
+Linux with [openvz](https://openvz.org),
+[vserver](http://linux-vserver.org) and more recently
+[lxc](https://linuxcontainers.org/), Solaris with
+[zones](https://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc),
+and FreeBSD with
+[Jails](https://www.freebsd.org/doc/handbook/jails.html).
+
+Docker builds on top of these low-level primitives to offer developers a
+portable format and runtime environment that solves all four problems.
+Docker containers are small (and their transfer can be optimized with
+layers), they have basically zero memory and CPU overhead, they are
+completely portable, and are designed from the ground up with an
+application-centric design.
+
+Perhaps best of all, because Docker operates at the OS level, it can still be
+run inside a VM!
+
+## Plays well with others
+
+Docker does not require you to buy into a particular programming
+language, framework, packaging system, or configuration language.
+
+Is your application a Unix process? Does it use files, TCP connections,
+environment variables, standard Unix streams and command-line arguments
+as inputs and outputs? Then Docker can run it.
+
+Can your application's build be expressed as a sequence of such
+commands? Then Docker can build it.
+
+## Escape dependency hell
+
+A common problem for developers is the difficulty of managing all
+their application's dependencies in a simple and automated way.
+
+This is usually difficult for several reasons:
+
+  * *Cross-platform dependencies*. Modern applications often depend on
+    a combination of system libraries and binaries, language-specific
+    packages, framework-specific modules, internal components
+    developed for another project, etc. These dependencies live in
+    different "worlds" and require different tools - these tools
+    typically don't work well with each other, requiring awkward
+    custom integrations.
+
+  * *Conflicting dependencies*. Different applications may depend on
+    different versions of the same dependency. Packaging tools handle
+    these situations with various degrees of ease - but they all
+    handle them in different and incompatible ways, which again forces
+    the developer to do extra work.
+
+  * *Custom dependencies*. A developer may need to prepare a custom
+    version of their application's dependency. Some packaging systems
+    can handle custom versions of a dependency, others can't - and all
+    of them handle it differently.
+
+
+Docker solves the problem of dependency hell by giving the developer a simple
+way to express *all* their application's dependencies in one place, while
+streamlining the process of assembling them. If this makes you think of
+[XKCD 927](https://xkcd.com/927/), don't worry. Docker doesn't
+*replace* your favorite packaging systems. It simply orchestrates
+their use in a simple and repeatable way. How does it do that? With
+layers.
+
+Docker defines a build as running a sequence of Unix commands, one
+after the other, in the same container. Build commands modify the
+contents of the container (usually by installing new files on the
+filesystem), the next command modifies it some more, etc. Since each
+build command inherits the result of the previous commands, the
+*order* in which the commands are executed expresses *dependencies*.
+
+Here's a typical Docker build process:
+
+```Dockerfile
+FROM ubuntu:12.04
+RUN apt-get update && apt-get install -y python python-pip curl
+RUN curl -sSL https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv
+RUN cd helloflask-master && pip install -r requirements.txt
+```
+
+Note that Docker doesn't care *how* dependencies are built - as long
+as they can be built by running a Unix command in a container.
+
+
+Getting started
+===============
+
+Docker can be installed on your local machine as well as servers - both
+bare metal and virtualized. It is available as a binary on most modern
+Linux systems, or as a VM on Windows, Mac and other systems.
+
+We also offer an [interactive tutorial](https://www.docker.com/tryit/)
+for quickly learning the basics of using Docker.
+
+For up-to-date install instructions, see the [Docs](https://docs.docker.com).
+
+Usage examples
+==============
+
+Docker can be used to run short-lived commands, long-running daemons
+(app servers, databases, etc.), interactive shell sessions, etc.
+
+You can find a [list of real-world
+examples](https://docs.docker.com/examples/) in the
+documentation.
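+
+To make those usage modes concrete, here are a few illustrative invocations
+(a sketch only: the `ubuntu` and `redis` images are stand-ins for whatever
+you actually run):
+
+```bash
+# Short-lived command: start a container, run one process, then exit
+docker run ubuntu echo "hello world"
+
+# Long-running daemon: -d detaches and prints the new container's ID
+docker run -d redis
+
+# Interactive shell session: -i keeps STDIN open, -t allocates a pseudo-TTY
+docker run -i -t ubuntu bash
+```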
+
+Under the hood
+--------------
+
+Under the hood, Docker is built on the following components:
+
+* The
+  [cgroups](https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt)
+  and
+  [namespaces](http://man7.org/linux/man-pages/man7/namespaces.7.html)
+  capabilities of the Linux kernel
+* The [Go](https://golang.org) programming language
+* The [Docker Image Specification](https://github.com/docker/docker/blob/master/image/spec/v1.md)
+* The [Libcontainer Specification](https://github.com/docker/libcontainer/blob/master/SPEC.md)
+
+Contributing to Docker
+======================
+
+[![GoDoc](https://godoc.org/github.com/docker/docker?status.svg)](https://godoc.org/github.com/docker/docker)
+[![Jenkins Build Status](https://jenkins.dockerproject.org/job/Docker%20Master/badge/icon)](https://jenkins.dockerproject.org/job/Docker%20Master/)
+
+Want to hack on Docker? Awesome! We have [instructions to help you get
+started contributing code or documentation](https://docs.docker.com/project/who-written-for/).
+
+These instructions are probably not perfect; please let us know if anything
+feels wrong or incomplete. Better yet, submit a PR and improve them yourself.
+
+Getting the development builds
+==============================
+
+Want to run Docker from a master build? You can download
+master builds at [master.dockerproject.org](https://master.dockerproject.org).
+They are updated with each commit merged into the master branch.
+
+Don't know how to use that super cool new feature in the master build? Check
+out the master docs at
+[docs.master.dockerproject.org](http://docs.master.dockerproject.org).
+
+How the project is run
+======================
+
+Docker is a very, very active project. If you want to learn more about how it is run,
+or want to get more involved, the best place to start is [the project directory](https://github.com/docker/docker/tree/master/project).
+
+We are always open to suggestions on process improvements, and are always looking for more maintainers.
+
+### Talking to other Docker users and contributors
+
+| Channel | Details |
+| ------- | ------- |
+| Internet Relay Chat (IRC) | IRC is a direct line to our most knowledgeable Docker users; we have both the `#docker` and `#docker-dev` channels on **irc.freenode.net**. IRC is a rich chat protocol but it can overwhelm new users. You can search our chat archives. Read our IRC quickstart guide for an easy way to get started. |
+| Google Groups | There are two groups. Docker-user is for people using Docker containers. The docker-dev group is for contributors and other people contributing to the Docker project. |
+| Twitter | You can follow Docker's Twitter feed to get updates on our products. You can also tweet us questions or just share blogs or stories. |
+| Stack Overflow | Stack Overflow has over 7,000 Docker questions listed. We regularly monitor Docker questions and so do many other knowledgeable Docker users. |
+
+### Legal
+
+*Brought to you courtesy of our legal counsel. For more context,
+please see the [NOTICE](https://github.com/docker/docker/blob/master/NOTICE) document in this repo.*
+
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see https://www.bis.doc.gov
+
+
+Licensing
+=========
+Docker is licensed under the Apache License, Version 2.0. See
+[LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full
+license text.
+
+Other Docker Related Projects
+=============================
+There are a number of projects under development that are based on Docker's
+core technology. These projects expand the tooling built around the
+Docker platform to broaden its application and utility.
+
+* [Docker Registry](https://github.com/docker/distribution): Registry
+server for Docker (hosting/delivery of repositories and images)
+* [Docker Machine](https://github.com/docker/machine): Machine management
+for a container-centric world
+* [Docker Swarm](https://github.com/docker/swarm): A Docker-native clustering
+system
+* [Docker Compose](https://github.com/docker/compose) (formerly Fig):
+Define and run multi-container apps
+* [Kitematic](https://github.com/kitematic/kitematic): The easiest way to use
+Docker on Mac and Windows
+
+If you know of another project underway that should be listed here, please help
+us keep this list up-to-date by submitting a PR.
diff --git a/ROADMAP.md b/ROADMAP.md
new file mode 100644
index 00000000..7a3deaa2
--- /dev/null
+++ b/ROADMAP.md
@@ -0,0 +1,183 @@
+Docker Engine Roadmap
+=====================
+
+### How should I use this document?
+
+This document provides a description of the items the project has decided to prioritize. It should
+serve as a reference point for Docker contributors to understand where the project is going, and
+help determine if a contribution could be conflicting with some longer-term plans.
+
+The fact that a feature isn't listed here doesn't mean that a patch for it will automatically be
+refused (except for those mentioned as "frozen features" below)! We are always happy to receive
+patches for new cool features we haven't thought about, or didn't judge to be a priority. Please
+however understand that such patches might take longer for us to review.
+
+### How can I help?
+
+Short-term objectives are listed in the [wiki](https://github.com/docker/docker/wiki) and described
+in [Issues](https://github.com/docker/docker/issues?q=is%3Aopen+is%3Aissue+label%3Aroadmap). Our
+goal is to split up the workload in such a way that anybody can jump in and help. Please comment on
+issues if you want to take one on, to avoid duplicating effort! Similarly, if a maintainer is already
+assigned to an issue you'd like to participate in, pinging them on IRC or GitHub to offer your help is
+the best way to go.
+
+### How can I add something to the roadmap?
+
+The roadmap process is new to the Docker Engine: we are only beginning to structure and document the
+project objectives. Our immediate goal is to be more transparent, and work with our community to
+focus our efforts on fewer prioritized topics.
+
+We hope to offer in the near future a process allowing anyone to propose a topic to the roadmap, but
+we are not quite there yet.
+For the time being, the BDFL remains the keeper of the roadmap, and we
+won't be accepting pull requests adding or removing items from this file.
+
+# 1. Features and refactoring
+
+## 1.1 Security
+
+Security is a top objective for the Docker Engine. The most notable items we intend to provide in
+the near future are:
+
+- Trusted distribution of images: the effort is driven by the [distribution](https://github.com/docker/distribution)
+group but will have a significant impact on the Engine
+- [User namespaces](https://github.com/docker/docker/pull/12648)
+- [Seccomp support](https://github.com/docker/libcontainer/pull/613)
+
+## 1.2 Plumbing project
+
+We define a plumbing tool as a standalone piece of software usable and meaningful on its own. In
+the current state of the Docker Engine, most subsystems provide independent functionalities (such
+as the builder, pushing and pulling images, running applications in a containerized environment,
+etc.) but all are coupled in a single binary. We want to offer users the flexibility to use only the
+pieces they need, and we will also gain in maintainability by splitting the project among multiple
+repositories.
+
+As it currently stands, the rough design outline is to have:
+- Low-level plumbing tools, each dealing with one responsibility (e.g., [runC](https://runc.io))
+- Docker subsystem services, each exposing an elementary concept over an API, and relying on one or
+multiple lower-level plumbing tools for their implementation (e.g., network management)
+- The Docker Engine to expose higher-level actions (e.g., create a container with volume `V` and network
+`N`), while still providing pass-through access to the individual subsystems.
+
+The architectural details are still being worked on, but one thing we know for sure is that we need
+to technically decouple the pieces.
+
+### 1.2.1 Runtime
+
+A runtime tool already exists today in the form of [runC](https://github.com/opencontainers/runc).
+We intend to modify the Engine to directly call out to a binary implementing the Open Containers
+Specification, such as runC, rather than relying on libcontainer to set the container runtime up.
+
+This plan will deprecate the existing [`execdriver`](https://github.com/docker/docker/tree/master/daemon/execdriver),
+as different runtime backends will be implemented as separate binaries instead of being compiled
+into the Engine.
+
+### 1.2.2 Builder
+
+The Builder (i.e., the ability to build an image from a Dockerfile) is already nicely decoupled,
+but would benefit from being entirely separated from the Engine, relying on the standard Engine
+API for its operations.
+
+### 1.2.3 Distribution
+
+Distribution already has a [dedicated repository](https://github.com/docker/distribution) which
+holds the implementation for Registry v2 and client libraries. We could imagine going further by
+having the Engine call out to a binary providing image distribution related functionalities.
+
+There are two short-term goals related to image distribution. The first is to stabilize and simplify
+the push/pull code. Following that is the conversion to the more secure Registry V2 protocol.
+
+### 1.2.4 Networking
+
+Most of the networking-related code is already decoupled today in [libnetwork](https://github.com/docker/libnetwork).
+As with other ingredients, we might want to take it a step further and make it a meaningful utility
+that the Engine would call out to instead of a library.
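+
+As a rough sketch of this plumbing direction, consider what a fully decoupled
+workflow could look like. The `docker-builder` and `docker-dist` binaries below
+are hypothetical names invented purely for illustration; runC is real, but its
+command-line interface is versioned separately from this roadmap, so the
+invocation shown is an assumption rather than a committed design:
+
+```bash
+# Hypothetical: a standalone builder turns a Dockerfile into an image by
+# talking to the Engine API instead of living inside the Engine binary.
+docker-builder build -f Dockerfile .
+
+# Hypothetical: a standalone distribution tool pushes and pulls images
+# against a Registry v2.
+docker-dist pull debian:jessie
+
+# A low-level runtime runs a container from an Open Containers bundle
+# (exact invocation varies across runC versions).
+runc run mycontainer
+```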
+
+## 1.3 Plugins
+
+An initiative around plugins started with Docker 1.7.0, with the goal of allowing for out-of-process
+extensibility of some Docker functionalities, starting with volumes and networking. The
+approach is to provide specific extension points rather than generic hooking facilities. We also
+deliberately keep the extensions API as simple as possible, expanding it as we discover valid use
+cases that cannot otherwise be implemented.
+
+At the time of writing:
+
+- Plugin support is merged as an experimental feature: real-world use cases and user feedback will
+help us refine the UX to make the feature more user-friendly.
+- There are no immediate plans to expand on the number of pluggable subsystems.
+- Golang 1.5 might add language support for [plugins](https://docs.google.com/document/d/1nr-TQHw_er6GOQRsF6T43GGhFDelrAP0NqSS_00RgZQ),
+which we would consider supporting as an alternative to JSON/HTTP.
+
+## 1.4 Volume management
+
+Volumes are not first-class citizens in the Engine today: we would like better volume management,
+similar to the way networks are managed in the new [CNM](https://github.com/docker/docker/issues/9983).
+
+## 1.5 Better API implementation
+
+The current Engine API is insufficiently typed, versioned, and ultimately hard to maintain. We
+also suffer from the lack of a common implementation with [Swarm](https://github.com/docker/swarm).
+
+## 1.6 Checkpoint/restore
+
+Support for checkpoint/restore was [merged](https://github.com/docker/libcontainer/pull/479) in
+[libcontainer](https://github.com/docker/libcontainer) and made available through [runC](https://runc.io):
+we intend to take advantage of it in the Engine.
+
+# 2 Frozen features
+
+## 2.1 Docker exec
+
+We won't accept patches expanding the surface of `docker exec`, which we intend to keep as a
+*debugging* feature and which is, in any case, strongly dependent on the Runtime ingredient effort.
+
+## 2.2 Dockerfile syntax
+
+The Dockerfile syntax as we know it is simple, and has proven successful in supporting all our
+[official images](https://github.com/docker-library/official-images). Although this is *not* a
+definitive move, we temporarily won't accept more patches to the Dockerfile syntax for several
+reasons:
+
+- The long-term impact of syntax changes is a sensitive matter that requires an amount of attention
+that the volume of Engine codebase and activity today doesn't allow us to provide.
+- Allowing the Builder to be implemented as a separate utility consuming the Engine's API will
+open the door for many possibilities, such as offering alternate syntaxes or DSLs for existing
+languages without cluttering the Engine's codebase.
+- A standalone Builder will also offer the opportunity for a dedicated group of maintainers
+to own the Dockerfile syntax and decide collectively on the direction to give it.
+- Our experience with official images tends to show that no new instruction or syntax expansion is
+*strictly* necessary for the majority of use cases, and although we are aware many things are still
+lacking for many users, we cannot make it a priority yet for the above reasons.
+
+Again, this is not about saying that the Dockerfile syntax is done; it's about making choices about
+what we want to do first!
+
+## 2.3 Remote Registry Operations
+
+A large amount of work is ongoing in the area of image distribution and
+provenance. This includes moving to the V2 Registry API and heavily
+refactoring the code that powers these features. The desired result is more
+secure, reliable and easier-to-use image distribution.
+ +Part of the problem with this part of the code base is the lack of a stable +and flexible interface. If new features are added that access the registry +without solidifying these interfaces, achieving feature parity will continue +to be elusive. While we get a handle on this situation, we are imposing a +moratorium on new code that accesses the Registry API in commands that don't +already make remote calls. + +Currently, only the following commands cause interaction with a remote +registry: + +- push +- pull +- run +- build +- search +- login + +In the interest of stabilizing the registry access model during this ongoing +work, we are not accepting additions to other commands that will cause remote +interaction with the Registry API. This moratorium will lift when the goals of +the distribution project have been met. diff --git a/VERSION b/VERSION new file mode 100644 index 00000000..a7ee35a3 --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +1.8.3 diff --git a/api/README.md b/api/README.md new file mode 100644 index 00000000..453f61a1 --- /dev/null +++ b/api/README.md @@ -0,0 +1,5 @@ +This directory contains code pertaining to the Docker API: + + - Used by the docker client when communicating with the docker daemon + + - Used by third party tools wishing to interface with the docker daemon diff --git a/api/api_unit_test.go b/api/api_unit_test.go new file mode 100644 index 00000000..678331d3 --- /dev/null +++ b/api/api_unit_test.go @@ -0,0 +1,19 @@ +package api + +import ( + "testing" +) + +func TestJsonContentType(t *testing.T) { + if !MatchesContentType("application/json", "application/json") { + t.Fail() + } + + if !MatchesContentType("application/json; charset=utf-8", "application/json") { + t.Fail() + } + + if MatchesContentType("dockerapplication/json", "application/json") { + t.Fail() + } +} diff --git a/api/client/attach.go b/api/client/attach.go new file mode 100644 index 00000000..584c53ea --- /dev/null +++ b/api/client/attach.go @@ -0,0 +1,84 @@ +package client + +import ( + "encoding/json" + "fmt" + "io" + "net/url" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/signal" +) + +// CmdAttach attaches to a running container. 
+// +// Usage: docker attach [OPTIONS] CONTAINER +func (cli *DockerCli) CmdAttach(args ...string) error { + cmd := Cli.Subcmd("attach", []string{"CONTAINER"}, "Attach to a running container", true) + noStdin := cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach STDIN") + proxy := cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process") + + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + serverResp, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, nil) + if err != nil { + return err + } + + defer serverResp.body.Close() + + var c types.ContainerJSON + if err := json.NewDecoder(serverResp.body).Decode(&c); err != nil { + return err + } + + if !c.State.Running { + return fmt.Errorf("You cannot attach to a stopped container, start it first") + } + + if err := cli.CheckTtyInput(!*noStdin, c.Config.Tty); err != nil { + return err + } + + if c.Config.Tty && cli.isTerminalOut { + if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil { + logrus.Debugf("Error monitoring TTY size: %s", err) + } + } + + var in io.ReadCloser + + v := url.Values{} + v.Set("stream", "1") + if !*noStdin && c.Config.OpenStdin { + v.Set("stdin", "1") + in = cli.in + } + + v.Set("stdout", "1") + v.Set("stderr", "1") + + if *proxy && !c.Config.Tty { + sigc := cli.forwardAllSignals(cmd.Arg(0)) + defer signal.StopCatch(sigc) + } + + if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), c.Config.Tty, in, cli.out, cli.err, nil, nil); err != nil { + return err + } + + _, status, err := getExitCode(cli, cmd.Arg(0)) + if err != nil { + return err + } + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + + return nil +} diff --git a/api/client/build.go b/api/client/build.go new file mode 100644 index 00000000..bc5172d1 --- /dev/null +++ b/api/client/build.go @@ -0,0 +1,634 @@ +package client + +import ( + "archive/tar" + "bufio" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "os/exec" + "path" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + + "github.com/docker/docker/api" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/graph/tags" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/jsonmessage" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/progressreader" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/ulimit" + "github.com/docker/docker/pkg/units" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" +) + +const ( + tarHeaderSize = 512 +) + +// CmdBuild builds a new image from the source code at a given path. +// +// If '-' is provided instead of a path or URL, Docker will build an image from either a Dockerfile or tar archive read from STDIN. 
+// +// Usage: docker build [OPTIONS] PATH | URL | - +func (cli *DockerCli) CmdBuild(args ...string) error { + cmd := Cli.Subcmd("build", []string{"PATH | URL | -"}, "Build a new image from the source code at PATH", true) + tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) for the image") + suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers") + noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image") + rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build") + forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers") + pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image") + dockerfileName := cmd.String([]string{"f", "-file"}, "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')") + flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit") + flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Total memory (memory + swap), '-1' to disable swap") + flCPUShares := cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") + flCpuPeriod := cmd.Int64([]string{"-cpu-period"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) period") + flCpuQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) quota") + flCPUSetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)") + flCPUSetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)") + flCgroupParent := cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container") + + ulimits := make(map[string]*ulimit.Ulimit) + flUlimits := opts.NewUlimitOpt(&ulimits) + cmd.Var(flUlimits, []string{"-ulimit"}, "Ulimit options") + + cmd.Require(flag.Exact, 1) + + // For trusted pull on "FROM " instruction. + addTrustedFlags(cmd, true) + + cmd.ParseFlags(args, true) + + var ( + context io.ReadCloser + isRemote bool + err error + ) + + _, err = exec.LookPath("git") + hasGit := err == nil + + specifiedContext := cmd.Arg(0) + + var ( + contextDir string + tempDir string + relDockerfile string + ) + + switch { + case specifiedContext == "-": + tempDir, relDockerfile, err = getContextFromReader(cli.in, *dockerfileName) + case urlutil.IsGitURL(specifiedContext) && hasGit: + tempDir, relDockerfile, err = getContextFromGitURL(specifiedContext, *dockerfileName) + case urlutil.IsURL(specifiedContext): + tempDir, relDockerfile, err = getContextFromURL(cli.out, specifiedContext, *dockerfileName) + default: + contextDir, relDockerfile, err = getContextFromLocalDir(specifiedContext, *dockerfileName) + } + + if err != nil { + return fmt.Errorf("unable to prepare context: %s", err) + } + + if tempDir != "" { + defer os.RemoveAll(tempDir) + contextDir = tempDir + } + + // Resolve the FROM lines in the Dockerfile to trusted digest references + // using Notary. On a successful build, we must tag the resolved digests + // to the original name specified in the Dockerfile. 
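+	// (The rewrite below only takes effect when content trust is enabled;
+	// otherwise the Dockerfile passes through unchanged.)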
+	newDockerfile, resolvedTags, err := rewriteDockerfileFrom(filepath.Join(contextDir, relDockerfile), cli.trustedReference)
+	if err != nil {
+		return fmt.Errorf("unable to process Dockerfile: %v", err)
+	}
+	defer newDockerfile.Close()
+
+	// And canonicalize dockerfile name to a platform-independent one
+	relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile)
+	if err != nil {
+		return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err)
+	}
+
+	var includes = []string{"."}
+
+	excludes, err := utils.ReadDockerIgnore(path.Join(contextDir, ".dockerignore"))
+	if err != nil {
+		return err
+	}
+
+	if err := utils.ValidateContextDirectory(contextDir, excludes); err != nil {
+		return fmt.Errorf("Error checking context: '%s'.", err)
+	}
+
+	// If .dockerignore mentions .dockerignore or the Dockerfile
+	// then make sure we send both files over to the daemon
+	// because Dockerfile is, obviously, needed no matter what, and
+	// .dockerignore is needed to know if either one needs to be
+	// removed. The daemon will remove them for us, if needed, after it
+	// parses the Dockerfile. Ignore errors here, as they will have been
+	// caught by ValidateContextDirectory above.
+	keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
+	keepThem2, _ := fileutils.Matches(relDockerfile, excludes)
+	if keepThem1 || keepThem2 {
+		includes = append(includes, ".dockerignore", relDockerfile)
+	}
+
+	context, err = archive.TarWithOptions(contextDir, &archive.TarOptions{
+		Compression:     archive.Uncompressed,
+		ExcludePatterns: excludes,
+		IncludeFiles:    includes,
+	})
+	if err != nil {
+		return err
+	}
+
+	// Wrap the tar archive to replace the Dockerfile entry with the rewritten
+	// Dockerfile which uses trusted pulls.
+	context = replaceDockerfileTarWrapper(context, newDockerfile, relDockerfile)
+
+	// Set up an upload progress bar
+	// FIXME: ProgressReader shouldn't be this annoying to use
+	sf := streamformatter.NewStreamFormatter()
+	var body io.Reader = progressreader.New(progressreader.Config{
+		In:        context,
+		Out:       cli.out,
+		Formatter: sf,
+		NewLines:  true,
+		ID:        "",
+		Action:    "Sending build context to Docker daemon",
+	})
+
+	var memory int64
+	if *flMemoryString != "" {
+		parsedMemory, err := units.RAMInBytes(*flMemoryString)
+		if err != nil {
+			return err
+		}
+		memory = parsedMemory
+	}
+
+	var memorySwap int64
+	if *flMemorySwap != "" {
+		if *flMemorySwap == "-1" {
+			memorySwap = -1
+		} else {
+			parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap)
+			if err != nil {
+				return err
+			}
+			memorySwap = parsedMemorySwap
+		}
+	}
+	// Send the build context
+	v := &url.Values{}
+
+	// Check if the given image name can be resolved
+	if *tag != "" {
+		repository, tag := parsers.ParseRepositoryTag(*tag)
+		if err := registry.ValidateRepositoryName(repository); err != nil {
+			return err
+		}
+		if len(tag) > 0 {
+			if err := tags.ValidateTagName(tag); err != nil {
+				return err
+			}
+		}
+	}
+
+	v.Set("t", *tag)
+
+	if *suppressOutput {
+		v.Set("q", "1")
+	}
+	if isRemote {
+		v.Set("remote", cmd.Arg(0))
+	}
+	if *noCache {
+		v.Set("nocache", "1")
+	}
+	if *rm {
+		v.Set("rm", "1")
+	} else {
+		v.Set("rm", "0")
+	}
+
+	if *forceRm {
+		v.Set("forcerm", "1")
+	}
+
+	if *pull {
+		v.Set("pull", "1")
+	}
+
+	v.Set("cpusetcpus", *flCPUSetCpus)
+	v.Set("cpusetmems", *flCPUSetMems)
+	v.Set("cpushares", strconv.FormatInt(*flCPUShares, 10))
+	v.Set("cpuquota", strconv.FormatInt(*flCpuQuota, 10))
+	v.Set("cpuperiod", strconv.FormatInt(*flCpuPeriod, 10))
+	v.Set("memory", strconv.FormatInt(memory, 10))
+	v.Set("memswap", strconv.FormatInt(memorySwap, 10))
+	v.Set("cgroupparent", *flCgroupParent)
+
+	v.Set("dockerfile", relDockerfile)
+
+	ulimitsVar := flUlimits.GetList()
+	ulimitsJson, err := json.Marshal(ulimitsVar)
+	if err != nil {
+		return err
+	}
+	v.Set("ulimits", string(ulimitsJson))
+
+	headers := http.Header(make(map[string][]string))
+	buf, err := json.Marshal(cli.configFile.AuthConfigs)
+	if err != nil {
+		return err
+	}
+	headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
+	headers.Set("Content-Type", "application/tar")
+
+	sopts := &streamOpts{
+		rawTerminal: true,
+		in:          body,
+		out:         cli.out,
+		headers:     headers,
+	}
+
+	serverResp, err := cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), sopts)
+
+	// Windows: show error message about modified file permissions.
+	if runtime.GOOS == "windows" {
+		h, err := httputils.ParseServerHeader(serverResp.header.Get("Server"))
+		if err == nil {
+			if h.OS != "windows" {
+				fmt.Fprintln(cli.err, `SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
+			}
+		}
+	}
+
+	if jerr, ok := err.(*jsonmessage.JSONError); ok {
+		// If no error code is set, default to 1
+		if jerr.Code == 0 {
+			jerr.Code = 1
+		}
+		return Cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
+	}
+
+	if err != nil {
+		return err
+	}
+
+	// Since the build was successful, now we must tag any of the resolved
+	// images from the above Dockerfile rewrite.
+	for _, resolved := range resolvedTags {
+		if err := cli.tagTrusted(resolved.repoInfo, resolved.digestRef, resolved.tagRef); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// getDockerfileRelPath uses the given context directory for a `docker build`
+// and returns the absolute path to the context directory, the relative path of
+// the dockerfile in that context directory, and a non-nil error on failure.
+func getDockerfileRelPath(givenContextDir, givenDockerfile string) (absContextDir, relDockerfile string, err error) {
+	if absContextDir, err = filepath.Abs(givenContextDir); err != nil {
+		return "", "", fmt.Errorf("unable to get absolute context directory: %v", err)
+	}
+
+	// The context dir might be a symbolic link, so follow it to the actual
+	// target directory.
+	absContextDir, err = filepath.EvalSymlinks(absContextDir)
+	if err != nil {
+		return "", "", fmt.Errorf("unable to evaluate symlinks in context path: %v", err)
+	}
+
+	stat, err := os.Lstat(absContextDir)
+	if err != nil {
+		return "", "", fmt.Errorf("unable to stat context directory %q: %v", absContextDir, err)
+	}
+
+	if !stat.IsDir() {
+		return "", "", fmt.Errorf("context must be a directory: %s", absContextDir)
+	}
+
+	absDockerfile := givenDockerfile
+	if absDockerfile == "" {
+		// No -f/--file was specified so use the default relative to the
+		// context directory.
+		absDockerfile = filepath.Join(absContextDir, api.DefaultDockerfileName)
+
+		// Just to be nice ;-) look for 'dockerfile' too but only
+		// use it if we found it, otherwise ignore this check
+		if _, err = os.Lstat(absDockerfile); os.IsNotExist(err) {
+			altPath := filepath.Join(absContextDir, strings.ToLower(api.DefaultDockerfileName))
+			if _, err = os.Lstat(altPath); err == nil {
+				absDockerfile = altPath
+			}
+		}
+	}
+
+	// If not already an absolute path, the Dockerfile path should be joined to
+	// the base directory.
+	if !filepath.IsAbs(absDockerfile) {
+		absDockerfile = filepath.Join(absContextDir, absDockerfile)
+	}
+
+	// Verify that 'filename' is within the build context
+	absDockerfile, err = symlink.FollowSymlinkInScope(absDockerfile, absContextDir)
+	if err != nil {
+		return "", "", fmt.Errorf("The Dockerfile (%s) must be within the build context (%s)", givenDockerfile, givenContextDir)
+	}
+
+	if _, err := os.Lstat(absDockerfile); err != nil {
+		if os.IsNotExist(err) {
+			return "", "", fmt.Errorf("Cannot locate Dockerfile: absDockerfile: %q", absDockerfile)
+		}
+		return "", "", fmt.Errorf("unable to stat Dockerfile: %v", err)
+	}
+
+	if relDockerfile, err = filepath.Rel(absContextDir, absDockerfile); err != nil {
+		return "", "", fmt.Errorf("unable to get relative Dockerfile path: %v", err)
+	}
+
+	return absContextDir, relDockerfile, nil
+}
+
+// writeToFile copies from the given reader and writes it to a file with the
+// given filename.
+func writeToFile(r io.Reader, filename string) error {
+	file, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600))
+	if err != nil {
+		return fmt.Errorf("unable to create file: %v", err)
+	}
+	defer file.Close()
+
+	if _, err := io.Copy(file, r); err != nil {
+		return fmt.Errorf("unable to write file: %v", err)
+	}
+
+	return nil
+}
+
+// getContextFromReader will read the contents of the given reader as either a
+// Dockerfile or tar archive to be extracted to a temporary directory used as
+// the context directory. Returns the absolute path to the temporary context
+// directory, the relative path of the dockerfile in that context directory,
+// and a non-nil error on failure.
+func getContextFromReader(r io.Reader, dockerfileName string) (absContextDir, relDockerfile string, err error) {
+	buf := bufio.NewReader(r)
+
+	magic, err := buf.Peek(tarHeaderSize)
+	if err != nil && err != io.EOF {
+		return "", "", fmt.Errorf("failed to peek context header from STDIN: %v", err)
+	}
+
+	if absContextDir, err = ioutil.TempDir("", "docker-build-context-"); err != nil {
+		return "", "", fmt.Errorf("unable to create temporary context directory: %v", err)
+	}
+
+	defer func(d string) {
+		if err != nil {
+			os.RemoveAll(d)
+		}
+	}(absContextDir)
+
+	if !archive.IsArchive(magic) { // Input should be read as a Dockerfile.
+		// -f option has no meaning when we're reading it from stdin,
+		// so just use our default Dockerfile name
+		relDockerfile = api.DefaultDockerfileName
+
+		return absContextDir, relDockerfile, writeToFile(buf, filepath.Join(absContextDir, relDockerfile))
+	}
+
+	if err := archive.Untar(buf, absContextDir, nil); err != nil {
+		return "", "", fmt.Errorf("unable to extract stdin to temporary context directory: %v", err)
+	}
+
+	return getDockerfileRelPath(absContextDir, dockerfileName)
+}
+
+// getContextFromGitURL uses a Git URL as context for a `docker build`. The
+// git repo is cloned into a temporary directory used as the context directory.
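+// (An illustrative reference: git://github.com/user/repo.git.)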
+// Returns the absolute path to the temporary context directory, the relative
+// path of the dockerfile in that context directory, and a non-nil error on
+// failure.
+func getContextFromGitURL(gitURL, dockerfileName string) (absContextDir, relDockerfile string, err error) {
+	if absContextDir, err = utils.GitClone(gitURL); err != nil {
+		return "", "", fmt.Errorf("unable to 'git clone' to temporary context directory: %v", err)
+	}
+
+	return getDockerfileRelPath(absContextDir, dockerfileName)
+}
+
+// getContextFromURL uses a remote URL as context for a `docker build`. The
+// remote resource is downloaded as either a Dockerfile or a context tar
+// archive and stored in a temporary directory used as the context directory.
+// Returns the absolute path to the temporary context directory, the relative
+// path of the dockerfile in that context directory, and a non-nil error on
+// failure.
+func getContextFromURL(out io.Writer, remoteURL, dockerfileName string) (absContextDir, relDockerfile string, err error) {
+	response, err := httputils.Download(remoteURL)
+	if err != nil {
+		return "", "", fmt.Errorf("unable to download remote context %s: %v", remoteURL, err)
+	}
+	defer response.Body.Close()
+
+	// Pass the response body through a progress reader.
+	progReader := &progressreader.Config{
+		In:        response.Body,
+		Out:       out,
+		Formatter: streamformatter.NewStreamFormatter(),
+		Size:      int(response.ContentLength),
+		NewLines:  true,
+		ID:        "",
+		Action:    fmt.Sprintf("Downloading build context from remote url: %s", remoteURL),
+	}
+
+	return getContextFromReader(progReader, dockerfileName)
+}
+
+// getContextFromLocalDir uses the given local directory as context for a
+// `docker build`. Returns the absolute path to the local context directory,
+// the relative path of the dockerfile in that context directory, and a non-nil
+// error on failure.
+func getContextFromLocalDir(localDir, dockerfileName string) (absContextDir, relDockerfile string, err error) {
+	// When using a local context directory, when the Dockerfile is specified
+	// with the `-f/--file` option then it is considered relative to the
+	// current directory and not the context directory.
+	if dockerfileName != "" {
+		if dockerfileName, err = filepath.Abs(dockerfileName); err != nil {
+			return "", "", fmt.Errorf("unable to get absolute path to Dockerfile: %v", err)
+		}
+	}
+
+	return getDockerfileRelPath(localDir, dockerfileName)
+}
+
+var dockerfileFromLinePattern = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P<image>[^ \f\r\t\v\n#]+)`)
+
+type trustedDockerfile struct {
+	*os.File
+	size int64
+}
+
+func (td *trustedDockerfile) Close() error {
+	td.File.Close()
+	return os.Remove(td.File.Name())
+}
+
+// resolvedTag records the repository, tag, and resolved digest reference
+// from a Dockerfile rewrite.
+type resolvedTag struct {
+	repoInfo          *registry.RepositoryInfo
+	digestRef, tagRef registry.Reference
+}
+
+// rewriteDockerfileFrom rewrites the given Dockerfile by resolving images in
+// "FROM <image>" instructions to a digest reference. `translator` is a
+// function that takes a repository name and tag reference and returns a
+// trusted digest reference.
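+//
+// For example, a Dockerfile line such as
+//	FROM ubuntu:14.04
+// may be rewritten to pin the trusted digest (placeholder digest shown):
+//	FROM ubuntu@sha256:<digest>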
+func rewriteDockerfileFrom(dockerfileName string, translator func(string, registry.Reference) (registry.Reference, error)) (newDockerfile *trustedDockerfile, resolvedTags []*resolvedTag, err error) { + dockerfile, err := os.Open(dockerfileName) + if err != nil { + return nil, nil, fmt.Errorf("unable to open Dockerfile: %v", err) + } + defer dockerfile.Close() + + scanner := bufio.NewScanner(dockerfile) + + // Make a tempfile to store the rewritten Dockerfile. + tempFile, err := ioutil.TempFile("", "trusted-dockerfile-") + if err != nil { + return nil, nil, fmt.Errorf("unable to make temporary trusted Dockerfile: %v", err) + } + + trustedFile := &trustedDockerfile{ + File: tempFile, + } + + defer func() { + if err != nil { + // Close the tempfile if there was an error during Notary lookups. + // Otherwise the caller should close it. + trustedFile.Close() + } + }() + + // Scan the lines of the Dockerfile, looking for a "FROM" line. + for scanner.Scan() { + line := scanner.Text() + + matches := dockerfileFromLinePattern.FindStringSubmatch(line) + if matches != nil && matches[1] != "scratch" { + // Replace the line with a resolved "FROM repo@digest" + repo, tag := parsers.ParseRepositoryTag(matches[1]) + if tag == "" { + tag = tags.DEFAULTTAG + } + + repoInfo, err := registry.ParseRepositoryInfo(repo) + if err != nil { + return nil, nil, fmt.Errorf("unable to parse repository info: %v", err) + } + + ref := registry.ParseReference(tag) + + if !ref.HasDigest() && isTrusted() { + trustedRef, err := translator(repo, ref) + if err != nil { + return nil, nil, err + } + + line = dockerfileFromLinePattern.ReplaceAllLiteralString(line, fmt.Sprintf("FROM %s", trustedRef.ImageName(repo))) + resolvedTags = append(resolvedTags, &resolvedTag{ + repoInfo: repoInfo, + digestRef: trustedRef, + tagRef: ref, + }) + } + } + + n, err := fmt.Fprintln(tempFile, line) + if err != nil { + return nil, nil, err + } + + trustedFile.size += int64(n) + } + + tempFile.Seek(0, os.SEEK_SET) + + return trustedFile, resolvedTags, scanner.Err() +} + +// replaceDockerfileTarWrapper wraps the given input tar archive stream and +// replaces the entry with the given Dockerfile name with the contents of the +// new Dockerfile. Returns a new tar archive stream with the replaced +// Dockerfile. +func replaceDockerfileTarWrapper(inputTarStream io.ReadCloser, newDockerfile *trustedDockerfile, dockerfileName string) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + + defer inputTarStream.Close() + + for { + hdr, err := tarReader.Next() + if err == io.EOF { + // Signals end of archive. + tarWriter.Close() + pipeWriter.Close() + return + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + var content io.Reader = tarReader + + if hdr.Name == dockerfileName { + // This entry is the Dockerfile. Since the tar archive was + // generated from a directory on the local filesystem, the + // Dockerfile will only appear once in the archive. 
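+				// A tar entry's size is fixed by its header, so the header
+				// must be updated to the rewritten Dockerfile's size before
+				// its content is copied in below.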
+ hdr.Size = newDockerfile.size + content = newDockerfile + } + + if err := tarWriter.WriteHeader(hdr); err != nil { + pipeWriter.CloseWithError(err) + return + } + + if _, err := io.Copy(tarWriter, content); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + }() + + return pipeReader +} diff --git a/api/client/cli.go b/api/client/cli.go new file mode 100644 index 00000000..fc4e2362 --- /dev/null +++ b/api/client/cli.go @@ -0,0 +1,162 @@ +package client + +import ( + "crypto/tls" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "strings" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/sockets" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/pkg/tlsconfig" +) + +// DockerCli represents the docker command line client. +// Instances of the client can be returned from NewDockerCli. +type DockerCli struct { + // initializing closure + init func() error + + // proto holds the client protocol i.e. unix. + proto string + // addr holds the client address. + addr string + // basePath holds the path to prepend to the requests + basePath string + + // configFile has the client configuration file + configFile *cliconfig.ConfigFile + // in holds the input stream and closer (io.ReadCloser) for the client. + in io.ReadCloser + // out holds the output stream (io.Writer) for the client. + out io.Writer + // err holds the error stream (io.Writer) for the client. + err io.Writer + // keyFile holds the key file as a string. + keyFile string + // tlsConfig holds the TLS configuration for the client, and will + // set the scheme to https in NewDockerCli if present. + tlsConfig *tls.Config + // scheme holds the scheme of the client i.e. https. + scheme string + // inFd holds the file descriptor of the client's STDIN (if valid). + inFd uintptr + // outFd holds file descriptor of the client's STDOUT (if valid). + outFd uintptr + // isTerminalIn indicates whether the client's STDIN is a TTY + isTerminalIn bool + // isTerminalOut dindicates whether the client's STDOUT is a TTY + isTerminalOut bool + // transport holds the client transport instance. + transport *http.Transport +} + +func (cli *DockerCli) Initialize() error { + if cli.init == nil { + return nil + } + return cli.init() +} + +// CheckTtyInput checks if we are trying to attach to a container tty +// from a non-tty client input stream, and if so, returns an error. +func (cli *DockerCli) CheckTtyInput(attachStdin, ttyMode bool) error { + // In order to attach to a container tty, input stream for the client must + // be a tty itself: redirecting or piping the client standard input is + // incompatible with `docker run -t`, `docker exec -t` or `docker attach`. + if ttyMode && attachStdin && !cli.isTerminalIn { + return errors.New("cannot enable tty mode on non tty input") + } + return nil +} + +func (cli *DockerCli) PsFormat() string { + return cli.configFile.PsFormat +} + +// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err. +// The key file, protocol (i.e. unix) and address are passed in as strings, along with the tls.Config. If the tls.Config +// is set the client scheme will be set to https. +// The client will be given a 32-second timeout (see https://github.com/docker/docker/pull/8035). 
+func NewDockerCli(in io.ReadCloser, out, err io.Writer, clientFlags *cli.ClientFlags) *DockerCli { + cli := &DockerCli{ + in: in, + out: out, + err: err, + keyFile: clientFlags.Common.TrustKey, + } + + cli.init = func() error { + clientFlags.PostParse() + + hosts := clientFlags.Common.Hosts + + switch len(hosts) { + case 0: + defaultHost := os.Getenv("DOCKER_HOST") + if defaultHost == "" { + defaultHost = opts.DefaultHost + } + defaultHost, err := opts.ValidateHost(defaultHost) + if err != nil { + return err + } + hosts = []string{defaultHost} + case 1: + // only accept one host to talk to + default: + return errors.New("Please specify only one -H") + } + + protoAddrParts := strings.SplitN(hosts[0], "://", 2) + cli.proto, cli.addr = protoAddrParts[0], protoAddrParts[1] + + if cli.proto == "tcp" { + // error is checked in pkg/parsers already + parsed, _ := url.Parse("tcp://" + cli.addr) + cli.addr = parsed.Host + cli.basePath = parsed.Path + } + + if clientFlags.Common.TLSOptions != nil { + cli.scheme = "https" + var e error + cli.tlsConfig, e = tlsconfig.Client(*clientFlags.Common.TLSOptions) + if e != nil { + return e + } + } else { + cli.scheme = "http" + } + + if cli.in != nil { + cli.inFd, cli.isTerminalIn = term.GetFdInfo(cli.in) + } + if cli.out != nil { + cli.outFd, cli.isTerminalOut = term.GetFdInfo(cli.out) + } + + // The transport is created here for reuse during the client session. + cli.transport = &http.Transport{ + TLSClientConfig: cli.tlsConfig, + } + sockets.ConfigureTCPTransport(cli.transport, cli.proto, cli.addr) + + configFile, e := cliconfig.Load(cliconfig.ConfigDir()) + if e != nil { + fmt.Fprintf(cli.err, "WARNING: Error loading config file:%v\n", e) + } + cli.configFile = configFile + + return nil + } + + return cli +} diff --git a/api/client/client.go b/api/client/client.go new file mode 100644 index 00000000..4cfce5f6 --- /dev/null +++ b/api/client/client.go @@ -0,0 +1,5 @@ +// Package client provides a command-line interface for Docker. +// +// Run "docker help SUBCOMMAND" or "docker SUBCOMMAND --help" to see more information on any Docker subcommand, including the full list of options supported for the subcommand. +// See https://docs.docker.com/installation/ for instructions on installing Docker. +package client diff --git a/api/client/commit.go b/api/client/commit.go new file mode 100644 index 00000000..fe4acd48 --- /dev/null +++ b/api/client/commit.go @@ -0,0 +1,84 @@ +package client + +import ( + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" +) + +// CmdCommit creates a new image from a container's changes. 
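+// For example (illustrative): docker commit -m "add nginx" mycontainer myrepo/nginx:v1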
+// +// Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]] +func (cli *DockerCli) CmdCommit(args ...string) error { + cmd := Cli.Subcmd("commit", []string{"CONTAINER [REPOSITORY[:TAG]]"}, "Create a new image from a container's changes", true) + flPause := cmd.Bool([]string{"p", "-pause"}, true, "Pause container during commit") + flComment := cmd.String([]string{"m", "-message"}, "", "Commit message") + flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (e.g., \"John Hannibal Smith \")") + flChanges := opts.NewListOpts(nil) + cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image") + // FIXME: --run is deprecated, it will be replaced with inline Dockerfile commands. + flConfig := cmd.String([]string{"#run", "#-run"}, "", "This option is deprecated and will be removed in a future version in favor of inline Dockerfile-compatible commands") + cmd.Require(flag.Max, 2) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var ( + name = cmd.Arg(0) + repository, tag = parsers.ParseRepositoryTag(cmd.Arg(1)) + ) + + //Check if the given image name can be resolved + if repository != "" { + if err := registry.ValidateRepositoryName(repository); err != nil { + return err + } + } + + v := url.Values{} + v.Set("container", name) + v.Set("repo", repository) + v.Set("tag", tag) + v.Set("comment", *flComment) + v.Set("author", *flAuthor) + for _, change := range flChanges.GetAll() { + v.Add("changes", change) + } + + if *flPause != true { + v.Set("pause", "0") + } + + var ( + config *runconfig.Config + response types.ContainerCommitResponse + ) + + if *flConfig != "" { + config = &runconfig.Config{} + if err := json.Unmarshal([]byte(*flConfig), config); err != nil { + return err + } + } + serverResp, err := cli.call("POST", "/commit?"+v.Encode(), config, nil) + if err != nil { + return err + } + + defer serverResp.body.Close() + + if err := json.NewDecoder(serverResp.body).Decode(&response); err != nil { + return err + } + + fmt.Fprintln(cli.out, response.ID) + return nil +} diff --git a/api/client/cp.go b/api/client/cp.go new file mode 100644 index 00000000..a36212a7 --- /dev/null +++ b/api/client/cp.go @@ -0,0 +1,324 @@ +package client + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/archive" + flag "github.com/docker/docker/pkg/mflag" +) + +type copyDirection int + +const ( + fromContainer copyDirection = (1 << iota) + toContainer + acrossContainers = fromContainer | toContainer +) + +// CmdCp copies files/folders to or from a path in a container. +// +// When copying from a container, if LOCALPATH is '-' the data is written as a +// tar archive file to STDOUT. +// +// When copying to a container, if LOCALPATH is '-' the data is read as a tar +// archive file from STDIN, and the destination CONTAINER:PATH, must specify +// a directory. 
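+//
+// For example (illustrative paths):
+//	docker cp mycontainer:/etc/hosts .
+//	docker cp ./app.conf mycontainer:/etc/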
+// +// Usage: +// docker cp CONTAINER:PATH LOCALPATH|- +// docker cp LOCALPATH|- CONTAINER:PATH +func (cli *DockerCli) CmdCp(args ...string) error { + cmd := Cli.Subcmd( + "cp", + []string{"CONTAINER:PATH LOCALPATH|-", "LOCALPATH|- CONTAINER:PATH"}, + strings.Join([]string{ + "Copy files/folders between a container and your host.\n", + "Use '-' as the source to read a tar archive from stdin\n", + "and extract it to a directory destination in a container.\n", + "Use '-' as the destination to stream a tar archive of a\n", + "container source to stdout.", + }, ""), + true, + ) + + cmd.Require(flag.Exact, 2) + cmd.ParseFlags(args, true) + + if cmd.Arg(0) == "" { + return fmt.Errorf("source can not be empty") + } + if cmd.Arg(1) == "" { + return fmt.Errorf("destination can not be empty") + } + + srcContainer, srcPath := splitCpArg(cmd.Arg(0)) + dstContainer, dstPath := splitCpArg(cmd.Arg(1)) + + var direction copyDirection + if srcContainer != "" { + direction |= fromContainer + } + if dstContainer != "" { + direction |= toContainer + } + + switch direction { + case fromContainer: + return cli.copyFromContainer(srcContainer, srcPath, dstPath) + case toContainer: + return cli.copyToContainer(srcPath, dstContainer, dstPath) + case acrossContainers: + // Copying between containers isn't supported. + return fmt.Errorf("copying between containers is not supported") + default: + // User didn't specify any container. + return fmt.Errorf("must specify at least one container source") + } +} + +// We use `:` as a delimiter between CONTAINER and PATH, but `:` could also be +// in a valid LOCALPATH, like `file:name.txt`. We can resolve this ambiguity by +// requiring a LOCALPATH with a `:` to be made explicit with a relative or +// absolute path: +// `/path/to/file:name.txt` or `./file:name.txt` +// +// This is apparently how `scp` handles this as well: +// http://www.cyberciti.biz/faq/rsync-scp-file-name-with-colon-punctuation-in-it/ +// +// We can't simply check for a filepath separator because container names may +// have a separator, e.g., "host0/cname1" if container is in a Docker cluster, +// so we have to check for a `/` or `.` prefix. Also, in the case of a Windows +// client, a `:` could be part of an absolute Windows path, in which case it +// is immediately proceeded by a backslash. +func splitCpArg(arg string) (container, path string) { + if filepath.IsAbs(arg) { + // Explicit local absolute path, e.g., `C:\foo` or `/foo`. + return "", arg + } + + parts := strings.SplitN(arg, ":", 2) + + if len(parts) == 1 || strings.HasPrefix(parts[0], ".") { + // Either there's no `:` in the arg + // OR it's an explicit local relative path like `./file:name.txt`. + return "", arg + } + + return parts[0], parts[1] +} + +func (cli *DockerCli) statContainerPath(containerName, path string) (types.ContainerPathStat, error) { + var stat types.ContainerPathStat + + query := make(url.Values, 1) + query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. 
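+	// A HEAD request returns no body; the daemon reports the stat result in
+	// the X-Docker-Container-Path-Stat header, which is decoded below by
+	// getContainerPathStatFromHeader.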
+ + urlStr := fmt.Sprintf("/containers/%s/archive?%s", containerName, query.Encode()) + + response, err := cli.call("HEAD", urlStr, nil, nil) + if err != nil { + return stat, err + } + defer response.body.Close() + + if response.statusCode != http.StatusOK { + return stat, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + } + + return getContainerPathStatFromHeader(response.header) +} + +func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) { + var stat types.ContainerPathStat + + encodedStat := header.Get("X-Docker-Container-Path-Stat") + statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) + + err := json.NewDecoder(statDecoder).Decode(&stat) + if err != nil { + err = fmt.Errorf("unable to decode container path stat header: %s", err) + } + + return stat, err +} + +func resolveLocalPath(localPath string) (absPath string, err error) { + if absPath, err = filepath.Abs(localPath); err != nil { + return + } + + return archive.PreserveTrailingDotOrSeparator(absPath, localPath), nil +} + +func (cli *DockerCli) copyFromContainer(srcContainer, srcPath, dstPath string) (err error) { + if dstPath != "-" { + // Get an absolute destination path. + dstPath, err = resolveLocalPath(dstPath) + if err != nil { + return err + } + } + + query := make(url.Values, 1) + query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. + + urlStr := fmt.Sprintf("/containers/%s/archive?%s", srcContainer, query.Encode()) + + response, err := cli.call("GET", urlStr, nil, nil) + if err != nil { + return err + } + defer response.body.Close() + + if response.statusCode != http.StatusOK { + return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + } + + if dstPath == "-" { + // Send the response to STDOUT. + _, err = io.Copy(os.Stdout, response.body) + + return err + } + + // In order to get the copy behavior right, we need to know information + // about both the source and the destination. The response headers include + // stat info about the source that we can use in deciding exactly how to + // copy it locally. Along with the stat info about the local destination, + // we have everything we need to handle the multiple possibilities there + // can be when copying a file/dir from one location to another file/dir. + stat, err := getContainerPathStatFromHeader(response.header) + if err != nil { + return fmt.Errorf("unable to get resource stat from response: %s", err) + } + + // Prepare source copy info. + srcInfo := archive.CopyInfo{ + Path: srcPath, + Exists: true, + IsDir: stat.Mode.IsDir(), + } + + // See comments in the implementation of `archive.CopyTo` for exactly what + // goes into deciding how and whether the source archive needs to be + // altered for the correct copy behavior. + return archive.CopyTo(response.body, srcInfo, dstPath) +} + +func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string) (err error) { + if srcPath != "-" { + // Get an absolute source path. + srcPath, err = resolveLocalPath(srcPath) + if err != nil { + return err + } + } + + // In order to get the copy behavior right, we need to know information + // about both the source and destination. The API is a simple tar + // archive/extract API but we can use the stat info header about the + // destination to be more informed about exactly what the destination is. + + // Prepare destination copy info by stat-ing the container path. 
+	dstInfo := archive.CopyInfo{Path: dstPath}
+	dstStat, err := cli.statContainerPath(dstContainer, dstPath)
+
+	// If the destination is a symbolic link, we should evaluate it.
+	if err == nil && dstStat.Mode&os.ModeSymlink != 0 {
+		linkTarget := dstStat.LinkTarget
+		if !filepath.IsAbs(linkTarget) {
+			// Join with the parent directory.
+			dstParent, _ := archive.SplitPathDirEntry(dstPath)
+			linkTarget = filepath.Join(dstParent, linkTarget)
+		}
+
+		dstInfo.Path = linkTarget
+		dstStat, err = cli.statContainerPath(dstContainer, linkTarget)
+	}
+
+	// Ignore any error and assume that the parent directory of the destination
+	// path exists, in which case the copy may still succeed. If there is any
+	// type of conflict (e.g., non-directory overwriting an existing directory
+	// or vice versa) the extraction will fail. If the destination simply did
+	// not exist, but the parent directory does, the extraction will still
+	// succeed.
+	if err == nil {
+		dstInfo.Exists, dstInfo.IsDir = true, dstStat.Mode.IsDir()
+	}
+
+	var (
+		content         io.Reader
+		resolvedDstPath string
+	)
+
+	if srcPath == "-" {
+		// Use STDIN.
+		content = os.Stdin
+		resolvedDstPath = dstInfo.Path
+		if !dstInfo.IsDir {
+			return fmt.Errorf("destination %q must be a directory", fmt.Sprintf("%s:%s", dstContainer, dstPath))
+		}
+	} else {
+		// Prepare source copy info.
+		srcInfo, err := archive.CopyInfoSourcePath(srcPath)
+		if err != nil {
+			return err
+		}
+
+		srcArchive, err := archive.TarResource(srcInfo)
+		if err != nil {
+			return err
+		}
+		defer srcArchive.Close()
+
+		// With the stat info about the local source as well as the
+		// destination, we have enough information to know whether we need to
+		// alter the archive that we upload so that when the server extracts
+		// it to the specified directory in the container we get the desired
+		// copy behavior.
+
+		// See comments in the implementation of `archive.PrepareArchiveCopy`
+		// for exactly what goes into deciding how and whether the source
+		// archive needs to be altered for the correct copy behavior when it is
+		// extracted. This function also infers from the source and destination
+		// info which directory to extract to, which may be the parent of the
+		// destination that the user specified.
+		dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo)
+		if err != nil {
+			return err
+		}
+		defer preparedArchive.Close()
+
+		resolvedDstPath = dstDir
+		content = preparedArchive
+	}
+
+	query := make(url.Values, 2)
+	query.Set("path", filepath.ToSlash(resolvedDstPath)) // Normalize the paths used in the API.
+	// Do not allow for an existing directory to be overwritten by a non-directory and vice versa.
+ query.Set("noOverwriteDirNonDir", "true") + + urlStr := fmt.Sprintf("/containers/%s/archive?%s", dstContainer, query.Encode()) + + response, err := cli.stream("PUT", urlStr, &streamOpts{in: content}) + if err != nil { + return err + } + defer response.body.Close() + + if response.statusCode != http.StatusOK { + return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + } + + return nil +} diff --git a/api/client/create.go b/api/client/create.go new file mode 100644 index 00000000..76e935eb --- /dev/null +++ b/api/client/create.go @@ -0,0 +1,185 @@ +package client + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/url" + "os" + "strings" + + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/graph/tags" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" +) + +func (cli *DockerCli) pullImage(image string) error { + return cli.pullImageCustomOut(image, cli.out) +} + +func (cli *DockerCli) pullImageCustomOut(image string, out io.Writer) error { + v := url.Values{} + repos, tag := parsers.ParseRepositoryTag(image) + // pull only the image tagged 'latest' if no tag was specified + if tag == "" { + tag = tags.DEFAULTTAG + } + v.Set("fromImage", repos) + v.Set("tag", tag) + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(repos) + if err != nil { + return err + } + + // Resolve the Auth config relevant for this server + authConfig := registry.ResolveAuthConfig(cli.configFile, repoInfo.Index) + buf, err := json.Marshal(authConfig) + if err != nil { + return err + } + + registryAuthHeader := []string{ + base64.URLEncoding.EncodeToString(buf), + } + sopts := &streamOpts{ + rawTerminal: true, + out: out, + headers: map[string][]string{"X-Registry-Auth": registryAuthHeader}, + } + if _, err := cli.stream("POST", "/images/create?"+v.Encode(), sopts); err != nil { + return err + } + return nil +} + +type cidFile struct { + path string + file *os.File + written bool +} + +func newCIDFile(path string) (*cidFile, error) { + if _, err := os.Stat(path); err == nil { + return nil, fmt.Errorf("Container ID file found, make sure the other container isn't running or delete %s", path) + } + + f, err := os.Create(path) + if err != nil { + return nil, fmt.Errorf("Failed to create the container ID file: %s", err) + } + + return &cidFile{path: path, file: f}, nil +} + +func (cli *DockerCli) createContainer(config *runconfig.Config, hostConfig *runconfig.HostConfig, cidfile, name string) (*types.ContainerCreateResponse, error) { + containerValues := url.Values{} + if name != "" { + containerValues.Set("name", name) + } + + mergedConfig := runconfig.MergeConfigs(config, hostConfig) + + var containerIDFile *cidFile + if cidfile != "" { + var err error + if containerIDFile, err = newCIDFile(cidfile); err != nil { + return nil, err + } + defer containerIDFile.Close() + } + + repo, tag := parsers.ParseRepositoryTag(config.Image) + if tag == "" { + tag = tags.DEFAULTTAG + } + + ref := registry.ParseReference(tag) + var trustedRef registry.Reference + + if isTrusted() && !ref.HasDigest() { + var err error + trustedRef, err = cli.trustedReference(repo, ref) + if err != nil { + return nil, err + } + config.Image = trustedRef.ImageName(repo) + } + + //create the container + serverResp, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), mergedConfig, nil) + //if image not found try to pull 
it + if serverResp.statusCode == 404 && strings.Contains(err.Error(), config.Image) { + fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", ref.ImageName(repo)) + + // we don't want to write to stdout anything apart from container.ID + if err = cli.pullImageCustomOut(config.Image, cli.err); err != nil { + return nil, err + } + if trustedRef != nil && !ref.HasDigest() { + repoInfo, err := registry.ParseRepositoryInfo(repo) + if err != nil { + return nil, err + } + if err := cli.tagTrusted(repoInfo, trustedRef, ref); err != nil { + return nil, err + } + } + // Retry + if serverResp, err = cli.call("POST", "/containers/create?"+containerValues.Encode(), mergedConfig, nil); err != nil { + return nil, err + } + } else if err != nil { + return nil, err + } + + defer serverResp.body.Close() + + var response types.ContainerCreateResponse + if err := json.NewDecoder(serverResp.body).Decode(&response); err != nil { + return nil, err + } + for _, warning := range response.Warnings { + fmt.Fprintf(cli.err, "WARNING: %s\n", warning) + } + if containerIDFile != nil { + if err = containerIDFile.Write(response.ID); err != nil { + return nil, err + } + } + return &response, nil +} + +// CmdCreate creates a new container from a given image. +// +// Usage: docker create [OPTIONS] IMAGE [COMMAND] [ARG...] +func (cli *DockerCli) CmdCreate(args ...string) error { + cmd := Cli.Subcmd("create", []string{"IMAGE [COMMAND] [ARG...]"}, "Create a new container", true) + addTrustedFlags(cmd, true) + + // These are flags not stored in Config/HostConfig + var ( + flName = cmd.String([]string{"-name"}, "", "Assign a name to the container") + ) + + config, hostConfig, cmd, err := runconfig.Parse(cmd, args) + if err != nil { + cmd.ReportError(err.Error(), true) + os.Exit(1) + } + if config.Image == "" { + cmd.Usage() + return nil + } + response, err := cli.createContainer(config, hostConfig, hostConfig.ContainerIDFile, *flName) + if err != nil { + return err + } + fmt.Fprintf(cli.out, "%s\n", response.ID) + return nil +} diff --git a/api/client/diff.go b/api/client/diff.go new file mode 100644 index 00000000..b955774c --- /dev/null +++ b/api/client/diff.go @@ -0,0 +1,56 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/archive" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdDiff shows changes on a container's filesystem. +// +// Each changed file is printed on a separate line, prefixed with a single +// character that indicates the status of the file: C (modified), A (added), +// or D (deleted). 
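+//
+// For example (illustrative output):
+//	C /etc
+//	A /etc/app.conf
+//	D /tmp/scratch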
+// +// Usage: docker diff CONTAINER +func (cli *DockerCli) CmdDiff(args ...string) error { + cmd := Cli.Subcmd("diff", []string{"CONTAINER"}, "Inspect changes on a container's filesystem", true) + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + if cmd.Arg(0) == "" { + return fmt.Errorf("Container name cannot be empty") + } + + serverResp, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/changes", nil, nil) + if err != nil { + return err + } + + defer serverResp.body.Close() + + changes := []types.ContainerChange{} + if err := json.NewDecoder(serverResp.body).Decode(&changes); err != nil { + return err + } + + for _, change := range changes { + var kind string + switch change.Kind { + case archive.ChangeModify: + kind = "C" + case archive.ChangeAdd: + kind = "A" + case archive.ChangeDelete: + kind = "D" + } + fmt.Fprintf(cli.out, "%s %s\n", kind, change.Path) + } + + return nil +} diff --git a/api/client/events.go b/api/client/events.go new file mode 100644 index 00000000..c0168bdb --- /dev/null +++ b/api/client/events.go @@ -0,0 +1,63 @@ +package client + +import ( + "net/url" + "time" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/parsers/filters" + "github.com/docker/docker/pkg/timeutils" +) + +// CmdEvents prints a live stream of real time events from the server. +// +// Usage: docker events [OPTIONS] +func (cli *DockerCli) CmdEvents(args ...string) error { + cmd := Cli.Subcmd("events", nil, "Get real time events from the server", true) + since := cmd.String([]string{"#since", "-since"}, "", "Show all events created since timestamp") + until := cmd.String([]string{"-until"}, "", "Stream events until this timestamp") + flFilter := opts.NewListOpts(nil) + cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") + cmd.Require(flag.Exact, 0) + + cmd.ParseFlags(args, true) + + var ( + v = url.Values{} + eventFilterArgs = filters.Args{} + ) + + // Consolidate all filter flags, and sanity check them early. + // They'll get process in the daemon/server. + for _, f := range flFilter.GetAll() { + var err error + eventFilterArgs, err = filters.ParseFlag(f, eventFilterArgs) + if err != nil { + return err + } + } + ref := time.Now() + if *since != "" { + v.Set("since", timeutils.GetTimestamp(*since, ref)) + } + if *until != "" { + v.Set("until", timeutils.GetTimestamp(*until, ref)) + } + if len(eventFilterArgs) > 0 { + filterJSON, err := filters.ToParam(eventFilterArgs) + if err != nil { + return err + } + v.Set("filters", filterJSON) + } + sopts := &streamOpts{ + rawTerminal: true, + out: cli.out, + } + if _, err := cli.stream("GET", "/events?"+v.Encode(), sopts); err != nil { + return err + } + return nil +} diff --git a/api/client/exec.go b/api/client/exec.go new file mode 100644 index 00000000..d02c019b --- /dev/null +++ b/api/client/exec.go @@ -0,0 +1,134 @@ +package client + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/runconfig" +) + +// CmdExec runs a command in a running container. +// +// Usage: docker exec [OPTIONS] CONTAINER COMMAND [ARG...] 
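+//
+// For example (illustrative): docker exec -it mycontainer /bin/sh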
+func (cli *DockerCli) CmdExec(args ...string) error { + cmd := Cli.Subcmd("exec", []string{"CONTAINER COMMAND [ARG...]"}, "Run a command in a running container", true) + + execConfig, err := runconfig.ParseExec(cmd, args) + // just in case the ParseExec does not exit + if execConfig.Container == "" || err != nil { + return Cli.StatusError{StatusCode: 1} + } + + serverResp, err := cli.call("POST", "/containers/"+execConfig.Container+"/exec", execConfig, nil) + if err != nil { + return err + } + + defer serverResp.body.Close() + + var response types.ContainerExecCreateResponse + if err := json.NewDecoder(serverResp.body).Decode(&response); err != nil { + return err + } + + execID := response.ID + + if execID == "" { + fmt.Fprintf(cli.out, "exec ID empty") + return nil + } + + //Temp struct for execStart so that we don't need to transfer all the execConfig + execStartCheck := &types.ExecStartCheck{ + Detach: execConfig.Detach, + Tty: execConfig.Tty, + } + + if !execConfig.Detach { + if err := cli.CheckTtyInput(execConfig.AttachStdin, execConfig.Tty); err != nil { + return err + } + } else { + if _, _, err := readBody(cli.call("POST", "/exec/"+execID+"/start", execStartCheck, nil)); err != nil { + return err + } + // For now don't print this - wait for when we support exec wait() + // fmt.Fprintf(cli.out, "%s\n", execID) + return nil + } + + // Interactive exec requested. + var ( + out, stderr io.Writer + in io.ReadCloser + hijacked = make(chan io.Closer) + errCh chan error + ) + + // Block the return until the chan gets closed + defer func() { + logrus.Debugf("End of CmdExec(), Waiting for hijack to finish.") + if _, ok := <-hijacked; ok { + fmt.Fprintln(cli.err, "Hijack did not finish (chan still open)") + } + }() + + if execConfig.AttachStdin { + in = cli.in + } + if execConfig.AttachStdout { + out = cli.out + } + if execConfig.AttachStderr { + if execConfig.Tty { + stderr = cli.out + } else { + stderr = cli.err + } + } + errCh = promise.Go(func() error { + return cli.hijack("POST", "/exec/"+execID+"/start", execConfig.Tty, in, out, stderr, hijacked, execConfig) + }) + + // Acknowledge the hijack before starting + select { + case closer := <-hijacked: + // Make sure that hijack gets closed when returning. (result + // in closing hijack chan and freeing server's goroutines. + if closer != nil { + defer closer.Close() + } + case err := <-errCh: + if err != nil { + logrus.Debugf("Error hijack: %s", err) + return err + } + } + + if execConfig.Tty && cli.isTerminalIn { + if err := cli.monitorTtySize(execID, true); err != nil { + fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err) + } + } + + if err := <-errCh; err != nil { + logrus.Debugf("Error hijack: %s", err) + return err + } + + var status int + if _, status, err = getExecExitCode(cli, execID); err != nil { + return err + } + + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + + return nil +} diff --git a/api/client/export.go b/api/client/export.go new file mode 100644 index 00000000..78594997 --- /dev/null +++ b/api/client/export.go @@ -0,0 +1,47 @@ +package client + +import ( + "errors" + "io" + "os" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdExport exports a filesystem as a tar archive. +// +// The tar archive is streamed to STDOUT by default or written to a file. 
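+// For example (illustrative): docker export -o rootfs.tar mycontainer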
+//
+// Usage: docker export [OPTIONS] CONTAINER
+func (cli *DockerCli) CmdExport(args ...string) error {
+	cmd := Cli.Subcmd("export", []string{"CONTAINER"}, "Export the contents of a container's filesystem as a tar archive", true)
+	outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT")
+	cmd.Require(flag.Exact, 1)
+
+	cmd.ParseFlags(args, true)
+
+	var (
+		output io.Writer = cli.out
+		err    error
+	)
+	if *outfile != "" {
+		output, err = os.Create(*outfile)
+		if err != nil {
+			return err
+		}
+	} else if cli.isTerminalOut {
+		return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.")
+	}
+
+	image := cmd.Arg(0)
+	sopts := &streamOpts{
+		rawTerminal: true,
+		out:         output,
+	}
+	if _, err := cli.stream("GET", "/containers/"+image+"/export", sopts); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/api/client/hijack.go b/api/client/hijack.go
new file mode 100644
index 00000000..5853d79b
--- /dev/null
+++ b/api/client/hijack.go
@@ -0,0 +1,257 @@
+package client
+
+import (
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/http/httputil"
+	"os"
+	"runtime"
+	"strings"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api"
+	"github.com/docker/docker/autogen/dockerversion"
+	"github.com/docker/docker/pkg/promise"
+	"github.com/docker/docker/pkg/stdcopy"
+	"github.com/docker/docker/pkg/term"
+)
+
+type tlsClientCon struct {
+	*tls.Conn
+	rawConn net.Conn
+}
+
+func (c *tlsClientCon) CloseWrite() error {
+	// Go standard tls.Conn doesn't provide the CloseWrite() method so we do it
+	// on its underlying connection.
+	if cwc, ok := c.rawConn.(interface {
+		CloseWrite() error
+	}); ok {
+		return cwc.CloseWrite()
+	}
+	return nil
+}
+
+func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) {
+	return tlsDialWithDialer(new(net.Dialer), network, addr, config)
+}
+
+// We need to copy Go's implementation of tls.Dial (crypto/tls/tls.go) in
+// order to return our custom tlsClientCon struct which holds both the tls.Conn
+// object _and_ its underlying raw connection. The rationale for this is that
+// we need to be able to close the write end of the connection when attaching,
+// which tls.Conn does not provide.
+func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) {
+	// We want the Timeout and Deadline values from dialer to cover the
+	// whole process: TCP connection and TLS handshake. This means that we
+	// also need to start our own timers now.
+	timeout := dialer.Timeout
+
+	if !dialer.Deadline.IsZero() {
+		deadlineTimeout := dialer.Deadline.Sub(time.Now())
+		if timeout == 0 || deadlineTimeout < timeout {
+			timeout = deadlineTimeout
+		}
+	}
+
+	var errChannel chan error
+
+	if timeout != 0 {
+		errChannel = make(chan error, 2)
+		time.AfterFunc(timeout, func() {
+			errChannel <- errors.New("")
+		})
+	}
+
+	rawConn, err := dialer.Dial(network, addr)
+	if err != nil {
+		return nil, err
+	}
+	// When we set up a TCP connection for hijack, there could be long periods
+	// of inactivity (a long running command with no output) that in certain
+	// network setups may cause ECONNTIMEOUT, leaving the client in an unknown
+	// state. Setting TCP KeepAlive on the socket connection will prohibit
+	// ECONNTIMEOUT unless the socket connection truly is broken
+	if tcpConn, ok := rawConn.(*net.TCPConn); ok {
+		tcpConn.SetKeepAlive(true)
+		tcpConn.SetKeepAlivePeriod(30 * time.Second)
+	}
+
+	colonPos := strings.LastIndex(addr, ":")
+	if colonPos == -1 {
+		colonPos = len(addr)
+	}
+	hostname := addr[:colonPos]
+
+	// If no ServerName is set, infer the ServerName
+	// from the hostname we're connecting to.
+	if config.ServerName == "" {
+		// Make a copy to avoid polluting argument or default.
+		c := *config
+		c.ServerName = hostname
+		config = &c
+	}
+
+	conn := tls.Client(rawConn, config)
+
+	if timeout == 0 {
+		err = conn.Handshake()
+	} else {
+		go func() {
+			errChannel <- conn.Handshake()
+		}()
+
+		err = <-errChannel
+	}
+
+	if err != nil {
+		rawConn.Close()
+		return nil, err
+	}
+
+	// This is where we differ from the standard crypto/tls package: we return
+	// a wrapper which holds both the TLS and raw connections.
+	return &tlsClientCon{conn, rawConn}, nil
+}
+
+func (cli *DockerCli) dial() (net.Conn, error) {
+	if cli.tlsConfig != nil && cli.proto != "unix" {
+		// Note that this is not the standard library's tls.Dial function
+		return tlsDial(cli.proto, cli.addr, cli.tlsConfig)
+	}
+	return net.Dial(cli.proto, cli.addr)
+}
+
+func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer, data interface{}) error {
+	defer func() {
+		if started != nil {
+			close(started)
+		}
+	}()
+
+	params, err := cli.encodeData(data)
+	if err != nil {
+		return err
+	}
+	req, err := http.NewRequest(method, fmt.Sprintf("%s/v%s%s", cli.basePath, api.Version, path), params)
+	if err != nil {
+		return err
+	}
+
+	// Add CLI Config's HTTP Headers BEFORE we set the Docker headers
+	// so the user can't change OUR headers
+	for k, v := range cli.configFile.HTTPHeaders {
+		req.Header.Set(k, v)
+	}
+
+	req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION+" ("+runtime.GOOS+")")
+	req.Header.Set("Content-Type", "text/plain")
+	req.Header.Set("Connection", "Upgrade")
+	req.Header.Set("Upgrade", "tcp")
+	req.Host = cli.addr
+
+	dial, err := cli.dial()
+	// When we set up a TCP connection for hijack, there could be long periods
+	// of inactivity (a long running command with no output) that in certain
+	// network setups may cause ECONNTIMEOUT, leaving the client in an unknown
+	// state. Setting TCP KeepAlive on the socket connection will prohibit
+	// ECONNTIMEOUT unless the socket connection truly is broken
+	if tcpConn, ok := dial.(*net.TCPConn); ok {
+		tcpConn.SetKeepAlive(true)
+		tcpConn.SetKeepAlivePeriod(30 * time.Second)
+	}
+	if err != nil {
+		if strings.Contains(err.Error(), "connection refused") {
+			return fmt.Errorf("Cannot connect to the Docker daemon.
Is 'docker -d' running on this host?") + } + return err + } + clientconn := httputil.NewClientConn(dial, nil) + defer clientconn.Close() + + // Server hijacks the connection, error 'connection closed' expected + clientconn.Do(req) + + rwc, br := clientconn.Hijack() + defer rwc.Close() + + if started != nil { + started <- rwc + } + + var receiveStdout chan error + + var oldState *term.State + + if in != nil && setRawTerminal && cli.isTerminalIn && os.Getenv("NORAW") == "" { + oldState, err = term.SetRawTerminal(cli.inFd) + if err != nil { + return err + } + defer term.RestoreTerminal(cli.inFd, oldState) + } + + if stdout != nil || stderr != nil { + receiveStdout = promise.Go(func() (err error) { + defer func() { + if in != nil { + if setRawTerminal && cli.isTerminalIn { + term.RestoreTerminal(cli.inFd, oldState) + } + // For some reason this Close call blocks on darwin.. + // As the client exists right after, simply discard the close + // until we find a better solution. + if runtime.GOOS != "darwin" { + in.Close() + } + } + }() + + // When TTY is ON, use regular copy + if setRawTerminal && stdout != nil { + _, err = io.Copy(stdout, br) + } else { + _, err = stdcopy.StdCopy(stdout, stderr, br) + } + logrus.Debugf("[hijack] End of stdout") + return err + }) + } + + sendStdin := promise.Go(func() error { + if in != nil { + io.Copy(rwc, in) + logrus.Debugf("[hijack] End of stdin") + } + + if conn, ok := rwc.(interface { + CloseWrite() error + }); ok { + if err := conn.CloseWrite(); err != nil { + logrus.Debugf("Couldn't send EOF: %s", err) + } + } + // Discard errors due to pipe interruption + return nil + }) + + if stdout != nil || stderr != nil { + if err := <-receiveStdout; err != nil { + logrus.Debugf("Error receiveStdout: %s", err) + return err + } + } + + if !cli.isTerminalIn { + if err := <-sendStdin; err != nil { + logrus.Debugf("Error sendStdin: %s", err) + return err + } + } + return nil +} diff --git a/api/client/history.go b/api/client/history.go new file mode 100644 index 00000000..925add66 --- /dev/null +++ b/api/client/history.go @@ -0,0 +1,77 @@ +package client + +import ( + "encoding/json" + "fmt" + "text/tabwriter" + "time" + + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/pkg/units" +) + +// CmdHistory shows the history of an image. 
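+// For example (assuming an image named "ubuntu" is present locally),
+// typical invocations would be:
+//
+//	docker history ubuntu                 // human-readable sizes and dates
+//	docker history -q --no-trunc ubuntu   // full image IDs only, one per line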
+// +// Usage: docker history [OPTIONS] IMAGE +func (cli *DockerCli) CmdHistory(args ...string) error { + cmd := Cli.Subcmd("history", []string{"IMAGE"}, "Show the history of an image", true) + human := cmd.Bool([]string{"H", "-human"}, true, "Print sizes and dates in human readable format") + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") + noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + serverResp, err := cli.call("GET", "/images/"+cmd.Arg(0)+"/history", nil, nil) + if err != nil { + return err + } + + defer serverResp.body.Close() + + history := []types.ImageHistory{} + if err := json.NewDecoder(serverResp.body).Decode(&history); err != nil { + return err + } + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + if !*quiet { + fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE\tCOMMENT") + } + + for _, entry := range history { + if *noTrunc { + fmt.Fprintf(w, entry.ID) + } else { + fmt.Fprintf(w, stringid.TruncateID(entry.ID)) + } + if !*quiet { + if *human { + fmt.Fprintf(w, "\t%s ago\t", units.HumanDuration(time.Now().UTC().Sub(time.Unix(entry.Created, 0)))) + } else { + fmt.Fprintf(w, "\t%s\t", time.Unix(entry.Created, 0).Format(time.RFC3339)) + } + + if *noTrunc { + fmt.Fprintf(w, "%s\t", entry.CreatedBy) + } else { + fmt.Fprintf(w, "%s\t", stringutils.Truncate(entry.CreatedBy, 45)) + } + + if *human { + fmt.Fprintf(w, "%s\t", units.HumanSize(float64(entry.Size))) + } else { + fmt.Fprintf(w, "%d\t", entry.Size) + } + + fmt.Fprintf(w, "%s", entry.Comment) + } + fmt.Fprintf(w, "\n") + } + w.Flush() + return nil +} diff --git a/api/client/images.go b/api/client/images.go new file mode 100644 index 00000000..92adeed0 --- /dev/null +++ b/api/client/images.go @@ -0,0 +1,130 @@ +package client + +import ( + "encoding/json" + "fmt" + "net/url" + "text/tabwriter" + "time" + + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/parsers/filters" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/units" + "github.com/docker/docker/utils" +) + +// CmdImages lists the images in a specified repository, or all top-level images if no repository is specified. +// +// Usage: docker images [OPTIONS] [REPOSITORY] +func (cli *DockerCli) CmdImages(args ...string) error { + cmd := Cli.Subcmd("images", []string{"[REPOSITORY]"}, "List images", true) + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") + all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (default hides intermediate images)") + noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") + showDigests := cmd.Bool([]string{"-digests"}, false, "Show digests") + + flFilter := opts.NewListOpts(nil) + cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") + cmd.Require(flag.Max, 1) + + cmd.ParseFlags(args, true) + + // Consolidate all filter flags, and sanity check them early. + // They'll get process in the daemon/server. 
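+	// For example, "-f dangling=true -f label=foo" arrives here as two raw
+	// strings; filters.ParseFlag folds each one into the same filters.Args
+	// map, so repeated -f/--filter flags accumulate rather than replace
+	// each other.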
+ imageFilterArgs := filters.Args{} + for _, f := range flFilter.GetAll() { + var err error + imageFilterArgs, err = filters.ParseFlag(f, imageFilterArgs) + if err != nil { + return err + } + } + + matchName := cmd.Arg(0) + v := url.Values{} + if len(imageFilterArgs) > 0 { + filterJSON, err := filters.ToParam(imageFilterArgs) + if err != nil { + return err + } + v.Set("filters", filterJSON) + } + + if cmd.NArg() == 1 { + // FIXME rename this parameter, to not be confused with the filters flag + v.Set("filter", matchName) + } + if *all { + v.Set("all", "1") + } + + serverResp, err := cli.call("GET", "/images/json?"+v.Encode(), nil, nil) + if err != nil { + return err + } + + defer serverResp.body.Close() + + images := []types.Image{} + if err := json.NewDecoder(serverResp.body).Decode(&images); err != nil { + return err + } + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + if !*quiet { + if *showDigests { + fmt.Fprintln(w, "REPOSITORY\tTAG\tDIGEST\tIMAGE ID\tCREATED\tVIRTUAL SIZE") + } else { + fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE") + } + } + + for _, image := range images { + ID := image.ID + if !*noTrunc { + ID = stringid.TruncateID(ID) + } + + repoTags := image.RepoTags + repoDigests := image.RepoDigests + + if len(repoTags) == 1 && repoTags[0] == ":" && len(repoDigests) == 1 && repoDigests[0] == "@" { + // dangling image - clear out either repoTags or repoDigsts so we only show it once below + repoDigests = []string{} + } + + // combine the tags and digests lists + tagsAndDigests := append(repoTags, repoDigests...) + for _, repoAndRef := range tagsAndDigests { + repo, ref := parsers.ParseRepositoryTag(repoAndRef) + // default tag and digest to none - if there's a value, it'll be set below + tag := "" + digest := "" + if utils.DigestReference(ref) { + digest = ref + } else { + tag = ref + } + + if !*quiet { + if *showDigests { + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s ago\t%s\n", repo, tag, digest, ID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(int64(image.Created), 0))), units.HumanSize(float64(image.VirtualSize))) + } else { + fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, ID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(int64(image.Created), 0))), units.HumanSize(float64(image.VirtualSize))) + } + } else { + fmt.Fprintln(w, ID) + } + } + } + + if !*quiet { + w.Flush() + } + return nil +} diff --git a/api/client/import.go b/api/client/import.go new file mode 100644 index 00000000..ec3d028f --- /dev/null +++ b/api/client/import.go @@ -0,0 +1,77 @@ +package client + +import ( + "fmt" + "io" + "net/url" + "os" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/docker/registry" +) + +// CmdImport creates an empty filesystem image, imports the contents of the tarball into the image, and optionally tags the image. +// +// The URL argument is the address of a tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) file or a path to local file relative to docker client. If the URL is '-', then the tar file is read from STDIN. 
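+// For example (hypothetical file and repository names):
+//
+//	cat rootfs.tar | docker import - example/base:latest
+//	docker import ./rootfs.tar example/base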
+// +// Usage: docker import [OPTIONS] file|URL|- [REPOSITORY[:TAG]] +func (cli *DockerCli) CmdImport(args ...string) error { + cmd := Cli.Subcmd("import", []string{"file|URL|- [REPOSITORY[:TAG]]"}, "Create an empty filesystem image and import the contents of the\ntarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then\noptionally tag it.", true) + flChanges := opts.NewListOpts(nil) + cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var ( + v = url.Values{} + src = cmd.Arg(0) + repository = cmd.Arg(1) + ) + + v.Set("fromSrc", src) + v.Set("repo", repository) + for _, change := range flChanges.GetAll() { + v.Add("changes", change) + } + if cmd.NArg() == 3 { + fmt.Fprintf(cli.err, "[DEPRECATED] The format 'file|URL|- [REPOSITORY [TAG]]' has been deprecated. Please use file|URL|- [REPOSITORY[:TAG]]\n") + v.Set("tag", cmd.Arg(2)) + } + + if repository != "" { + //Check if the given image name can be resolved + repo, _ := parsers.ParseRepositoryTag(repository) + if err := registry.ValidateRepositoryName(repo); err != nil { + return err + } + } + + var in io.Reader + + if src == "-" { + in = cli.in + } else if !urlutil.IsURL(src) { + v.Set("fromSrc", "-") + file, err := os.Open(src) + if err != nil { + return err + } + defer file.Close() + in = file + + } + + sopts := &streamOpts{ + rawTerminal: true, + in: in, + out: cli.out, + } + + _, err := cli.stream("POST", "/images/create?"+v.Encode(), sopts) + return err +} diff --git a/api/client/info.go b/api/client/info.go new file mode 100644 index 00000000..c7b19ccb --- /dev/null +++ b/api/client/info.go @@ -0,0 +1,108 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/ioutils" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/units" +) + +// CmdInfo displays system-wide information. 
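+// The output includes container and image counts, the storage, execution
+// and logging drivers and, when the daemon runs in debug mode, additional
+// details such as file-descriptor and goroutine counts.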
+// +// Usage: docker info +func (cli *DockerCli) CmdInfo(args ...string) error { + cmd := Cli.Subcmd("info", nil, "Display system-wide information", true) + cmd.Require(flag.Exact, 0) + + cmd.ParseFlags(args, true) + + serverResp, err := cli.call("GET", "/info", nil, nil) + if err != nil { + return err + } + + defer serverResp.body.Close() + + info := &types.Info{} + if err := json.NewDecoder(serverResp.body).Decode(info); err != nil { + return fmt.Errorf("Error reading remote info: %v", err) + } + + fmt.Fprintf(cli.out, "Containers: %d\n", info.Containers) + fmt.Fprintf(cli.out, "Images: %d\n", info.Images) + ioutils.FprintfIfNotEmpty(cli.out, "Storage Driver: %s\n", info.Driver) + if info.DriverStatus != nil { + for _, pair := range info.DriverStatus { + fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1]) + } + } + ioutils.FprintfIfNotEmpty(cli.out, "Execution Driver: %s\n", info.ExecutionDriver) + ioutils.FprintfIfNotEmpty(cli.out, "Logging Driver: %s\n", info.LoggingDriver) + ioutils.FprintfIfNotEmpty(cli.out, "Kernel Version: %s\n", info.KernelVersion) + ioutils.FprintfIfNotEmpty(cli.out, "Operating System: %s\n", info.OperatingSystem) + fmt.Fprintf(cli.out, "CPUs: %d\n", info.NCPU) + fmt.Fprintf(cli.out, "Total Memory: %s\n", units.BytesSize(float64(info.MemTotal))) + ioutils.FprintfIfNotEmpty(cli.out, "Name: %s\n", info.Name) + ioutils.FprintfIfNotEmpty(cli.out, "ID: %s\n", info.ID) + + if info.Debug { + fmt.Fprintf(cli.out, "Debug mode (server): %v\n", info.Debug) + fmt.Fprintf(cli.out, "File Descriptors: %d\n", info.NFd) + fmt.Fprintf(cli.out, "Goroutines: %d\n", info.NGoroutines) + fmt.Fprintf(cli.out, "System Time: %s\n", info.SystemTime) + fmt.Fprintf(cli.out, "EventsListeners: %d\n", info.NEventsListener) + fmt.Fprintf(cli.out, "Init SHA1: %s\n", info.InitSha1) + fmt.Fprintf(cli.out, "Init Path: %s\n", info.InitPath) + fmt.Fprintf(cli.out, "Docker Root Dir: %s\n", info.DockerRootDir) + } + + ioutils.FprintfIfNotEmpty(cli.out, "Http Proxy: %s\n", info.HttpProxy) + ioutils.FprintfIfNotEmpty(cli.out, "Https Proxy: %s\n", info.HttpsProxy) + ioutils.FprintfIfNotEmpty(cli.out, "No Proxy: %s\n", info.NoProxy) + + if info.IndexServerAddress != "" { + u := cli.configFile.AuthConfigs[info.IndexServerAddress].Username + if len(u) > 0 { + fmt.Fprintf(cli.out, "Username: %v\n", u) + fmt.Fprintf(cli.out, "Registry: %v\n", info.IndexServerAddress) + } + } + // Only output these warnings if the server supports these features + if h, err := httputils.ParseServerHeader(serverResp.header.Get("Server")); err == nil { + if h.OS != "windows" { + if !info.MemoryLimit { + fmt.Fprintf(cli.err, "WARNING: No memory limit support\n") + } + if !info.SwapLimit { + fmt.Fprintf(cli.err, "WARNING: No swap limit support\n") + } + if !info.IPv4Forwarding { + fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n") + } + if !info.BridgeNfIptables { + fmt.Fprintf(cli.err, "WARNING: bridge-nf-call-iptables is disabled\n") + } + if !info.BridgeNfIp6tables { + fmt.Fprintf(cli.err, "WARNING: bridge-nf-call-ip6tables is disabled\n") + } + } + } + + if info.Labels != nil { + fmt.Fprintln(cli.out, "Labels:") + for _, attribute := range info.Labels { + fmt.Fprintf(cli.out, " %s\n", attribute) + } + } + + if info.ExperimentalBuild { + fmt.Fprintf(cli.out, "Experimental: true\n") + } + + return nil +} diff --git a/api/client/inspect.go b/api/client/inspect.go new file mode 100644 index 00000000..6e728bdf --- /dev/null +++ b/api/client/inspect.go @@ -0,0 +1,157 @@ +package client + +import ( + "bytes" + 
"encoding/json" + "fmt" + "io" + "strings" + "text/template" + + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +var funcMap = template.FuncMap{ + "json": func(v interface{}) string { + a, _ := json.Marshal(v) + return string(a) + }, +} + +// CmdInspect displays low-level information on one or more containers or images. +// +// Usage: docker inspect [OPTIONS] CONTAINER|IMAGE [CONTAINER|IMAGE...] +func (cli *DockerCli) CmdInspect(args ...string) error { + cmd := Cli.Subcmd("inspect", []string{"CONTAINER|IMAGE [CONTAINER|IMAGE...]"}, "Return low-level information on a container or image", true) + tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template") + inspectType := cmd.String([]string{"-type"}, "", "Return JSON for specified type, (e.g image or container)") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var tmpl *template.Template + var err error + var obj []byte + + if *tmplStr != "" { + if tmpl, err = template.New("").Funcs(funcMap).Parse(*tmplStr); err != nil { + return Cli.StatusError{StatusCode: 64, + Status: "Template parsing error: " + err.Error()} + } + } + + if *inspectType != "" && *inspectType != "container" && *inspectType != "image" { + return fmt.Errorf("%q is not a valid value for --type", *inspectType) + } + + indented := new(bytes.Buffer) + indented.WriteString("[\n") + status := 0 + isImage := false + + for _, name := range cmd.Args() { + + if *inspectType == "" || *inspectType == "container" { + obj, _, err = readBody(cli.call("GET", "/containers/"+name+"/json", nil, nil)) + if err != nil && *inspectType == "container" { + if strings.Contains(err.Error(), "No such") { + fmt.Fprintf(cli.err, "Error: No such container: %s\n", name) + } else { + fmt.Fprintf(cli.err, "%s", err) + } + status = 1 + continue + } + } + + if obj == nil && (*inspectType == "" || *inspectType == "image") { + obj, _, err = readBody(cli.call("GET", "/images/"+name+"/json", nil, nil)) + isImage = true + if err != nil { + if strings.Contains(err.Error(), "No such") { + if *inspectType == "" { + fmt.Fprintf(cli.err, "Error: No such image or container: %s\n", name) + } else { + fmt.Fprintf(cli.err, "Error: No such image: %s\n", name) + } + } else { + fmt.Fprintf(cli.err, "%s", err) + } + status = 1 + continue + } + + } + + if tmpl == nil { + if err := json.Indent(indented, obj, "", " "); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + status = 1 + continue + } + } else { + rdr := bytes.NewReader(obj) + dec := json.NewDecoder(rdr) + + if isImage { + inspPtr := types.ImageInspect{} + if err := dec.Decode(&inspPtr); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + status = 1 + continue + } + if err := tmpl.Execute(cli.out, inspPtr); err != nil { + rdr.Seek(0, 0) + var raw interface{} + if err := dec.Decode(&raw); err != nil { + return err + } + if err = tmpl.Execute(cli.out, raw); err != nil { + return err + } + } + } else { + inspPtr := types.ContainerJSON{} + if err := dec.Decode(&inspPtr); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + status = 1 + continue + } + if err := tmpl.Execute(cli.out, inspPtr); err != nil { + rdr.Seek(0, 0) + var raw interface{} + if err := dec.Decode(&raw); err != nil { + return err + } + if err = tmpl.Execute(cli.out, raw); err != nil { + return err + } + } + } + cli.out.Write([]byte{'\n'}) + } + indented.WriteString(",") + } + + if indented.Len() > 1 { + // Remove trailing ',' + indented.Truncate(indented.Len() - 1) + } 
+ indented.WriteString("]\n") + + if tmpl == nil { + // Note that we will always write "[]" when "-f" isn't specified, + // to make sure the output would always be array, see + // https://github.com/docker/docker/pull/9500#issuecomment-65846734 + if _, err := io.Copy(cli.out, indented); err != nil { + return err + } + } + + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + return nil +} diff --git a/api/client/kill.go b/api/client/kill.go new file mode 100644 index 00000000..63abed31 --- /dev/null +++ b/api/client/kill.go @@ -0,0 +1,33 @@ +package client + +import ( + "fmt" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdKill kills one or more running container using SIGKILL or a specified signal. +// +// Usage: docker kill [OPTIONS] CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdKill(args ...string) error { + cmd := Cli.Subcmd("kill", []string{"CONTAINER [CONTAINER...]"}, "Kill a running container using SIGKILL or a specified signal", true) + signal := cmd.String([]string{"s", "-signal"}, "KILL", "Signal to send to the container") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errNames []string + for _, name := range cmd.Args() { + if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", name, *signal), nil, nil)); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + errNames = append(errNames, name) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errNames) > 0 { + return fmt.Errorf("Error: failed to kill containers: %v", errNames) + } + return nil +} diff --git a/api/client/load.go b/api/client/load.go new file mode 100644 index 00000000..9501db4f --- /dev/null +++ b/api/client/load.go @@ -0,0 +1,42 @@ +package client + +import ( + "io" + "os" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdLoad loads an image from a tar archive. +// +// The tar archive is read from STDIN by default, or from a tar archive file. +// +// Usage: docker load [OPTIONS] +func (cli *DockerCli) CmdLoad(args ...string) error { + cmd := Cli.Subcmd("load", nil, "Load an image from a tar archive or STDIN", true) + infile := cmd.String([]string{"i", "-input"}, "", "Read from a tar archive file, instead of STDIN") + cmd.Require(flag.Exact, 0) + + cmd.ParseFlags(args, true) + + var ( + input io.Reader = cli.in + err error + ) + if *infile != "" { + input, err = os.Open(*infile) + if err != nil { + return err + } + } + sopts := &streamOpts{ + rawTerminal: true, + in: input, + out: cli.out, + } + if _, err := cli.stream("POST", "/images/load", sopts); err != nil { + return err + } + return nil +} diff --git a/api/client/login.go b/api/client/login.go new file mode 100644 index 00000000..68ec5c6d --- /dev/null +++ b/api/client/login.go @@ -0,0 +1,147 @@ +package client + +import ( + "bufio" + "encoding/json" + "fmt" + "io" + "os" + "strings" + + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/cliconfig" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/registry" +) + +// CmdLogin logs in or registers a user to a Docker registry service. +// +// If no server is specified, the user will be logged into or registered to the registry's index server. 
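+// For example (hypothetical registry address):
+//
+//	docker login                                     // default index server
+//	docker login -u alice registry.example.com:5000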
+// +// Usage: docker login SERVER +func (cli *DockerCli) CmdLogin(args ...string) error { + cmd := Cli.Subcmd("login", []string{"[SERVER]"}, "Register or log in to a Docker registry server, if no server is\nspecified \""+registry.IndexServer+"\" is the default.", true) + cmd.Require(flag.Max, 1) + + var username, password, email string + + cmd.StringVar(&username, []string{"u", "-username"}, "", "Username") + cmd.StringVar(&password, []string{"p", "-password"}, "", "Password") + cmd.StringVar(&email, []string{"e", "-email"}, "", "Email") + + cmd.ParseFlags(args, true) + + serverAddress := registry.IndexServer + if len(cmd.Args()) > 0 { + serverAddress = cmd.Arg(0) + } + + promptDefault := func(prompt string, configDefault string) { + if configDefault == "" { + fmt.Fprintf(cli.out, "%s: ", prompt) + } else { + fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault) + } + } + + readInput := func(in io.Reader, out io.Writer) string { + reader := bufio.NewReader(in) + line, _, err := reader.ReadLine() + if err != nil { + fmt.Fprintln(out, err.Error()) + os.Exit(1) + } + return string(line) + } + + authconfig, ok := cli.configFile.AuthConfigs[serverAddress] + if !ok { + authconfig = cliconfig.AuthConfig{} + } + + if username == "" { + promptDefault("Username", authconfig.Username) + username = readInput(cli.in, cli.out) + username = strings.Trim(username, " ") + if username == "" { + username = authconfig.Username + } + } + // Assume that a different username means they may not want to use + // the password or email from the config file, so prompt them + if username != authconfig.Username { + if password == "" { + oldState, err := term.SaveState(cli.inFd) + if err != nil { + return err + } + fmt.Fprintf(cli.out, "Password: ") + term.DisableEcho(cli.inFd, oldState) + + password = readInput(cli.in, cli.out) + fmt.Fprint(cli.out, "\n") + + term.RestoreTerminal(cli.inFd, oldState) + if password == "" { + return fmt.Errorf("Error : Password Required") + } + } + + if email == "" { + promptDefault("Email", authconfig.Email) + email = readInput(cli.in, cli.out) + if email == "" { + email = authconfig.Email + } + } + } else { + // However, if they don't override the username use the + // password or email from the cmd line if specified. IOW, allow + // then to change/override them. 
And if not specified, just + // use what's in the config file + if password == "" { + password = authconfig.Password + } + if email == "" { + email = authconfig.Email + } + } + authconfig.Username = username + authconfig.Password = password + authconfig.Email = email + authconfig.ServerAddress = serverAddress + cli.configFile.AuthConfigs[serverAddress] = authconfig + + serverResp, err := cli.call("POST", "/auth", cli.configFile.AuthConfigs[serverAddress], nil) + if serverResp.statusCode == 401 { + delete(cli.configFile.AuthConfigs, serverAddress) + if err2 := cli.configFile.Save(); err2 != nil { + fmt.Fprintf(cli.out, "WARNING: could not save config file: %v\n", err2) + } + return err + } + if err != nil { + return err + } + + defer serverResp.body.Close() + + var response types.AuthResponse + if err := json.NewDecoder(serverResp.body).Decode(&response); err != nil { + // Upon error, remove entry + delete(cli.configFile.AuthConfigs, serverAddress) + return err + } + + if err := cli.configFile.Save(); err != nil { + return fmt.Errorf("Error saving config file: %v", err) + } + fmt.Fprintf(cli.out, "WARNING: login credentials saved in %s\n", cli.configFile.Filename()) + + if response.Status != "" { + fmt.Fprintf(cli.out, "%s\n", response.Status) + } + return nil +} diff --git a/api/client/logout.go b/api/client/logout.go new file mode 100644 index 00000000..e81299b1 --- /dev/null +++ b/api/client/logout.go @@ -0,0 +1,38 @@ +package client + +import ( + "fmt" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/registry" +) + +// CmdLogout logs a user out from a Docker registry. +// +// If no server is specified, the user will be logged out from the registry's index server. +// +// Usage: docker logout [SERVER] +func (cli *DockerCli) CmdLogout(args ...string) error { + cmd := Cli.Subcmd("logout", []string{"[SERVER]"}, "Log out from a Docker registry, if no server is\nspecified \""+registry.IndexServer+"\" is the default.", true) + cmd.Require(flag.Max, 1) + + cmd.ParseFlags(args, true) + + serverAddress := registry.IndexServer + if len(cmd.Args()) > 0 { + serverAddress = cmd.Arg(0) + } + + if _, ok := cli.configFile.AuthConfigs[serverAddress]; !ok { + fmt.Fprintf(cli.out, "Not logged in to %s\n", serverAddress) + } else { + fmt.Fprintf(cli.out, "Remove login credentials for %s\n", serverAddress) + delete(cli.configFile.AuthConfigs, serverAddress) + + if err := cli.configFile.Save(); err != nil { + return fmt.Errorf("Failed to save docker config: %v", err) + } + } + return nil +} diff --git a/api/client/logs.go b/api/client/logs.go new file mode 100644 index 00000000..f1d647f3 --- /dev/null +++ b/api/client/logs.go @@ -0,0 +1,69 @@ +package client + +import ( + "encoding/json" + "fmt" + "net/url" + "time" + + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/timeutils" +) + +// CmdLogs fetches the logs of a given container. 
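+// For example (assuming a container named "web"):
+//
+//	docker logs -f --tail 100 web
+//
+// Only the "json-file" logging driver is supported; the code below rejects
+// containers configured with any other driver.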
+// +// docker logs [OPTIONS] CONTAINER +func (cli *DockerCli) CmdLogs(args ...string) error { + cmd := Cli.Subcmd("logs", []string{"CONTAINER"}, "Fetch the logs of a container", true) + follow := cmd.Bool([]string{"f", "-follow"}, false, "Follow log output") + since := cmd.String([]string{"-since"}, "", "Show logs since timestamp") + times := cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps") + tail := cmd.String([]string{"-tail"}, "all", "Number of lines to show from the end of the logs") + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + name := cmd.Arg(0) + + serverResp, err := cli.call("GET", "/containers/"+name+"/json", nil, nil) + if err != nil { + return err + } + + var c types.ContainerJSON + if err := json.NewDecoder(serverResp.body).Decode(&c); err != nil { + return err + } + + if logType := c.HostConfig.LogConfig.Type; logType != "json-file" { + return fmt.Errorf("\"logs\" command is supported only for \"json-file\" logging driver (got: %s)", logType) + } + + v := url.Values{} + v.Set("stdout", "1") + v.Set("stderr", "1") + + if *since != "" { + v.Set("since", timeutils.GetTimestamp(*since, time.Now())) + } + + if *times { + v.Set("timestamps", "1") + } + + if *follow { + v.Set("follow", "1") + } + v.Set("tail", *tail) + + sopts := &streamOpts{ + rawTerminal: c.Config.Tty, + out: cli.out, + err: cli.err, + } + + _, err = cli.stream("GET", "/containers/"+name+"/logs?"+v.Encode(), sopts) + return err +} diff --git a/api/client/network.go b/api/client/network.go new file mode 100644 index 00000000..b550668b --- /dev/null +++ b/api/client/network.go @@ -0,0 +1,15 @@ +// +build experimental + +package client + +import ( + "os" + + nwclient "github.com/docker/libnetwork/client" +) + +func (cli *DockerCli) CmdNetwork(args ...string) error { + nCli := nwclient.NewNetworkCli(cli.out, cli.err, nwclient.CallFunc(cli.callWrapper)) + args = append([]string{"network"}, args...) + return nCli.Cmd(os.Args[0], args...) +} diff --git a/api/client/pause.go b/api/client/pause.go new file mode 100644 index 00000000..94dd59d7 --- /dev/null +++ b/api/client/pause.go @@ -0,0 +1,32 @@ +package client + +import ( + "fmt" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdPause pauses all processes within one or more containers. +// +// Usage: docker pause CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdPause(args ...string) error { + cmd := Cli.Subcmd("pause", []string{"CONTAINER [CONTAINER...]"}, "Pause all processes within a container", true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errNames []string + for _, name := range cmd.Args() { + if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/pause", name), nil, nil)); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + errNames = append(errNames, name) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errNames) > 0 { + return fmt.Errorf("Error: failed to pause containers: %v", errNames) + } + return nil +} diff --git a/api/client/port.go b/api/client/port.go new file mode 100644 index 00000000..d8bcbf6e --- /dev/null +++ b/api/client/port.go @@ -0,0 +1,72 @@ +package client + +import ( + "encoding/json" + "fmt" + "strings" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/nat" +) + +// CmdPort lists port mappings for a container. +// If a private port is specified, it also shows the public-facing port that is NATed to the private port. 
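+// For example (assuming a container named "web" that publishes port 80):
+//
+//	docker port web          // list every mapping
+//	docker port web 80/tcp   // only the frontend NAT-ed to 80/tcp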
+// +// Usage: docker port CONTAINER [PRIVATE_PORT[/PROTO]] +func (cli *DockerCli) CmdPort(args ...string) error { + cmd := Cli.Subcmd("port", []string{"CONTAINER [PRIVATE_PORT[/PROTO]]"}, "List port mappings for the CONTAINER, or lookup the public-facing port that\nis NAT-ed to the PRIVATE_PORT", true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + serverResp, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, nil) + if err != nil { + return err + } + + defer serverResp.body.Close() + + var c struct { + NetworkSettings struct { + Ports nat.PortMap + } + } + + if err := json.NewDecoder(serverResp.body).Decode(&c); err != nil { + return err + } + + if cmd.NArg() == 2 { + var ( + port = cmd.Arg(1) + proto = "tcp" + parts = strings.SplitN(port, "/", 2) + ) + + if len(parts) == 2 && len(parts[1]) != 0 { + port = parts[0] + proto = parts[1] + } + natPort := port + "/" + proto + newP, err := nat.NewPort(proto, port) + if err != nil { + return err + } + if frontends, exists := c.NetworkSettings.Ports[newP]; exists && frontends != nil { + for _, frontend := range frontends { + fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIP, frontend.HostPort) + } + return nil + } + return fmt.Errorf("Error: No public port '%s' published for %s", natPort, cmd.Arg(0)) + } + + for from, frontends := range c.NetworkSettings.Ports { + for _, frontend := range frontends { + fmt.Fprintf(cli.out, "%s -> %s:%s\n", from, frontend.HostIP, frontend.HostPort) + } + } + + return nil +} diff --git a/api/client/ps.go b/api/client/ps.go new file mode 100644 index 00000000..88ca1ee6 --- /dev/null +++ b/api/client/ps.go @@ -0,0 +1,116 @@ +package client + +import ( + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/docker/api/client/ps" + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/parsers/filters" +) + +// CmdPs outputs a list of Docker containers. 
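+// For example:
+//
+//	docker ps -a -f status=exited
+//	docker ps --format "table {{.ID}}\t{{.Names}}\t{{.Status}}"
+//
+// Custom --format templates are rendered by the ps package defined below.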
+// +// Usage: docker ps [OPTIONS] +func (cli *DockerCli) CmdPs(args ...string) error { + var ( + err error + + psFilterArgs = filters.Args{} + v = url.Values{} + + cmd = Cli.Subcmd("ps", nil, "List containers", true) + quiet = cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs") + size = cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes") + all = cmd.Bool([]string{"a", "-all"}, false, "Show all containers (default shows just running)") + noTrunc = cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") + nLatest = cmd.Bool([]string{"l", "-latest"}, false, "Show the latest created container, include non-running") + since = cmd.String([]string{"#sinceId", "#-since-id", "-since"}, "", "Show created since Id or Name, include non-running") + before = cmd.String([]string{"#beforeId", "#-before-id", "-before"}, "", "Show only container created before Id or Name") + last = cmd.Int([]string{"n"}, -1, "Show n last created containers, include non-running") + format = cmd.String([]string{"-format"}, "", "Pretty-print containers using a Go template") + flFilter = opts.NewListOpts(nil) + ) + cmd.Require(flag.Exact, 0) + + cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") + + cmd.ParseFlags(args, true) + if *last == -1 && *nLatest { + *last = 1 + } + + if *all { + v.Set("all", "1") + } + + if *last != -1 { + v.Set("limit", strconv.Itoa(*last)) + } + + if *since != "" { + v.Set("since", *since) + } + + if *before != "" { + v.Set("before", *before) + } + + if *size { + v.Set("size", "1") + } + + // Consolidate all filter flags, and sanity check them. + // They'll get processed in the daemon/server. + for _, f := range flFilter.GetAll() { + if psFilterArgs, err = filters.ParseFlag(f, psFilterArgs); err != nil { + return err + } + } + + if len(psFilterArgs) > 0 { + filterJSON, err := filters.ToParam(psFilterArgs) + if err != nil { + return err + } + + v.Set("filters", filterJSON) + } + + serverResp, err := cli.call("GET", "/containers/json?"+v.Encode(), nil, nil) + if err != nil { + return err + } + + defer serverResp.body.Close() + + containers := []types.Container{} + if err := json.NewDecoder(serverResp.body).Decode(&containers); err != nil { + return err + } + + f := *format + if len(f) == 0 { + if len(cli.PsFormat()) > 0 && !*quiet { + f = cli.PsFormat() + } else { + f = "table" + } + } + + psCtx := ps.Context{ + Output: cli.out, + Format: f, + Quiet: *quiet, + Size: *size, + Trunc: !*noTrunc, + } + + ps.Format(psCtx, containers) + + return nil +} diff --git a/api/client/ps/custom.go b/api/client/ps/custom.go new file mode 100644 index 00000000..6d2518b5 --- /dev/null +++ b/api/client/ps/custom.go @@ -0,0 +1,220 @@ +package ps + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "text/tabwriter" + "text/template" + "time" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/pkg/units" +) + +const ( + tableKey = "table" + + idHeader = "CONTAINER ID" + imageHeader = "IMAGE" + namesHeader = "NAMES" + commandHeader = "COMMAND" + createdAtHeader = "CREATED AT" + runningForHeader = "CREATED" + statusHeader = "STATUS" + portsHeader = "PORTS" + sizeHeader = "SIZE" + labelsHeader = "LABELS" +) + +type containerContext struct { + trunc bool + header []string + c types.Container +} + +func (c *containerContext) ID() string { + c.addHeader(idHeader) + if c.trunc { + return 
stringid.TruncateID(c.c.ID) + } + return c.c.ID +} + +func (c *containerContext) Names() string { + c.addHeader(namesHeader) + names := stripNamePrefix(c.c.Names) + if c.trunc { + for _, name := range names { + if len(strings.Split(name, "/")) == 1 { + names = []string{name} + break + } + } + } + return strings.Join(names, ",") +} + +func (c *containerContext) Image() string { + c.addHeader(imageHeader) + if c.c.Image == "" { + return "" + } + return c.c.Image +} + +func (c *containerContext) Command() string { + c.addHeader(commandHeader) + command := c.c.Command + if c.trunc { + command = stringutils.Truncate(command, 20) + } + return strconv.Quote(command) +} + +func (c *containerContext) CreatedAt() string { + c.addHeader(createdAtHeader) + return time.Unix(int64(c.c.Created), 0).String() +} + +func (c *containerContext) RunningFor() string { + c.addHeader(runningForHeader) + createdAt := time.Unix(int64(c.c.Created), 0) + return units.HumanDuration(time.Now().UTC().Sub(createdAt)) +} + +func (c *containerContext) Ports() string { + c.addHeader(portsHeader) + return api.DisplayablePorts(c.c.Ports) +} + +func (c *containerContext) Status() string { + c.addHeader(statusHeader) + return c.c.Status +} + +func (c *containerContext) Size() string { + c.addHeader(sizeHeader) + srw := units.HumanSize(float64(c.c.SizeRw)) + sv := units.HumanSize(float64(c.c.SizeRootFs)) + + sf := srw + if c.c.SizeRootFs > 0 { + sf = fmt.Sprintf("%s (virtual %s)", srw, sv) + } + return sf +} + +func (c *containerContext) Labels() string { + c.addHeader(labelsHeader) + if c.c.Labels == nil { + return "" + } + + var joinLabels []string + for k, v := range c.c.Labels { + joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) + } + return strings.Join(joinLabels, ",") +} + +func (c *containerContext) Label(name string) string { + n := strings.Split(name, ".") + r := strings.NewReplacer("-", " ", "_", " ") + h := r.Replace(n[len(n)-1]) + + c.addHeader(h) + + if c.c.Labels == nil { + return "" + } + return c.c.Labels[name] +} + +func (c *containerContext) fullHeader() string { + if c.header == nil { + return "" + } + return strings.Join(c.header, "\t") +} + +func (c *containerContext) addHeader(header string) { + if c.header == nil { + c.header = []string{} + } + c.header = append(c.header, strings.ToUpper(header)) +} + +func customFormat(ctx Context, containers []types.Container) { + var ( + table bool + header string + format = ctx.Format + buffer = bytes.NewBufferString("") + ) + + if strings.HasPrefix(ctx.Format, tableKey) { + table = true + format = format[len(tableKey):] + } + + format = strings.Trim(format, " ") + r := strings.NewReplacer(`\t`, "\t", `\n`, "\n") + format = r.Replace(format) + + if table && ctx.Size { + format += "\t{{.Size}}" + } + + tmpl, err := template.New("").Parse(format) + if err != nil { + buffer.WriteString(fmt.Sprintf("Template parsing error: %v\n", err)) + buffer.WriteTo(ctx.Output) + return + } + + for _, container := range containers { + containerCtx := &containerContext{ + trunc: ctx.Trunc, + c: container, + } + if err := tmpl.Execute(buffer, containerCtx); err != nil { + buffer = bytes.NewBufferString(fmt.Sprintf("Template parsing error: %v\n", err)) + buffer.WriteTo(ctx.Output) + return + } + if table && len(header) == 0 { + header = containerCtx.fullHeader() + } + buffer.WriteString("\n") + } + + if table { + if len(header) == 0 { + // if we still don't have a header, we didn't have any containers so we need to fake it to get the right headers from the template + 
containerCtx := &containerContext{} + tmpl.Execute(bytes.NewBufferString(""), containerCtx) + header = containerCtx.fullHeader() + } + + t := tabwriter.NewWriter(ctx.Output, 20, 1, 3, ' ', 0) + t.Write([]byte(header)) + t.Write([]byte("\n")) + buffer.WriteTo(t) + t.Flush() + } else { + buffer.WriteTo(ctx.Output) + } +} + +func stripNamePrefix(ss []string) []string { + for i, s := range ss { + ss[i] = s[1:] + } + + return ss +} diff --git a/api/client/ps/custom_test.go b/api/client/ps/custom_test.go new file mode 100644 index 00000000..dba2e891 --- /dev/null +++ b/api/client/ps/custom_test.go @@ -0,0 +1,102 @@ +package ps + +import ( + "bytes" + "reflect" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" +) + +func TestContainerPsContext(t *testing.T) { + containerId := stringid.GenerateRandomID() + unix := time.Now().Unix() + + var ctx containerContext + cases := []struct { + container types.Container + trunc bool + expValue string + expHeader string + call func() string + }{ + {types.Container{ID: containerId}, true, stringid.TruncateID(containerId), idHeader, ctx.ID}, + {types.Container{Names: []string{"/foobar_baz"}}, true, "foobar_baz", namesHeader, ctx.Names}, + {types.Container{Image: "ubuntu"}, true, "ubuntu", imageHeader, ctx.Image}, + {types.Container{Image: ""}, true, "", imageHeader, ctx.Image}, + {types.Container{Command: "sh -c 'ls -la'"}, true, `"sh -c 'ls -la'"`, commandHeader, ctx.Command}, + {types.Container{Created: int(unix)}, true, time.Unix(unix, 0).String(), createdAtHeader, ctx.CreatedAt}, + {types.Container{Ports: []types.Port{{PrivatePort: 8080, PublicPort: 8080, Type: "tcp"}}}, true, "8080/tcp", portsHeader, ctx.Ports}, + {types.Container{Status: "RUNNING"}, true, "RUNNING", statusHeader, ctx.Status}, + {types.Container{SizeRw: 10}, true, "10 B", sizeHeader, ctx.Size}, + {types.Container{SizeRw: 10, SizeRootFs: 20}, true, "10 B (virtual 20 B)", sizeHeader, ctx.Size}, + {types.Container{Labels: map[string]string{"cpu": "6", "storage": "ssd"}}, true, "cpu=6,storage=ssd", labelsHeader, ctx.Labels}, + } + + for _, c := range cases { + ctx = containerContext{c: c.container, trunc: c.trunc} + v := c.call() + if strings.Contains(v, ",") { + // comma-separated values means probably a map input, which won't + // be guaranteed to have the same order as our expected value + // We'll create maps and use reflect.DeepEquals to check instead: + entriesMap := make(map[string]string) + expMap := make(map[string]string) + entries := strings.Split(v, ",") + expectedEntries := strings.Split(c.expValue, ",") + for _, entry := range entries { + keyval := strings.Split(entry, "=") + entriesMap[keyval[0]] = keyval[1] + } + for _, expected := range expectedEntries { + keyval := strings.Split(expected, "=") + expMap[keyval[0]] = keyval[1] + } + if !reflect.DeepEqual(expMap, entriesMap) { + t.Fatalf("Expected entries: %v, got: %v", c.expValue, v) + } + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + + h := ctx.fullHeader() + if h != c.expHeader { + t.Fatalf("Expected %s, was %s\n", c.expHeader, h) + } + } + + c := types.Container{Labels: map[string]string{"com.docker.swarm.swarm-id": "33", "com.docker.swarm.node_name": "ubuntu"}} + ctx = containerContext{c: c, trunc: true} + + sid := ctx.Label("com.docker.swarm.swarm-id") + node := ctx.Label("com.docker.swarm.node_name") + if sid != "33" { + t.Fatalf("Expected 33, was %s\n", sid) + } + + if node != "ubuntu" { + t.Fatalf("Expected ubuntu, 
was %s\n", node) + } + + h := ctx.fullHeader() + if h != "SWARM ID\tNODE NAME" { + t.Fatalf("Expected %s, was %s\n", "SWARM ID\tNODE NAME", h) + + } +} + +func TestContainerPsFormatError(t *testing.T) { + out := bytes.NewBufferString("") + ctx := Context{ + Format: "{{InvalidFunction}}", + Output: out, + } + + customFormat(ctx, make([]types.Container, 0)) + if out.String() != "Template parsing error: template: :1: function \"InvalidFunction\" not defined\n" { + t.Fatalf("Expected format error, got `%v`\n", out.String()) + } +} diff --git a/api/client/ps/formatter.go b/api/client/ps/formatter.go new file mode 100644 index 00000000..9b3bdc81 --- /dev/null +++ b/api/client/ps/formatter.go @@ -0,0 +1,65 @@ +package ps + +import ( + "io" + + "github.com/docker/docker/api/types" +) + +const ( + tableFormatKey = "table" + rawFormatKey = "raw" + + defaultTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Ports}}\t{{.Names}}" + defaultQuietFormat = "{{.ID}}" +) + +type Context struct { + Output io.Writer + Format string + Size bool + Quiet bool + Trunc bool +} + +func Format(ctx Context, containers []types.Container) { + switch ctx.Format { + case tableFormatKey: + tableFormat(ctx, containers) + case rawFormatKey: + rawFormat(ctx, containers) + default: + customFormat(ctx, containers) + } +} + +func rawFormat(ctx Context, containers []types.Container) { + if ctx.Quiet { + ctx.Format = `container_id: {{.ID}}` + } else { + ctx.Format = `container_id: {{.ID}} +image: {{.Image}} +command: {{.Command}} +created_at: {{.CreatedAt}} +status: {{.Status}} +names: {{.Names}} +labels: {{.Labels}} +ports: {{.Ports}} +` + if ctx.Size { + ctx.Format += `size: {{.Size}} +` + } + } + + customFormat(ctx, containers) +} + +func tableFormat(ctx Context, containers []types.Container) { + ctx.Format = defaultTableFormat + if ctx.Quiet { + ctx.Format = defaultQuietFormat + } + + customFormat(ctx, containers) +} diff --git a/api/client/pull.go b/api/client/pull.go new file mode 100644 index 00000000..d6b85543 --- /dev/null +++ b/api/client/pull.go @@ -0,0 +1,53 @@ +package client + +import ( + "fmt" + "net/url" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/graph/tags" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/registry" +) + +// CmdPull pulls an image or a repository from the registry. 
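+// For example:
+//
+//	docker pull ubuntu      // no tag given, so the default tag "latest" is used
+//	docker pull -a ubuntu   // download every tagged image in the repository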
+// +// Usage: docker pull [OPTIONS] IMAGENAME[:TAG|@DIGEST] +func (cli *DockerCli) CmdPull(args ...string) error { + cmd := Cli.Subcmd("pull", []string{"NAME[:TAG|@DIGEST]"}, "Pull an image or a repository from a registry", true) + allTags := cmd.Bool([]string{"a", "-all-tags"}, false, "Download all tagged images in the repository") + addTrustedFlags(cmd, true) + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + remote := cmd.Arg(0) + + taglessRemote, tag := parsers.ParseRepositoryTag(remote) + if tag == "" && !*allTags { + tag = tags.DEFAULTTAG + fmt.Fprintf(cli.out, "Using default tag: %s\n", tag) + } else if tag != "" && *allTags { + return fmt.Errorf("tag can't be used with --all-tags/-a") + } + + ref := registry.ParseReference(tag) + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(taglessRemote) + if err != nil { + return err + } + + if isTrusted() && !ref.HasDigest() { + // Check if tag is digest + authConfig := registry.ResolveAuthConfig(cli.configFile, repoInfo.Index) + return cli.trustedPull(repoInfo, ref, authConfig) + } + + v := url.Values{} + v.Set("fromImage", ref.ImageName(taglessRemote)) + + _, _, err = cli.clientRequestAttemptLogin("POST", "/images/create?"+v.Encode(), nil, cli.out, repoInfo.Index, "pull") + return err +} diff --git a/api/client/push.go b/api/client/push.go new file mode 100644 index 00000000..5d01511c --- /dev/null +++ b/api/client/push.go @@ -0,0 +1,53 @@ +package client + +import ( + "fmt" + "net/url" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/registry" +) + +// CmdPush pushes an image or repository to the registry. +// +// Usage: docker push NAME[:TAG] +func (cli *DockerCli) CmdPush(args ...string) error { + cmd := Cli.Subcmd("push", []string{"NAME[:TAG]"}, "Push an image or a repository to a registry", true) + addTrustedFlags(cmd, false) + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + remote, tag := parsers.ParseRepositoryTag(cmd.Arg(0)) + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(remote) + if err != nil { + return err + } + // Resolve the Auth config relevant for this server + authConfig := registry.ResolveAuthConfig(cli.configFile, repoInfo.Index) + // If we're not using a custom registry, we know the restrictions + // applied to repository names and can warn the user in advance. + // Custom repositories can have different rules, and we must also + // allow pushing by image ID. + if repoInfo.Official { + username := authConfig.Username + if username == "" { + username = "" + } + return fmt.Errorf("You cannot push a \"root\" repository. Please rename your repository to / (ex: %s/%s)", username, repoInfo.LocalName) + } + + if isTrusted() { + return cli.trustedPush(repoInfo, tag, authConfig) + } + + v := url.Values{} + v.Set("tag", tag) + + _, _, err = cli.clientRequestAttemptLogin("POST", "/images/"+remote+"/push?"+v.Encode(), nil, cli.out, repoInfo.Index, "push") + return err +} diff --git a/api/client/rename.go b/api/client/rename.go new file mode 100644 index 00000000..ae09a462 --- /dev/null +++ b/api/client/rename.go @@ -0,0 +1,27 @@ +package client + +import ( + "fmt" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdRename renames a container. 
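+// For example (hypothetical container names):
+//
+//	docker rename old_web new_web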
+// +// Usage: docker rename OLD_NAME NEW_NAME +func (cli *DockerCli) CmdRename(args ...string) error { + cmd := Cli.Subcmd("rename", []string{"OLD_NAME NEW_NAME"}, "Rename a container", true) + cmd.Require(flag.Exact, 2) + + cmd.ParseFlags(args, true) + + oldName := cmd.Arg(0) + newName := cmd.Arg(1) + + if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/rename?name=%s", oldName, newName), nil, nil)); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + return fmt.Errorf("Error: failed to rename container named %s", oldName) + } + return nil +} diff --git a/api/client/restart.go b/api/client/restart.go new file mode 100644 index 00000000..88de4f75 --- /dev/null +++ b/api/client/restart.go @@ -0,0 +1,39 @@ +package client + +import ( + "fmt" + "net/url" + "strconv" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdRestart restarts one or more running containers. +// +// Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdRestart(args ...string) error { + cmd := Cli.Subcmd("restart", []string{"CONTAINER [CONTAINER...]"}, "Restart a running container", true) + nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing the container") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + v := url.Values{} + v.Set("t", strconv.Itoa(*nSeconds)) + + var errNames []string + for _, name := range cmd.Args() { + _, _, err := readBody(cli.call("POST", "/containers/"+name+"/restart?"+v.Encode(), nil, nil)) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + errNames = append(errNames, name) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errNames) > 0 { + return fmt.Errorf("Error: failed to restart containers: %v", errNames) + } + return nil +} diff --git a/api/client/rm.go b/api/client/rm.go new file mode 100644 index 00000000..5766727a --- /dev/null +++ b/api/client/rm.go @@ -0,0 +1,55 @@ +package client + +import ( + "fmt" + "net/url" + "strings" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdRm removes one or more containers. +// +// Usage: docker rm [OPTIONS] CONTAINER [CONTAINER...] 
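+//
+// For example (assuming a container named "web"):
+//
+//	docker rm -v web   // remove "web" together with its volumes
+//	docker rm -f web   // SIGKILL a running "web", then remove it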
+func (cli *DockerCli) CmdRm(args ...string) error { + cmd := Cli.Subcmd("rm", []string{"CONTAINER [CONTAINER...]"}, "Remove one or more containers", true) + v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated with the container") + link := cmd.Bool([]string{"l", "#link", "-link"}, false, "Remove the specified link") + force := cmd.Bool([]string{"f", "-force"}, false, "Force the removal of a running container (uses SIGKILL)") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + val := url.Values{} + if *v { + val.Set("v", "1") + } + if *link { + val.Set("link", "1") + } + + if *force { + val.Set("force", "1") + } + + var errNames []string + for _, name := range cmd.Args() { + if name == "" { + return fmt.Errorf("Container name cannot be empty") + } + name = strings.Trim(name, "/") + + _, _, err := readBody(cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil, nil)) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + errNames = append(errNames, name) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errNames) > 0 { + return fmt.Errorf("Error: failed to remove containers: %v", errNames) + } + return nil +} diff --git a/api/client/rmi.go b/api/client/rmi.go new file mode 100644 index 00000000..25d5646e --- /dev/null +++ b/api/client/rmi.go @@ -0,0 +1,61 @@ +package client + +import ( + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdRmi removes all images with the specified name(s). +// +// Usage: docker rmi [OPTIONS] IMAGE [IMAGE...] +func (cli *DockerCli) CmdRmi(args ...string) error { + cmd := Cli.Subcmd("rmi", []string{"IMAGE [IMAGE...]"}, "Remove one or more images", true) + force := cmd.Bool([]string{"f", "-force"}, false, "Force removal of the image") + noprune := cmd.Bool([]string{"-no-prune"}, false, "Do not delete untagged parents") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + v := url.Values{} + if *force { + v.Set("force", "1") + } + if *noprune { + v.Set("noprune", "1") + } + + var errNames []string + for _, name := range cmd.Args() { + serverResp, err := cli.call("DELETE", "/images/"+name+"?"+v.Encode(), nil, nil) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + errNames = append(errNames, name) + } else { + defer serverResp.body.Close() + + dels := []types.ImageDelete{} + if err := json.NewDecoder(serverResp.body).Decode(&dels); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + errNames = append(errNames, name) + continue + } + + for _, del := range dels { + if del.Deleted != "" { + fmt.Fprintf(cli.out, "Deleted: %s\n", del.Deleted) + } else { + fmt.Fprintf(cli.out, "Untagged: %s\n", del.Untagged) + } + } + } + } + if len(errNames) > 0 { + return fmt.Errorf("Error: failed to remove images: %v", errNames) + } + return nil +} diff --git a/api/client/run.go b/api/client/run.go new file mode 100644 index 00000000..182dc4b1 --- /dev/null +++ b/api/client/run.go @@ -0,0 +1,257 @@ +package client + +import ( + "fmt" + "io" + "net/url" + "os" + "runtime" + + "github.com/Sirupsen/logrus" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork/resolvconf/dns" +) + +func (cid *cidFile) Close() error { + cid.file.Close() + + if !cid.written { + if err := os.Remove(cid.path); err != nil { + return fmt.Errorf("failed to 
remove the CID file '%s': %s \n", cid.path, err) + } + } + + return nil +} + +func (cid *cidFile) Write(id string) error { + if _, err := cid.file.Write([]byte(id)); err != nil { + return fmt.Errorf("Failed to write the container ID to the file: %s", err) + } + cid.written = true + return nil +} + +// CmdRun runs a command in a new container. +// +// Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...] +func (cli *DockerCli) CmdRun(args ...string) error { + cmd := Cli.Subcmd("run", []string{"IMAGE [COMMAND] [ARG...]"}, "Run a command in a new container", true) + addTrustedFlags(cmd, true) + + // These are flags not stored in Config/HostConfig + var ( + flAutoRemove = cmd.Bool([]string{"-rm"}, false, "Automatically remove the container when it exits") + flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Run container in background and print container ID") + flSigProxy = cmd.Bool([]string{"-sig-proxy"}, true, "Proxy received signals to the process") + flName = cmd.String([]string{"-name"}, "", "Assign a name to the container") + flAttach *opts.ListOpts + + ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d") + ErrConflictRestartPolicyAndAutoRemove = fmt.Errorf("Conflicting options: --restart and --rm") + ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: --rm and -d") + ) + + config, hostConfig, cmd, err := runconfig.Parse(cmd, args) + // just in case the Parse does not exit + if err != nil { + cmd.ReportError(err.Error(), true) + os.Exit(1) + } + + if len(hostConfig.Dns) > 0 { + // check the DNS settings passed via --dns against + // localhost regexp to warn if they are trying to + // set a DNS to a localhost address + for _, dnsIP := range hostConfig.Dns { + if dns.IsLocalhost(dnsIP) { + fmt.Fprintf(cli.err, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP) + break + } + } + } + if config.Image == "" { + cmd.Usage() + return nil + } + + if !*flDetach { + if err := cli.CheckTtyInput(config.AttachStdin, config.Tty); err != nil { + return err + } + } else { + if fl := cmd.Lookup("-attach"); fl != nil { + flAttach = fl.Value.(*opts.ListOpts) + if flAttach.Len() != 0 { + return ErrConflictAttachDetach + } + } + if *flAutoRemove { + return ErrConflictDetachAutoRemove + } + + config.AttachStdin = false + config.AttachStdout = false + config.AttachStderr = false + config.StdinOnce = false + } + + // Disable flSigProxy when in TTY mode + sigProxy := *flSigProxy + if config.Tty { + sigProxy = false + } + + // Telling the Windows daemon the initial size of the tty during start makes + // a far better user experience rather than relying on subsequent resizes + // to cause things to catch up. 
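+	// (On other platforms the size is instead propagated after start by the
+	// cli.monitorTtySize call further down.)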
+ if runtime.GOOS == "windows" { + hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = cli.getTtySize() + } + + createResponse, err := cli.createContainer(config, hostConfig, hostConfig.ContainerIDFile, *flName) + if err != nil { + return err + } + if sigProxy { + sigc := cli.forwardAllSignals(createResponse.ID) + defer signal.StopCatch(sigc) + } + var ( + waitDisplayID chan struct{} + errCh chan error + ) + if !config.AttachStdout && !config.AttachStderr { + // Make this asynchronous to allow the client to write to stdin before having to read the ID + waitDisplayID = make(chan struct{}) + go func() { + defer close(waitDisplayID) + fmt.Fprintf(cli.out, "%s\n", createResponse.ID) + }() + } + if *flAutoRemove && (hostConfig.RestartPolicy.IsAlways() || hostConfig.RestartPolicy.IsOnFailure()) { + return ErrConflictRestartPolicyAndAutoRemove + } + // We need to instantiate the chan because the select needs it. It can + // be closed but can't be uninitialized. + hijacked := make(chan io.Closer) + // Block the return until the chan gets closed + defer func() { + logrus.Debugf("End of CmdRun(), waiting for hijack to finish.") + if _, ok := <-hijacked; ok { + fmt.Fprintln(cli.err, "Hijack did not finish (chan still open)") + } + }() + if config.AttachStdin || config.AttachStdout || config.AttachStderr { + var ( + out, stderr io.Writer + in io.ReadCloser + v = url.Values{} + ) + v.Set("stream", "1") + if config.AttachStdin { + v.Set("stdin", "1") + in = cli.in + } + if config.AttachStdout { + v.Set("stdout", "1") + out = cli.out + } + if config.AttachStderr { + v.Set("stderr", "1") + if config.Tty { + stderr = cli.out + } else { + stderr = cli.err + } + } + errCh = promise.Go(func() error { + return cli.hijack("POST", "/containers/"+createResponse.ID+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked, nil) + }) + } else { + close(hijacked) + } + // Acknowledge the hijack before starting + select { + case closer := <-hijacked: + // Make sure that the hijack gets closed when returning (results + // in closing the hijack chan and freeing server's goroutines) + if closer != nil { + defer closer.Close() + } + case err := <-errCh: + if err != nil { + logrus.Debugf("Error hijack: %s", err) + return err + } + } + + defer func() { + if *flAutoRemove { + if _, _, err = readBody(cli.call("DELETE", "/containers/"+createResponse.ID+"?v=1", nil, nil)); err != nil { + fmt.Fprintf(cli.err, "Error deleting container: %s\n", err) + } + } + }() + + // start the container + if _, _, err = readBody(cli.call("POST", "/containers/"+createResponse.ID+"/start", nil, nil)); err != nil { + return err + } + + if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminalOut { + if err := cli.monitorTtySize(createResponse.ID, false); err != nil { + fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err) + } + } + + if errCh != nil { + if err := <-errCh; err != nil { + logrus.Debugf("Error hijack: %s", err) + return err + } + } + + // Detached mode: wait for the id to be displayed and return.
+ if !config.AttachStdout && !config.AttachStderr { + // Detached mode + <-waitDisplayID + return nil + } + + var status int + + // Attached mode + if *flAutoRemove { + // Autoremove: wait for the container to finish, retrieve + // the exit code and remove the container + if _, _, err := readBody(cli.call("POST", "/containers/"+createResponse.ID+"/wait", nil, nil)); err != nil { + return err + } + if _, status, err = getExitCode(cli, createResponse.ID); err != nil { + return err + } + } else { + // No Autoremove: simply retrieve the exit code + if !config.Tty { + // In non-TTY mode, we can't detach, so we must wait for container exit + if status, err = waitForExit(cli, createResponse.ID); err != nil { + return err + } + } else { + // In TTY mode, there is a race: if the process dies too slowly, the state could + // be updated after the getExitCode call and result in the wrong exit code being reported + if _, status, err = getExitCode(cli, createResponse.ID); err != nil { + return err + } + } + } + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + return nil +} diff --git a/api/client/save.go b/api/client/save.go new file mode 100644 index 00000000..5155319c --- /dev/null +++ b/api/client/save.go @@ -0,0 +1,58 @@ +package client + +import ( + "errors" + "io" + "net/url" + "os" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdSave saves one or more images to a tar archive. +// +// The tar archive is written to STDOUT by default, or written to a file. +// +// Usage: docker save [OPTIONS] IMAGE [IMAGE...] +func (cli *DockerCli) CmdSave(args ...string) error { + cmd := Cli.Subcmd("save", []string{"IMAGE [IMAGE...]"}, "Save one or more images to a tar archive (streamed to STDOUT by default)", true) + outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var ( + output io.Writer = cli.out + err error + ) + if *outfile != "" { + output, err = os.Create(*outfile) + if err != nil { + return err + } + } else if cli.isTerminalOut { + return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.") + } + + sopts := &streamOpts{ + rawTerminal: true, + out: output, + } + + if len(cmd.Args()) == 1 { + image := cmd.Arg(0) + if _, err := cli.stream("GET", "/images/"+image+"/get", sopts); err != nil { + return err + } + } else { + v := url.Values{} + for _, arg := range cmd.Args() { + v.Add("names", arg) + } + if _, err := cli.stream("GET", "/images/get?"+v.Encode(), sopts); err != nil { + return err + } + } + return nil +} diff --git a/api/client/search.go b/api/client/search.go new file mode 100644 index 00000000..2305d083 --- /dev/null +++ b/api/client/search.go @@ -0,0 +1,87 @@ +package client + +import ( + "encoding/json" + "fmt" + "net/url" + "sort" + "strings" + "text/tabwriter" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/registry" +) + +// ByStars sorts search results in ascending order by number of stars. +type ByStars []registry.SearchResult + +func (r ByStars) Len() int { return len(r) } +func (r ByStars) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r ByStars) Less(i, j int) bool { return r[i].StarCount < r[j].StarCount } + +// CmdSearch searches the Docker Hub for images.
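+// Results are sorted by star count in descending order; the --stars,
+// --automated and (deprecated) --trusted flags filter what is listed.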
+// +// Usage: docker search [OPTIONS] TERM +func (cli *DockerCli) CmdSearch(args ...string) error { + cmd := Cli.Subcmd("search", []string{"TERM"}, "Search the Docker Hub for images", true) + noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") + trusted := cmd.Bool([]string{"#t", "#trusted", "#-trusted"}, false, "Only show trusted builds") + automated := cmd.Bool([]string{"-automated"}, false, "Only show automated builds") + stars := cmd.Uint([]string{"s", "#stars", "-stars"}, 0, "Only display results with at least x stars") + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + name := cmd.Arg(0) + v := url.Values{} + v.Set("term", name) + + // Resolve the Repository name from fqn to hostname + name + taglessRemote, _ := parsers.ParseRepositoryTag(name) + repoInfo, err := registry.ParseRepositoryInfo(taglessRemote) + if err != nil { + return err + } + + rdr, _, err := cli.clientRequestAttemptLogin("GET", "/images/search?"+v.Encode(), nil, nil, repoInfo.Index, "search") + if err != nil { + return err + } + + defer rdr.Close() + + results := ByStars{} + if err := json.NewDecoder(rdr).Decode(&results); err != nil { + return err + } + + sort.Sort(sort.Reverse(results)) + + w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0) + fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tAUTOMATED\n") + for _, res := range results { + if ((*automated || *trusted) && (!res.IsTrusted && !res.IsAutomated)) || (int(*stars) > res.StarCount) { + continue + } + desc := strings.Replace(res.Description, "\n", " ", -1) + desc = strings.Replace(desc, "\r", " ", -1) + if !*noTrunc && len(desc) > 45 { + desc = stringutils.Truncate(desc, 42) + "..." + } + fmt.Fprintf(w, "%s\t%s\t%d\t", res.Name, desc, res.StarCount) + if res.IsOfficial { + fmt.Fprint(w, "[OK]") + } + fmt.Fprint(w, "\t") + if res.IsAutomated || res.IsTrusted { + fmt.Fprint(w, "[OK]") + } + fmt.Fprint(w, "\n") + } + w.Flush() + return nil +} diff --git a/api/client/service.go b/api/client/service.go new file mode 100644 index 00000000..6a1e3da8 --- /dev/null +++ b/api/client/service.go @@ -0,0 +1,15 @@ +// +build experimental + +package client + +import ( + "os" + + nwclient "github.com/docker/libnetwork/client" +) + +func (cli *DockerCli) CmdService(args ...string) error { + nCli := nwclient.NewNetworkCli(cli.out, cli.err, nwclient.CallFunc(cli.callWrapper)) + args = append([]string{"service"}, args...) + return nCli.Cmd(os.Args[0], args...) +} diff --git a/api/client/start.go b/api/client/start.go new file mode 100644 index 00000000..e039df02 --- /dev/null +++ b/api/client/start.go @@ -0,0 +1,170 @@ +package client + +import ( + "encoding/json" + "fmt" + "io" + "net/url" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/signal" +) + +func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { + sigc := make(chan os.Signal, 128) + signal.CatchAll(sigc) + go func() { + for s := range sigc { + if s == signal.SIGCHLD { + continue + } + var sig string + for sigStr, sigN := range signal.SignalMap { + if sigN == s { + sig = sigStr + break + } + } + if sig == "" { + fmt.Fprintf(cli.err, "Unsupported signal: %v. Discarding.\n", s) + continue + } + if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, nil)); err != nil { + logrus.Debugf("Error sending signal: %s", err) + } + } + }() + return sigc +} + +// CmdStart starts one or more stopped containers. +// +// Usage: docker start [OPTIONS] CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdStart(args ...string) error { + cmd := Cli.Subcmd("start", []string{"CONTAINER [CONTAINER...]"}, "Start one or more stopped containers", true) + attach := cmd.Bool([]string{"a", "-attach"}, false, "Attach STDOUT/STDERR and forward signals") + openStdin := cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's STDIN") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var ( + cErr chan error + tty bool + ) + + if *attach || *openStdin { + if cmd.NArg() > 1 { + return fmt.Errorf("You cannot start and attach multiple containers at once.") + } + + serverResp, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, nil) + if err != nil { + return err + } + + defer serverResp.body.Close() + + var c types.ContainerJSON + if err := json.NewDecoder(serverResp.body).Decode(&c); err != nil { + return err + } + + tty = c.Config.Tty + + if !tty { + sigc := cli.forwardAllSignals(cmd.Arg(0)) + defer signal.StopCatch(sigc) + } + + var in io.ReadCloser + + v := url.Values{} + v.Set("stream", "1") + + if *openStdin && c.Config.OpenStdin { + v.Set("stdin", "1") + in = cli.in + } + + v.Set("stdout", "1") + v.Set("stderr", "1") + + hijacked := make(chan io.Closer) + // Block the return until the chan gets closed + defer func() { + logrus.Debugf("CmdStart() returned, defer waiting for hijack to finish.") + if _, ok := <-hijacked; ok { + fmt.Fprintln(cli.err, "Hijack did not finish (chan still open)") + } + cli.in.Close() + }() + cErr = promise.Go(func() error { + return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, hijacked, nil) + }) + + // Acknowledge the hijack before starting + select { + case closer := <-hijacked: + // Make sure that the hijack gets closed when returning (results + // in closing the hijack chan and freeing server's goroutines) + if closer != nil { + defer closer.Close() + } + case err := <-cErr: + if err != nil { + return err + } + } + } + + var encounteredError error + var errNames []string + for _, name := range cmd.Args() { + _, _, err := readBody(cli.call("POST", "/containers/"+name+"/start", nil, nil)) + if err != nil { + if !*attach && !*openStdin { + // attach and openStdin being false means we may be starting multiple containers; + // if a container fails to start, show the error message and continue with the next + fmt.Fprintf(cli.err, "%s\n", err) + errNames = append(errNames, name) + } else { + encounteredError = err + } + } else { + if !*attach && !*openStdin { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + } + + if len(errNames) > 0 { + encounteredError = fmt.Errorf("Error: failed to start containers: %v", errNames) + } + if encounteredError != nil { + return encounteredError + } + + if *openStdin || *attach { + if tty && cli.isTerminalOut { + if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil { + fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err) + } + } + if attachErr := <-cErr; attachErr != nil { + return attachErr + } + _, status, err := getExitCode(cli, cmd.Arg(0)) + if err != nil { + return err + } + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + } + return nil +} diff --git a/api/client/stats.go
b/api/client/stats.go new file mode 100644 index 00000000..1feb1e11 --- /dev/null +++ b/api/client/stats.go @@ -0,0 +1,202 @@ +package client + +import ( + "encoding/json" + "fmt" + "io" + "net/url" + "sort" + "strings" + "sync" + "text/tabwriter" + "time" + + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/units" +) + +type containerStats struct { + Name string + CPUPercentage float64 + Memory float64 + MemoryLimit float64 + MemoryPercentage float64 + NetworkRx float64 + NetworkTx float64 + mu sync.RWMutex + err error +} + +func (s *containerStats) Collect(cli *DockerCli, streamStats bool) { + v := url.Values{} + if streamStats { + v.Set("stream", "1") + } else { + v.Set("stream", "0") + } + serverResp, err := cli.call("GET", "/containers/"+s.Name+"/stats?"+v.Encode(), nil, nil) + if err != nil { + s.mu.Lock() + s.err = err + s.mu.Unlock() + return + } + + defer serverResp.body.Close() + + var ( + previousCPU uint64 + previousSystem uint64 + dec = json.NewDecoder(serverResp.body) + u = make(chan error, 1) + ) + go func() { + for { + var v *types.Stats + if err := dec.Decode(&v); err != nil { + u <- err + return + } + var ( + memPercent = float64(v.MemoryStats.Usage) / float64(v.MemoryStats.Limit) * 100.0 + cpuPercent = 0.0 + ) + previousCPU = v.PreCpuStats.CpuUsage.TotalUsage + previousSystem = v.PreCpuStats.SystemUsage + cpuPercent = calculateCPUPercent(previousCPU, previousSystem, v) + s.mu.Lock() + s.CPUPercentage = cpuPercent + s.Memory = float64(v.MemoryStats.Usage) + s.MemoryLimit = float64(v.MemoryStats.Limit) + s.MemoryPercentage = memPercent + s.NetworkRx = float64(v.Network.RxBytes) + s.NetworkTx = float64(v.Network.TxBytes) + s.mu.Unlock() + u <- nil + if !streamStats { + return + } + } + }() + for { + select { + case <-time.After(2 * time.Second): + // zero out the values if we have not received an update within + // the specified duration. + s.mu.Lock() + s.CPUPercentage = 0 + s.Memory = 0 + s.MemoryPercentage = 0 + s.mu.Unlock() + case err := <-u: + if err != nil { + s.mu.Lock() + s.err = err + s.mu.Unlock() + return + } + } + if !streamStats { + return + } + } +} + +func (s *containerStats) Display(w io.Writer) error { + s.mu.RLock() + defer s.mu.RUnlock() + if s.err != nil { + return s.err + } + fmt.Fprintf(w, "%s\t%.2f%%\t%s/%s\t%.2f%%\t%s/%s\n", + s.Name, + s.CPUPercentage, + units.HumanSize(s.Memory), units.HumanSize(s.MemoryLimit), + s.MemoryPercentage, + units.HumanSize(s.NetworkRx), units.HumanSize(s.NetworkTx)) + return nil +} + +// CmdStats displays a live stream of resource usage statistics for one or more containers. +// +// This shows real-time information on CPU usage, memory usage, and network I/O. +// +// Usage: docker stats CONTAINER [CONTAINER...] 
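+//
+// For example (names and numbers purely illustrative):
+//
+//   $ docker stats redis1 redis2
+//   CONTAINER   CPU %   MEM USAGE/LIMIT   MEM %   NET I/O
+//   redis1      0.07%   796 KB/64 MB      1.21%   788 B/648 B
+//   redis2      0.07%   2.746 MB/64 MB    4.29%   1.266 KB/648 B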
+func (cli *DockerCli) CmdStats(args ...string) error { + cmd := Cli.Subcmd("stats", []string{"CONTAINER [CONTAINER...]"}, "Display a live stream of one or more containers' resource usage statistics", true) + noStream := cmd.Bool([]string{"-no-stream"}, false, "Disable streaming stats and only pull the first result") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + names := cmd.Args() + sort.Strings(names) + var ( + cStats []*containerStats + w = tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + ) + printHeader := func() { + if !*noStream { + fmt.Fprint(cli.out, "\033[2J") + fmt.Fprint(cli.out, "\033[H") + } + io.WriteString(w, "CONTAINER\tCPU %\tMEM USAGE/LIMIT\tMEM %\tNET I/O\n") + } + for _, n := range names { + s := &containerStats{Name: n} + cStats = append(cStats, s) + go s.Collect(cli, !*noStream) + } + // do a quick pause so that any failed connections for containers that do not exist are able to be + // evicted before we display the initial or default values. + time.Sleep(1500 * time.Millisecond) + var errs []string + for _, c := range cStats { + c.mu.Lock() + if c.err != nil { + errs = append(errs, fmt.Sprintf("%s: %v", c.Name, c.err)) + } + c.mu.Unlock() + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, ", ")) + } + for range time.Tick(500 * time.Millisecond) { + printHeader() + toRemove := []int{} + for i, s := range cStats { + if err := s.Display(w); err != nil && !*noStream { + toRemove = append(toRemove, i) + } + } + for j := len(toRemove) - 1; j >= 0; j-- { + i := toRemove[j] + cStats = append(cStats[:i], cStats[i+1:]...) + } + if len(cStats) == 0 { + return nil + } + w.Flush() + if *noStream { + break + } + } + return nil +} + +func calculateCPUPercent(previousCPU, previousSystem uint64, v *types.Stats) float64 { + var ( + cpuPercent = 0.0 + // calculate the change for the cpu usage of the container in between readings + cpuDelta = float64(v.CpuStats.CpuUsage.TotalUsage - previousCPU) + // calculate the change for the entire system between readings + systemDelta = float64(v.CpuStats.SystemUsage - previousSystem) + ) + + if systemDelta > 0.0 && cpuDelta > 0.0 { + cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CpuStats.CpuUsage.PercpuUsage)) * 100.0 + } + return cpuPercent +} diff --git a/api/client/stats_unit_test.go b/api/client/stats_unit_test.go new file mode 100644 index 00000000..0831dbcb --- /dev/null +++ b/api/client/stats_unit_test.go @@ -0,0 +1,29 @@ +package client + +import ( + "bytes" + "sync" + "testing" +) + +func TestDisplay(t *testing.T) { + c := &containerStats{ + Name: "app", + CPUPercentage: 30.0, + Memory: 100 * 1024 * 1024.0, + MemoryLimit: 2048 * 1024 * 1024.0, + MemoryPercentage: 100.0 / 2048.0 * 100.0, + NetworkRx: 100 * 1024 * 1024, + NetworkTx: 800 * 1024 * 1024, + mu: sync.RWMutex{}, + } + var b bytes.Buffer + if err := c.Display(&b); err != nil { + t.Fatalf("c.Display() gave error: %s", err) + } + got := b.String() + want := "app\t30.00%\t104.9 MB/2.147 GB\t4.88%\t104.9 MB/838.9 MB\n" + if got != want { + t.Fatalf("c.Display() = %q, want %q", got, want) + } +} diff --git a/api/client/stop.go b/api/client/stop.go new file mode 100644 index 00000000..b7348a7b --- /dev/null +++ b/api/client/stop.go @@ -0,0 +1,41 @@ +package client + +import ( + "fmt" + "net/url" + "strconv" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdStop stops one or more running containers. 
+// +// A running container is stopped by first sending SIGTERM and then SIGKILL if the container fails to stop within a grace period (the default is 10 seconds). +// +// Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdStop(args ...string) error { + cmd := Cli.Subcmd("stop", []string{"CONTAINER [CONTAINER...]"}, "Stop a running container by sending SIGTERM and then SIGKILL after a\ngrace period", true) + nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing it") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + v := url.Values{} + v.Set("t", strconv.Itoa(*nSeconds)) + + var errNames []string + for _, name := range cmd.Args() { + _, _, err := readBody(cli.call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil, nil)) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + errNames = append(errNames, name) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errNames) > 0 { + return fmt.Errorf("Error: failed to stop containers: %v", errNames) + } + return nil +} diff --git a/api/client/tag.go b/api/client/tag.go new file mode 100644 index 00000000..454c7ec5 --- /dev/null +++ b/api/client/tag.go @@ -0,0 +1,42 @@ +package client + +import ( + "net/url" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/registry" +) + +// CmdTag tags an image into a repository. +// +// Usage: docker tag [OPTIONS] IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG] +func (cli *DockerCli) CmdTag(args ...string) error { + cmd := Cli.Subcmd("tag", []string{"IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]"}, "Tag an image into a repository", true) + force := cmd.Bool([]string{"f", "#force", "-force"}, false, "Force") + cmd.Require(flag.Exact, 2) + + cmd.ParseFlags(args, true) + + var ( + repository, tag = parsers.ParseRepositoryTag(cmd.Arg(1)) + v = url.Values{} + ) + + //Check if the given image name can be resolved + if err := registry.ValidateRepositoryName(repository); err != nil { + return err + } + v.Set("repo", repository) + v.Set("tag", tag) + + if *force { + v.Set("force", "1") + } + + if _, _, err := readBody(cli.call("POST", "/images/"+cmd.Arg(0)+"/tag?"+v.Encode(), nil, nil)); err != nil { + return err + } + return nil +} diff --git a/api/client/top.go b/api/client/top.go new file mode 100644 index 00000000..c9934fe0 --- /dev/null +++ b/api/client/top.go @@ -0,0 +1,49 @@ +package client + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + "text/tabwriter" + + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdTop displays the running processes of a container. 
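+// Anything after the container name is passed through to ps as extra
+// arguments (e.g. "docker top web aux"; invocation illustrative).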
+// +// Usage: docker top CONTAINER +func (cli *DockerCli) CmdTop(args ...string) error { + cmd := Cli.Subcmd("top", []string{"CONTAINER [ps OPTIONS]"}, "Display the running processes of a container", true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + val := url.Values{} + if cmd.NArg() > 1 { + val.Set("ps_args", strings.Join(cmd.Args()[1:], " ")) + } + + serverResp, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/top?"+val.Encode(), nil, nil) + if err != nil { + return err + } + + defer serverResp.body.Close() + + procList := types.ContainerProcessList{} + if err := json.NewDecoder(serverResp.body).Decode(&procList); err != nil { + return err + } + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + fmt.Fprintln(w, strings.Join(procList.Titles, "\t")) + + for _, proc := range procList.Processes { + fmt.Fprintln(w, strings.Join(proc, "\t")) + } + w.Flush() + return nil +} diff --git a/api/client/trust.go b/api/client/trust.go new file mode 100644 index 00000000..4d984cfa --- /dev/null +++ b/api/client/trust.go @@ -0,0 +1,454 @@ +package client + +import ( + "bufio" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/pkg/ansiescape" + "github.com/docker/docker/pkg/ioutils" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/tlsconfig" + "github.com/docker/docker/registry" + "github.com/docker/notary/client" + "github.com/docker/notary/pkg/passphrase" + "github.com/docker/notary/trustmanager" + "github.com/endophage/gotuf/data" +) + +var untrusted bool + +func addTrustedFlags(fs *flag.FlagSet, verify bool) { + var trusted bool + if e := os.Getenv("DOCKER_CONTENT_TRUST"); e != "" { + if t, err := strconv.ParseBool(e); t || err != nil { + // treat any other value as true + trusted = true + } + } + message := "Skip image signing" + if verify { + message = "Skip image verification" + } + fs.BoolVar(&untrusted, []string{"-disable-content-trust"}, !trusted, message) +} + +func isTrusted() bool { + return !untrusted +} + +var targetRegexp = regexp.MustCompile(`([\S]+): digest: ([\S]+) size: ([\d]+)`) + +type target struct { + reference registry.Reference + digest digest.Digest + size int64 +} + +func (cli *DockerCli) trustDirectory() string { + return filepath.Join(cliconfig.ConfigDir(), "trust") +} + +// certificateDirectory returns the directory containing +// TLS certificates for the given server. An error is +// returned if there was an error parsing the server string. 
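+// For example (path illustrative), with the default config directory a
+// server of "https://notary.docker.io" maps to "~/.docker/tls/notary.docker.io".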
+func (cli *DockerCli) certificateDirectory(server string) (string, error) { + u, err := url.Parse(server) + if err != nil { + return "", err + } + + return filepath.Join(cliconfig.ConfigDir(), "tls", u.Host), nil +} + +func trustServer(index *registry.IndexInfo) string { + if s := os.Getenv("DOCKER_CONTENT_TRUST_SERVER"); s != "" { + if !strings.HasPrefix(s, "https://") { + return "https://" + s + } + return s + } + if index.Official { + return registry.NotaryServer + } + return "https://" + index.Name +} + +type simpleCredentialStore struct { + auth cliconfig.AuthConfig +} + +func (scs simpleCredentialStore) Basic(u *url.URL) (string, string) { + return scs.auth.Username, scs.auth.Password +} + +func (cli *DockerCli) getNotaryRepository(repoInfo *registry.RepositoryInfo, authConfig cliconfig.AuthConfig) (*client.NotaryRepository, error) { + server := trustServer(repoInfo.Index) + if !strings.HasPrefix(server, "https://") { + return nil, errors.New("unsupported scheme: https required for trust server") + } + + var cfg = tlsconfig.ClientDefault + cfg.InsecureSkipVerify = !repoInfo.Index.Secure + + // Get certificate base directory + certDir, err := cli.certificateDirectory(server) + if err != nil { + return nil, err + } + logrus.Debugf("reading certificate directory: %s", certDir) + + if err := registry.ReadCertsDirectory(&cfg, certDir); err != nil { + return nil, err + } + + base := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: &cfg, + DisableKeepAlives: true, + } + + // Skip configuration headers since request is not going to Docker daemon + modifiers := registry.DockerHeaders(http.Header{}) + authTransport := transport.NewTransport(base, modifiers...) + pingClient := &http.Client{ + Transport: authTransport, + Timeout: 5 * time.Second, + } + endpointStr := server + "/v2/" + req, err := http.NewRequest("GET", endpointStr, nil) + if err != nil { + return nil, err + } + resp, err := pingClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + challengeManager := auth.NewSimpleChallengeManager() + if err := challengeManager.AddResponse(resp); err != nil { + return nil, err + } + + creds := simpleCredentialStore{auth: authConfig} + tokenHandler := auth.NewTokenHandler(authTransport, creds, repoInfo.CanonicalName, "push", "pull") + basicHandler := auth.NewBasicHandler(creds) + modifiers = append(modifiers, transport.RequestModifier(auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))) + tr := transport.NewTransport(base, modifiers...) 
+ + return client.NewNotaryRepository(cli.trustDirectory(), repoInfo.CanonicalName, server, tr, cli.getPassphraseRetriever()) +} + +func convertTarget(t client.Target) (target, error) { + h, ok := t.Hashes["sha256"] + if !ok { + return target{}, errors.New("no valid hash, expecting sha256") + } + return target{ + reference: registry.ParseReference(t.Name), + digest: digest.NewDigestFromHex("sha256", hex.EncodeToString(h)), + size: t.Length, + }, nil +} + +func (cli *DockerCli) getPassphraseRetriever() passphrase.Retriever { + aliasMap := map[string]string{ + "root": "offline", + "snapshot": "tagging", + "targets": "tagging", + } + baseRetriever := passphrase.PromptRetrieverWithInOut(cli.in, cli.out, aliasMap) + env := map[string]string{ + "root": os.Getenv("DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE"), + "snapshot": os.Getenv("DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE"), + "targets": os.Getenv("DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE"), + } + return func(keyName string, alias string, createNew bool, numAttempts int) (string, bool, error) { + if v := env[alias]; v != "" { + return v, numAttempts > 1, nil + } + return baseRetriever(keyName, alias, createNew, numAttempts) + } +} + +func (cli *DockerCli) trustedReference(repo string, ref registry.Reference) (registry.Reference, error) { + repoInfo, err := registry.ParseRepositoryInfo(repo) + if err != nil { + return nil, err + } + + // Resolve the Auth config relevant for this server + authConfig := registry.ResolveAuthConfig(cli.configFile, repoInfo.Index) + + notaryRepo, err := cli.getNotaryRepository(repoInfo, authConfig) + if err != nil { + fmt.Fprintf(cli.out, "Error establishing connection to trust repository: %s\n", err) + return nil, err + } + + t, err := notaryRepo.GetTargetByName(ref.String()) + if err != nil { + return nil, err + } + r, err := convertTarget(*t) + if err != nil { + return nil, err + + } + + return registry.DigestReference(r.digest), nil +} + +func (cli *DockerCli) tagTrusted(repoInfo *registry.RepositoryInfo, trustedRef, ref registry.Reference) error { + fullName := trustedRef.ImageName(repoInfo.LocalName) + fmt.Fprintf(cli.out, "Tagging %s as %s\n", fullName, ref.ImageName(repoInfo.LocalName)) + tv := url.Values{} + tv.Set("repo", repoInfo.LocalName) + tv.Set("tag", ref.String()) + tv.Set("force", "1") + + if _, _, err := readBody(cli.call("POST", "/images/"+fullName+"/tag?"+tv.Encode(), nil, nil)); err != nil { + return err + } + + return nil +} + +func notaryError(err error) error { + switch err.(type) { + case *json.SyntaxError: + logrus.Debugf("Notary syntax error: %s", err) + return errors.New("no trust data available for remote repository") + case client.ErrExpired: + return fmt.Errorf("remote repository out-of-date: %v", err) + case trustmanager.ErrKeyNotFound: + return fmt.Errorf("signing keys not found: %v", err) + } + + return err +} + +func (cli *DockerCli) trustedPull(repoInfo *registry.RepositoryInfo, ref registry.Reference, authConfig cliconfig.AuthConfig) error { + var ( + v = url.Values{} + refs = []target{} + ) + + notaryRepo, err := cli.getNotaryRepository(repoInfo, authConfig) + if err != nil { + fmt.Fprintf(cli.out, "Error establishing connection to trust repository: %s\n", err) + return err + } + + if ref.String() == "" { + // List all targets + targets, err := notaryRepo.ListTargets() + if err != nil { + return notaryError(err) + } + for _, tgt := range targets { + t, err := convertTarget(*tgt) + if err != nil { + fmt.Fprintf(cli.out, "Skipping target for %q\n", repoInfo.LocalName) + continue + } + 
refs = append(refs, t) + } + } else { + t, err := notaryRepo.GetTargetByName(ref.String()) + if err != nil { + return notaryError(err) + } + r, err := convertTarget(*t) + if err != nil { + return err + } + refs = append(refs, r) + } + + v.Set("fromImage", repoInfo.LocalName) + for i, r := range refs { + displayTag := r.reference.String() + if displayTag != "" { + displayTag = ":" + displayTag + } + fmt.Fprintf(cli.out, "Pull (%d of %d): %s%s@%s\n", i+1, len(refs), repoInfo.LocalName, displayTag, r.digest) + v.Set("tag", r.digest.String()) + + _, _, err = cli.clientRequestAttemptLogin("POST", "/images/create?"+v.Encode(), nil, cli.out, repoInfo.Index, "pull") + if err != nil { + return err + } + + // If reference is not trusted, tag by trusted reference + if !r.reference.HasDigest() { + if err := cli.tagTrusted(repoInfo, registry.DigestReference(r.digest), r.reference); err != nil { + return err + } + } + } + return nil +} + +func selectKey(keys map[string]string) string { + if len(keys) == 0 { + return "" + } + + keyIDs := []string{} + for k := range keys { + keyIDs = append(keyIDs, k) + } + + // TODO(dmcgowan): let user choose if multiple keys, now pick consistently + sort.Strings(keyIDs) + + return keyIDs[0] +} + +func targetStream(in io.Writer) (io.WriteCloser, <-chan []target) { + r, w := io.Pipe() + out := io.MultiWriter(in, w) + targetChan := make(chan []target) + + go func() { + targets := []target{} + scanner := bufio.NewScanner(r) + scanner.Split(ansiescape.ScanANSILines) + for scanner.Scan() { + line := scanner.Bytes() + if matches := targetRegexp.FindSubmatch(line); len(matches) == 4 { + dgst, err := digest.ParseDigest(string(matches[2])) + if err != nil { + // Line does not match what is expected, continue looking for valid lines + logrus.Debugf("Bad digest value %q in matched line, ignoring\n", string(matches[2])) + continue + } + s, err := strconv.ParseInt(string(matches[3]), 10, 64) + if err != nil { + // Line does not match what is expected, continue looking for valid lines + logrus.Debugf("Bad size value %q in matched line, ignoring\n", string(matches[3])) + continue + } + + targets = append(targets, target{ + reference: registry.ParseReference(string(matches[1])), + digest: dgst, + size: s, + }) + } + } + targetChan <- targets + }() + + return ioutils.NewWriteCloserWrapper(out, w.Close), targetChan +} + +func (cli *DockerCli) trustedPush(repoInfo *registry.RepositoryInfo, tag string, authConfig cliconfig.AuthConfig) error { + streamOut, targetChan := targetStream(cli.out) + + v := url.Values{} + v.Set("tag", tag) + + _, _, err := cli.clientRequestAttemptLogin("POST", "/images/"+repoInfo.LocalName+"/push?"+v.Encode(), nil, streamOut, repoInfo.Index, "push") + // Close stream channel to finish target parsing + if err := streamOut.Close(); err != nil { + return err + } + // Check error from request + if err != nil { + return err + } + + // Get target results + targets := <-targetChan + + if tag == "" { + fmt.Fprintf(cli.out, "No tag specified, skipping trust metadata push\n") + return nil + } + if len(targets) == 0 { + fmt.Fprintf(cli.out, "No targets found, skipping trust metadata push\n") + return nil + } + + fmt.Fprintf(cli.out, "Signing and pushing trust metadata\n") + + repo, err := cli.getNotaryRepository(repoInfo, authConfig) + if err != nil { + fmt.Fprintf(cli.out, "Error establishing connection to notary repository: %s\n", err) + return err + } + + for _, target := range targets { + h, err := hex.DecodeString(target.digest.Hex()) + if err != nil { + return err + } + t
:= &client.Target{ + Name: target.reference.String(), + Hashes: data.Hashes{ + string(target.digest.Algorithm()): h, + }, + Length: int64(target.size), + } + if err := repo.AddTarget(t); err != nil { + return err + } + } + + err = repo.Publish() + if _, ok := err.(*client.ErrRepoNotInitialized); !ok { + return notaryError(err) + } + + ks := repo.KeyStoreManager + keys := ks.RootKeyStore().ListKeys() + + rootKey := selectKey(keys) + if rootKey == "" { + rootKey, err = ks.GenRootKey("ecdsa") + if err != nil { + return err + } + } + + cryptoService, err := ks.GetRootCryptoService(rootKey) + if err != nil { + return err + } + + if err := repo.Initialize(cryptoService); err != nil { + return notaryError(err) + } + fmt.Fprintf(cli.out, "Finished initializing %q\n", repoInfo.CanonicalName) + + return notaryError(repo.Publish()) +} diff --git a/api/client/unpause.go b/api/client/unpause.go new file mode 100644 index 00000000..cd1e6766 --- /dev/null +++ b/api/client/unpause.go @@ -0,0 +1,32 @@ +package client + +import ( + "fmt" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdUnpause unpauses all processes within a container, for one or more containers. +// +// Usage: docker unpause CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdUnpause(args ...string) error { + cmd := Cli.Subcmd("unpause", []string{"CONTAINER [CONTAINER...]"}, "Unpause all processes within a container", true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errNames []string + for _, name := range cmd.Args() { + if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/unpause", name), nil, nil)); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + errNames = append(errNames, name) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errNames) > 0 { + return fmt.Errorf("Error: failed to unpause containers: %v", errNames) + } + return nil +} diff --git a/api/client/utils.go b/api/client/utils.go new file mode 100644 index 00000000..8f822155 --- /dev/null +++ b/api/client/utils.go @@ -0,0 +1,379 @@ +package client + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + gosignal "os/signal" + "runtime" + "strconv" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/autogen/dockerversion" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/registry" +) + +var ( + errConnectionRefused = errors.New("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") +) + +type serverResponse struct { + body io.ReadCloser + header http.Header + statusCode int +} + +// HTTPClient creates a new HTTP client with the cli's client transport instance. 
+func (cli *DockerCli) HTTPClient() *http.Client { + return &http.Client{Transport: cli.transport} +} + +func (cli *DockerCli) encodeData(data interface{}) (*bytes.Buffer, error) { + params := bytes.NewBuffer(nil) + if data != nil { + if err := json.NewEncoder(params).Encode(data); err != nil { + return nil, err + } + } + return params, nil +} + +func (cli *DockerCli) clientRequest(method, path string, in io.Reader, headers map[string][]string) (*serverResponse, error) { + + serverResp := &serverResponse{ + body: nil, + statusCode: -1, + } + + expectedPayload := (method == "POST" || method == "PUT") + if expectedPayload && in == nil { + in = bytes.NewReader([]byte{}) + } + req, err := http.NewRequest(method, fmt.Sprintf("%s/v%s%s", cli.basePath, api.Version, path), in) + if err != nil { + return serverResp, err + } + + // Add the CLI config's HTTP headers BEFORE we set the Docker headers, + // so the user cannot override OUR headers + for k, v := range cli.configFile.HTTPHeaders { + req.Header.Set(k, v) + } + + req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION+" ("+runtime.GOOS+")") + req.URL.Host = cli.addr + req.URL.Scheme = cli.scheme + + if headers != nil { + for k, v := range headers { + req.Header[k] = v + } + } + + if expectedPayload && req.Header.Get("Content-Type") == "" { + req.Header.Set("Content-Type", "text/plain") + } + + resp, err := cli.HTTPClient().Do(req) + if resp != nil { + serverResp.statusCode = resp.StatusCode + } + if err != nil { + if strings.Contains(err.Error(), "connection refused") { + return serverResp, errConnectionRefused + } + + if cli.tlsConfig == nil { + return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?\n* Is your docker daemon up and running?", err) + } + if cli.tlsConfig != nil && strings.Contains(err.Error(), "remote error: bad certificate") { + return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certificate settings: %v", err) + } + + return serverResp, fmt.Errorf("An error occurred trying to connect: %v", err) + } + + if serverResp.statusCode < 200 || serverResp.statusCode >= 400 { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return serverResp, err + } + if len(body) == 0 { + return serverResp, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), req.URL) + } + return serverResp, fmt.Errorf("Error response from daemon: %s", bytes.TrimSpace(body)) + } + + serverResp.body = resp.Body + serverResp.header = resp.Header + return serverResp, nil +} + +func (cli *DockerCli) clientRequestAttemptLogin(method, path string, in io.Reader, out io.Writer, index *registry.IndexInfo, cmdName string) (io.ReadCloser, int, error) { + cmdAttempt := func(authConfig cliconfig.AuthConfig) (io.ReadCloser, int, error) { + buf, err := json.Marshal(authConfig) + if err != nil { + return nil, -1, err + } + registryAuthHeader := []string{ + base64.URLEncoding.EncodeToString(buf), + } + + // begin the request + serverResp, err := cli.clientRequest(method, path, in, map[string][]string{ + "X-Registry-Auth": registryAuthHeader, + }) + if err == nil && out != nil { + // If we are streaming output, complete the stream since + // errors may not appear until later.
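+ // (Illustrative scenario: a push can fail partway through even though the
+ // request itself already returned HTTP 200; the failure only surfaces in
+ // the streamed JSON messages handled below.)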
+ err = cli.streamBody(serverResp.body, serverResp.header.Get("Content-Type"), true, out, nil) + } + if err != nil { + // Since errors in a stream appear after status 200 has been written, + // we may need to change the status code. + if strings.Contains(err.Error(), "Authentication is required") || + strings.Contains(err.Error(), "Status 401") || + strings.Contains(err.Error(), "401 Unauthorized") || + strings.Contains(err.Error(), "status code 401") { + serverResp.statusCode = http.StatusUnauthorized + } + } + return serverResp.body, serverResp.statusCode, err + } + + // Resolve the Auth config relevant for this server + authConfig := registry.ResolveAuthConfig(cli.configFile, index) + body, statusCode, err := cmdAttempt(authConfig) + if statusCode == http.StatusUnauthorized { + fmt.Fprintf(cli.out, "\nPlease login prior to %s:\n", cmdName) + if err = cli.CmdLogin(index.GetAuthConfigKey()); err != nil { + return nil, -1, err + } + authConfig = registry.ResolveAuthConfig(cli.configFile, index) + return cmdAttempt(authConfig) + } + return body, statusCode, err +} + +func (cli *DockerCli) callWrapper(method, path string, data interface{}, headers map[string][]string) (io.ReadCloser, http.Header, int, error) { + sr, err := cli.call(method, path, data, headers) + return sr.body, sr.header, sr.statusCode, err +} + +func (cli *DockerCli) call(method, path string, data interface{}, headers map[string][]string) (*serverResponse, error) { + params, err := cli.encodeData(data) + if err != nil { + sr := &serverResponse{ + body: nil, + header: nil, + statusCode: -1, + } + return sr, err + } + + if data != nil { + if headers == nil { + headers = make(map[string][]string) + } + headers["Content-Type"] = []string{"application/json"} + } + + serverResp, err := cli.clientRequest(method, path, params, headers) + return serverResp, err +} + +type streamOpts struct { + rawTerminal bool + in io.Reader + out io.Writer + err io.Writer + headers map[string][]string +} + +func (cli *DockerCli) stream(method, path string, opts *streamOpts) (*serverResponse, error) { + serverResp, err := cli.clientRequest(method, path, opts.in, opts.headers) + if err != nil { + return serverResp, err + } + return serverResp, cli.streamBody(serverResp.body, serverResp.header.Get("Content-Type"), opts.rawTerminal, opts.out, opts.err) +} + +func (cli *DockerCli) streamBody(body io.ReadCloser, contentType string, rawTerminal bool, stdout, stderr io.Writer) error { + defer body.Close() + + if api.MatchesContentType(contentType, "application/json") { + return jsonmessage.DisplayJSONMessagesStream(body, stdout, cli.outFd, cli.isTerminalOut) + } + if stdout != nil || stderr != nil { + // When TTY is ON, use regular copy + var err error + if rawTerminal { + _, err = io.Copy(stdout, body) + } else { + _, err = stdcopy.StdCopy(stdout, stderr, body) + } + logrus.Debugf("[stream] End of stdout") + return err + } + return nil +} + +func (cli *DockerCli) resizeTty(id string, isExec bool) { + height, width := cli.getTtySize() + if height == 0 && width == 0 { + return + } + v := url.Values{} + v.Set("h", strconv.Itoa(height)) + v.Set("w", strconv.Itoa(width)) + + path := "" + if !isExec { + path = "/containers/" + id + "/resize?" + } else { + path = "/exec/" + id + "/resize?"
+ } + + if _, _, err := readBody(cli.call("POST", path+v.Encode(), nil, nil)); err != nil { + logrus.Debugf("Error resize: %s", err) + } +} + +func waitForExit(cli *DockerCli, containerID string) (int, error) { + serverResp, err := cli.call("POST", "/containers/"+containerID+"/wait", nil, nil) + if err != nil { + return -1, err + } + + defer serverResp.body.Close() + + var res types.ContainerWaitResponse + if err := json.NewDecoder(serverResp.body).Decode(&res); err != nil { + return -1, err + } + + return res.StatusCode, nil +} + +// getExitCode performs an inspect on the container. It returns +// the running state and the exit code. +func getExitCode(cli *DockerCli, containerID string) (bool, int, error) { + serverResp, err := cli.call("GET", "/containers/"+containerID+"/json", nil, nil) + if err != nil { + // If we can't connect, then the daemon probably died. + if err != errConnectionRefused { + return false, -1, err + } + return false, -1, nil + } + + defer serverResp.body.Close() + + var c types.ContainerJSON + if err := json.NewDecoder(serverResp.body).Decode(&c); err != nil { + return false, -1, err + } + + return c.State.Running, c.State.ExitCode, nil +} + +// getExecExitCode performs an inspect on the exec command. It returns +// the running state and the exit code. +func getExecExitCode(cli *DockerCli, execID string) (bool, int, error) { + serverResp, err := cli.call("GET", "/exec/"+execID+"/json", nil, nil) + if err != nil { + // If we can't connect, then the daemon probably died. + if err != errConnectionRefused { + return false, -1, err + } + return false, -1, nil + } + + defer serverResp.body.Close() + + // TODO: should we reconsider having a type in api/types? + // This is a response to /exec/{id}/json, not to a container. + var c struct { + Running bool + ExitCode int + } + + if err := json.NewDecoder(serverResp.body).Decode(&c); err != nil { + return false, -1, err + } + + return c.Running, c.ExitCode, nil +} + +func (cli *DockerCli) monitorTtySize(id string, isExec bool) error { + cli.resizeTty(id, isExec) + + if runtime.GOOS == "windows" { + go func() { + prevH, prevW := cli.getTtySize() + for { + time.Sleep(time.Millisecond * 250) + h, w := cli.getTtySize() + + if prevW != w || prevH != h { + cli.resizeTty(id, isExec) + } + prevH = h + prevW = w + } + }() + } else { + sigchan := make(chan os.Signal, 1) + gosignal.Notify(sigchan, signal.SIGWINCH) + go func() { + for range sigchan { + cli.resizeTty(id, isExec) + } + }() + } + return nil +} + +func (cli *DockerCli) getTtySize() (int, int) { + if !cli.isTerminalOut { + return 0, 0 + } + ws, err := term.GetWinsize(cli.outFd) + if err != nil { + logrus.Debugf("Error getting size: %s", err) + if ws == nil { + return 0, 0 + } + } + return int(ws.Height), int(ws.Width) +} + +func readBody(serverResp *serverResponse, err error) ([]byte, int, error) { + if serverResp.body != nil { + defer serverResp.body.Close() + } + if err != nil { + return nil, serverResp.statusCode, err + } + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return nil, -1, err + } + return body, serverResp.statusCode, nil +} diff --git a/api/client/version.go b/api/client/version.go new file mode 100644 index 00000000..4c42a32d --- /dev/null +++ b/api/client/version.go @@ -0,0 +1,96 @@ +package client + +import ( + "encoding/json" + "runtime" + "text/template" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/autogen/dockerversion" + Cli "github.com/docker/docker/cli" + flag
"github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/utils" +) + +var VersionTemplate = `Client: + Version: {{.Client.Version}} + API version: {{.Client.ApiVersion}} + Go version: {{.Client.GoVersion}} + Git commit: {{.Client.GitCommit}} + Built: {{.Client.BuildTime}} + OS/Arch: {{.Client.Os}}/{{.Client.Arch}}{{if .Client.Experimental}} + Experimental: {{.Client.Experimental}}{{end}}{{if .ServerOK}} + +Server: + Version: {{.Server.Version}} + API version: {{.Server.ApiVersion}} + Go version: {{.Server.GoVersion}} + Git commit: {{.Server.GitCommit}} + Built: {{.Server.BuildTime}} + OS/Arch: {{.Server.Os}}/{{.Server.Arch}}{{if .Server.Experimental}} + Experimental: {{.Server.Experimental}}{{end}}{{end}}` + +type VersionData struct { + Client types.Version + ServerOK bool + Server types.Version +} + +// CmdVersion shows Docker version information. +// +// Available version information is shown for: client Docker version, client API version, client Go version, client Git commit, client OS/Arch, server Docker version, server API version, server Go version, server Git commit, and server OS/Arch. +// +// Usage: docker version +func (cli *DockerCli) CmdVersion(args ...string) (err error) { + cmd := Cli.Subcmd("version", nil, "Show the Docker version information.", true) + tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template") + cmd.Require(flag.Exact, 0) + + cmd.ParseFlags(args, true) + if *tmplStr == "" { + *tmplStr = VersionTemplate + } + + var tmpl *template.Template + if tmpl, err = template.New("").Funcs(funcMap).Parse(*tmplStr); err != nil { + return Cli.StatusError{StatusCode: 64, + Status: "Template parsing error: " + err.Error()} + } + + vd := VersionData{ + Client: types.Version{ + Version: dockerversion.VERSION, + ApiVersion: api.Version, + GoVersion: runtime.Version(), + GitCommit: dockerversion.GITCOMMIT, + BuildTime: dockerversion.BUILDTIME, + Os: runtime.GOOS, + Arch: runtime.GOARCH, + Experimental: utils.ExperimentalBuild(), + }, + } + + defer func() { + if err2 := tmpl.Execute(cli.out, vd); err2 != nil && err == nil { + err = err2 + } + cli.out.Write([]byte{'\n'}) + }() + + serverResp, err := cli.call("GET", "/version", nil, nil) + if err != nil { + return err + } + + defer serverResp.body.Close() + + if err = json.NewDecoder(serverResp.body).Decode(&vd.Server); err != nil { + return Cli.StatusError{StatusCode: 1, + Status: "Error reading remote version: " + err.Error()} + } + + vd.ServerOK = true + + return +} diff --git a/api/client/wait.go b/api/client/wait.go new file mode 100644 index 00000000..829a320c --- /dev/null +++ b/api/client/wait.go @@ -0,0 +1,35 @@ +package client + +import ( + "fmt" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdWait blocks until a container stops, then prints its exit code. +// +// If more than one container is specified, this will wait synchronously on each container. +// +// Usage: docker wait CONTAINER [CONTAINER...] 
+func (cli *DockerCli) CmdWait(args ...string) error { + cmd := Cli.Subcmd("wait", []string{"CONTAINER [CONTAINER...]"}, "Block until a container stops, then print its exit code.", true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errNames []string + for _, name := range cmd.Args() { + status, err := waitForExit(cli, name) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + errNames = append(errNames, name) + } else { + fmt.Fprintf(cli.out, "%d\n", status) + } + } + if len(errNames) > 0 { + return fmt.Errorf("Error: failed to wait for containers: %v", errNames) + } + return nil +} diff --git a/api/common.go b/api/common.go new file mode 100644 index 00000000..8d65edab --- /dev/null +++ b/api/common.go @@ -0,0 +1,133 @@ +package api + +import ( + "fmt" + "mime" + "path/filepath" + "sort" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/version" + "github.com/docker/libtrust" +) + +// Common constants for daemon and client. +const ( + // Current REST API version + Version version.Version = "1.20" + + // Minimum REST API version supported + MinVersion version.Version = "1.12" + + // Default filename with Docker commands, read by docker build + DefaultDockerfileName string = "Dockerfile" +) + +type ByPrivatePort []types.Port + +func (r ByPrivatePort) Len() int { return len(r) } +func (r ByPrivatePort) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r ByPrivatePort) Less(i, j int) bool { return r[i].PrivatePort < r[j].PrivatePort } + +func DisplayablePorts(ports []types.Port) string { + var ( + result = []string{} + hostMappings = []string{} + firstInGroupMap map[string]int + lastInGroupMap map[string]int + ) + firstInGroupMap = make(map[string]int) + lastInGroupMap = make(map[string]int) + sort.Sort(ByPrivatePort(ports)) + for _, port := range ports { + var ( + current = port.PrivatePort + portKey = port.Type + firstInGroup int + lastInGroup int + ) + if port.IP != "" { + if port.PublicPort != current { + hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type)) + continue + } + portKey = fmt.Sprintf("%s/%s", port.IP, port.Type) + } + firstInGroup = firstInGroupMap[portKey] + lastInGroup = lastInGroupMap[portKey] + + if firstInGroup == 0 { + firstInGroupMap[portKey] = current + lastInGroupMap[portKey] = current + continue + } + + if current == (lastInGroup + 1) { + lastInGroupMap[portKey] = current + continue + } + result = append(result, FormGroup(portKey, firstInGroup, lastInGroup)) + firstInGroupMap[portKey] = current + lastInGroupMap[portKey] = current + } + for portKey, firstInGroup := range firstInGroupMap { + result = append(result, FormGroup(portKey, firstInGroup, lastInGroupMap[portKey])) + } + result = append(result, hostMappings...)
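+ // The joined result reads e.g. "80/tcp, 443-445/tcp, 0.0.0.0:8080->80/tcp" (illustrative).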
+ return strings.Join(result, ", ") +} + +func FormGroup(key string, start, last int) string { + var ( + group string + parts = strings.Split(key, "/") + groupType = parts[0] + ip = "" + ) + if len(parts) > 1 { + ip = parts[0] + groupType = parts[1] + } + if start == last { + group = fmt.Sprintf("%d", start) + } else { + group = fmt.Sprintf("%d-%d", start, last) + } + if ip != "" { + group = fmt.Sprintf("%s:%s->%s", ip, group, group) + } + return fmt.Sprintf("%s/%s", group, groupType) +} + +func MatchesContentType(contentType, expectedType string) bool { + mimetype, _, err := mime.ParseMediaType(contentType) + if err != nil { + logrus.Errorf("Error parsing media type: %s error: %v", contentType, err) + } + return err == nil && mimetype == expectedType +} + +// LoadOrCreateTrustKey attempts to load the libtrust key at the given path, +// otherwise generates a new one +func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { + err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700) + if err != nil { + return nil, err + } + trustKey, err := libtrust.LoadKeyFile(trustKeyPath) + if err == libtrust.ErrKeyFileDoesNotExist { + trustKey, err = libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("Error generating key: %s", err) + } + if err := libtrust.SaveKey(trustKeyPath, trustKey); err != nil { + return nil, fmt.Errorf("Error saving key file: %s", err) + } + } else if err != nil { + return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err) + } + return trustKey, nil +} diff --git a/api/server/form.go b/api/server/form.go new file mode 100644 index 00000000..6a8387a8 --- /dev/null +++ b/api/server/form.go @@ -0,0 +1,56 @@ +package server + +import ( + "fmt" + "net/http" + "strconv" + "strings" +) + +func boolValue(r *http.Request, k string) bool { + s := strings.ToLower(strings.TrimSpace(r.FormValue(k))) + return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none") +} + +// boolValueOrDefault returns the default bool passed if the query param is +// missing, otherwise it's just a proxy to boolValue above +func boolValueOrDefault(r *http.Request, k string, d bool) bool { + if _, ok := r.Form[k]; !ok { + return d + } + return boolValue(r, k) +} + +func int64ValueOrZero(r *http.Request, k string) int64 { + val, err := strconv.ParseInt(r.FormValue(k), 10, 64) + if err != nil { + return 0 + } + return val +} + +type archiveOptions struct { + name string + path string +} + +func archiveFormValues(r *http.Request, vars map[string]string) (archiveOptions, error) { + if vars == nil { + return archiveOptions{}, fmt.Errorf("Missing parameter") + } + if err := parseForm(r); err != nil { + return archiveOptions{}, err + } + + name := vars["name"] + path := r.Form.Get("path") + + switch { + case name == "": + return archiveOptions{}, fmt.Errorf("bad parameter: 'name' cannot be empty") + case path == "": + return archiveOptions{}, fmt.Errorf("bad parameter: 'path' cannot be empty") + } + + return archiveOptions{name, path}, nil +} diff --git a/api/server/form_test.go b/api/server/form_test.go new file mode 100644 index 00000000..5b3bd718 --- /dev/null +++ b/api/server/form_test.go @@ -0,0 +1,70 @@ +package server + +import ( + "net/http" + "net/url" + "testing" +) + +func TestBoolValue(t *testing.T) { + cases := map[string]bool{ + "": false, + "0": false, + "no": false, + "false": false, + "none": false, + "1": true, + "yes": true, + "true": true, + "one": true, + "100": true, + } + + for c, e := range cases { + v := url.Values{} + 
v.Set("test", c) + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + a := boolValue(r, "test") + if a != e { + t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) + } + } +} + +func TestBoolValueOrDefault(t *testing.T) { + r, _ := http.NewRequest("GET", "", nil) + if !boolValueOrDefault(r, "queryparam", true) { + t.Fatal("Expected to get true default value, got false") + } + + v := url.Values{} + v.Set("param", "") + r, _ = http.NewRequest("GET", "", nil) + r.Form = v + if boolValueOrDefault(r, "param", true) { + t.Fatal("Expected not to get true") + } +} + +func TestInt64ValueOrZero(t *testing.T) { + cases := map[string]int64{ + "": 0, + "asdf": 0, + "0": 0, + "1": 1, + } + + for c, e := range cases { + v := url.Values{} + v.Set("test", c) + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + a := int64ValueOrZero(r, "test") + if a != e { + t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) + } + } +} diff --git a/api/server/profiler.go b/api/server/profiler.go new file mode 100644 index 00000000..eebfe693 --- /dev/null +++ b/api/server/profiler.go @@ -0,0 +1,38 @@ +package server + +import ( + "expvar" + "fmt" + "net/http" + "net/http/pprof" + + "github.com/gorilla/mux" +) + +func ProfilerSetup(mainRouter *mux.Router, path string) { + var r = mainRouter.PathPrefix(path).Subrouter() + r.HandleFunc("/vars", expVars) + r.HandleFunc("/pprof/", pprof.Index) + r.HandleFunc("/pprof/cmdline", pprof.Cmdline) + r.HandleFunc("/pprof/profile", pprof.Profile) + r.HandleFunc("/pprof/symbol", pprof.Symbol) + r.HandleFunc("/pprof/block", pprof.Handler("block").ServeHTTP) + r.HandleFunc("/pprof/heap", pprof.Handler("heap").ServeHTTP) + r.HandleFunc("/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP) + r.HandleFunc("/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP) +} + +// Replicated from expvar.go as not public. 
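+// The handler writes a single JSON object keyed by the published expvar
+// names; with nothing else registered that is roughly
+//   { "cmdline": [...], "memstats": {...} }
+// (each Var's String() already yields valid JSON, hence the raw %s below).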
+func expVars(w http.ResponseWriter, r *http.Request) { + first := true + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprintf(w, "{\n") + expvar.Do(func(kv expvar.KeyValue) { + if !first { + fmt.Fprintf(w, ",\n") + } + first = false + fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) + }) + fmt.Fprintf(w, "\n}\n") +} diff --git a/api/server/server.go b/api/server/server.go new file mode 100644 index 00000000..22bcc376 --- /dev/null +++ b/api/server/server.go @@ -0,0 +1,1719 @@ +package server + +import ( + "crypto/tls" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "os" + "runtime" + "strconv" + "strings" + "time" + + "github.com/gorilla/mux" + "golang.org/x/net/websocket" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/autogen/dockerversion" + "github.com/docker/docker/builder" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/daemon" + "github.com/docker/docker/graph" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/parsers/filters" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/sockets" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/ulimit" + "github.com/docker/docker/pkg/version" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" +) + +type ServerConfig struct { + Logging bool + EnableCors bool + CorsHeaders string + Version string + SocketGroup string + TLSConfig *tls.Config +} + +type Server struct { + daemon *daemon.Daemon + cfg *ServerConfig + router *mux.Router + start chan struct{} + servers []serverCloser +} + +func New(cfg *ServerConfig) *Server { + srv := &Server{ + cfg: cfg, + start: make(chan struct{}), + } + r := createRouter(srv) + srv.router = r + return srv +} + +func (s *Server) Close() { + for _, srv := range s.servers { + if err := srv.Close(); err != nil { + logrus.Error(err) + } + } +} + +type serverCloser interface { + Serve() error + Close() error +} + +// ServeApi loops through all of the protocols sent in to docker and spawns +// off a go routine to setup a serving http.Server for each. +func (s *Server) ServeApi(protoAddrs []string) error { + var chErrors = make(chan error, len(protoAddrs)) + + for _, protoAddr := range protoAddrs { + protoAddrParts := strings.SplitN(protoAddr, "://", 2) + if len(protoAddrParts) != 2 { + return fmt.Errorf("bad format, expected PROTO://ADDR") + } + srv, err := s.newServer(protoAddrParts[0], protoAddrParts[1]) + if err != nil { + return err + } + s.servers = append(s.servers, srv...) 
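+	// Each listener below gets its own serving goroutine; the "use of
+	// closed network connection" error raised on shutdown is filtered out
+	// so that closing the server does not register as a failure.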
+ + for _, s := range srv { + logrus.Infof("Listening for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1]) + go func(s serverCloser) { + if err := s.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") { + err = nil + } + chErrors <- err + }(s) + } + } + + for i := 0; i < len(protoAddrs); i++ { + err := <-chErrors + if err != nil { + return err + } + } + + return nil +} + +type HttpServer struct { + srv *http.Server + l net.Listener +} + +func (s *HttpServer) Serve() error { + return s.srv.Serve(s.l) +} +func (s *HttpServer) Close() error { + return s.l.Close() +} + +type HttpApiFunc func(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error + +func hijackServer(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) { + conn, _, err := w.(http.Hijacker).Hijack() + if err != nil { + return nil, nil, err + } + // Flush the options to make sure the client sets the raw mode + conn.Write([]byte{}) + return conn, conn, nil +} + +func closeStreams(streams ...interface{}) { + for _, stream := range streams { + if tcpc, ok := stream.(interface { + CloseWrite() error + }); ok { + tcpc.CloseWrite() + } else if closer, ok := stream.(io.Closer); ok { + closer.Close() + } + } +} + +// Check to make sure request's Content-Type is application/json +func checkForJson(r *http.Request) error { + ct := r.Header.Get("Content-Type") + + // No Content-Type header is ok as long as there's no Body + if ct == "" { + if r.Body == nil || r.ContentLength == 0 { + return nil + } + } + + // Otherwise it better be json + if api.MatchesContentType(ct, "application/json") { + return nil + } + return fmt.Errorf("Content-Type specified (%s) must be 'application/json'", ct) +} + +//If we don't do this, POST method without Content-type (even with empty body) will fail +func parseForm(r *http.Request) error { + if r == nil { + return nil + } + if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") { + return err + } + return nil +} + +func parseMultipartForm(r *http.Request) error { + if err := r.ParseMultipartForm(4096); err != nil && !strings.HasPrefix(err.Error(), "mime:") { + return err + } + return nil +} + +func httpError(w http.ResponseWriter, err error) { + if err == nil || w == nil { + logrus.WithFields(logrus.Fields{"error": err, "writer": w}).Error("unexpected HTTP error handling") + return + } + statusCode := http.StatusInternalServerError + // FIXME: this is brittle and should not be necessary. + // If we need to differentiate between different possible error types, we should + // create appropriate error types with clearly defined meaning. + errStr := strings.ToLower(err.Error()) + for keyword, status := range map[string]int{ + "not found": http.StatusNotFound, + "no such": http.StatusNotFound, + "bad parameter": http.StatusBadRequest, + "conflict": http.StatusConflict, + "impossible": http.StatusNotAcceptable, + "wrong login/password": http.StatusUnauthorized, + "hasn't been activated": http.StatusForbidden, + } { + if strings.Contains(errStr, keyword) { + statusCode = status + break + } + } + + logrus.WithFields(logrus.Fields{"statusCode": statusCode, "err": err}).Error("HTTP Error") + http.Error(w, err.Error(), statusCode) +} + +// writeJSON writes the value v to the http response stream as json with standard +// json encoding. 
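+// A hedged usage sketch, mirroring the handlers in this file:
+//   return writeJSON(w, http.StatusOK, &types.AuthResponse{Status: status})
+// Content-Type must be set before WriteHeader, which is the order below.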
+func writeJSON(w http.ResponseWriter, code int, v interface{}) error {
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(code)
+	return json.NewEncoder(w).Encode(v)
+}
+
+func (s *Server) postAuth(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	var config *cliconfig.AuthConfig
+	err := json.NewDecoder(r.Body).Decode(&config)
+	r.Body.Close()
+	if err != nil {
+		return err
+	}
+	status, err := s.daemon.RegistryService.Auth(config)
+	if err != nil {
+		return err
+	}
+	return writeJSON(w, http.StatusOK, &types.AuthResponse{
+		Status: status,
+	})
+}
+
+func (s *Server) getVersion(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	v := &types.Version{
+		Version:    dockerversion.VERSION,
+		ApiVersion: api.Version,
+		GitCommit:  dockerversion.GITCOMMIT,
+		GoVersion:  runtime.Version(),
+		Os:         runtime.GOOS,
+		Arch:       runtime.GOARCH,
+		BuildTime:  dockerversion.BUILDTIME,
+	}
+
+	if version.GreaterThanOrEqualTo("1.19") {
+		v.Experimental = utils.ExperimentalBuild()
+	}
+
+	if kernelVersion, err := kernel.GetKernelVersion(); err == nil {
+		v.KernelVersion = kernelVersion.String()
+	}
+
+	return writeJSON(w, http.StatusOK, v)
+}
+
+func (s *Server) postContainersKill(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if vars == nil {
+		return fmt.Errorf("Missing parameter")
+	}
+	if err := parseForm(r); err != nil {
+		return err
+	}
+
+	var sig uint64
+	name := vars["name"]
+
+	// If we have a signal, look at it. Otherwise, do nothing.
+	if sigStr := r.Form.Get("signal"); sigStr != "" {
+		// Check if we passed the signal as a number:
+		// the largest legal signal is 31, so let's parse on 5 bits.
+		sigN, err := strconv.ParseUint(sigStr, 10, 5)
+		if err != nil {
+			// The signal is not a number, treat it as a string (either like
+			// "KILL" or like "SIGKILL").
+			syscallSig, ok := signal.SignalMap[strings.TrimPrefix(sigStr, "SIG")]
+			if !ok {
+				return fmt.Errorf("Invalid signal: %s", sigStr)
+			}
+			sig = uint64(syscallSig)
+		} else {
+			sig = sigN
+		}
+
+		if sig == 0 {
+			return fmt.Errorf("Invalid signal: %s", sigStr)
+		}
+	}
+
+	if err := s.daemon.ContainerKill(name, sig); err != nil {
+		_, isStopped := err.(daemon.ErrContainerNotRunning)
+		// Only swallow the error when it is "container not running" and the
+		// API is older than 1.20; from API 1.20 on the error is returned,
+		// while older APIs keep the historical silent behaviour for
+		// backwards compatibility.
+ if version.GreaterThanOrEqualTo("1.20") || !isStopped { + return fmt.Errorf("Cannot kill container %s: %v", name, err) + } + } + + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *Server) postContainersPause(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := parseForm(r); err != nil { + return err + } + + if err := s.daemon.ContainerPause(vars["name"]); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *Server) postContainersUnpause(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := parseForm(r); err != nil { + return err + } + + if err := s.daemon.ContainerUnpause(vars["name"]); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *Server) getContainersExport(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + return s.daemon.ContainerExport(vars["name"], w) +} + +func (s *Server) getImagesJSON(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + + imagesConfig := graph.ImagesConfig{ + Filters: r.Form.Get("filters"), + // FIXME this parameter could just be a match filter + Filter: r.Form.Get("filter"), + All: boolValue(r, "all"), + } + + images, err := s.daemon.Repositories().Images(&imagesConfig) + if err != nil { + return err + } + + return writeJSON(w, http.StatusOK, images) +} + +func (s *Server) getInfo(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + info, err := s.daemon.SystemInfo() + if err != nil { + return err + } + + return writeJSON(w, http.StatusOK, info) +} + +func (s *Server) getEvents(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + var since int64 = -1 + if r.Form.Get("since") != "" { + s, err := strconv.ParseInt(r.Form.Get("since"), 10, 64) + if err != nil { + return err + } + since = s + } + + var until int64 = -1 + if r.Form.Get("until") != "" { + u, err := strconv.ParseInt(r.Form.Get("until"), 10, 64) + if err != nil { + return err + } + until = u + } + + timer := time.NewTimer(0) + timer.Stop() + if until > 0 { + dur := time.Unix(until, 0).Sub(time.Now()) + timer = time.NewTimer(dur) + } + + ef, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + isFiltered := func(field string, filter []string) bool { + if len(field) == 0 { + return false + } + if len(filter) == 0 { + return false + } + for _, v := range filter { + if v == field { + return false + } + if strings.Contains(field, ":") { + image := strings.Split(field, ":") + if image[0] == v { + return false + } + } + } + return true + } + + d := s.daemon + es := d.EventsService + w.Header().Set("Content-Type", "application/json") + outStream := ioutils.NewWriteFlusher(w) + outStream.Write(nil) // make sure response is sent immediately + enc := json.NewEncoder(outStream) + + getContainerId := func(cn string) string { + c, err := d.Get(cn) + if err != nil { + return "" + } + return c.ID + } + + sendEvent := func(ev *jsonmessage.JSONMessage) error { + //incoming container 
filter can be name,id or partial id, convert and replace as a full container id + for i, cn := range ef["container"] { + ef["container"][i] = getContainerId(cn) + } + + if isFiltered(ev.Status, ef["event"]) || (isFiltered(ev.ID, ef["image"]) && + isFiltered(ev.From, ef["image"])) || isFiltered(ev.ID, ef["container"]) { + return nil + } + + return enc.Encode(ev) + } + + current, l := es.Subscribe() + if since == -1 { + current = nil + } + defer es.Evict(l) + for _, ev := range current { + if ev.Time < since { + continue + } + if err := sendEvent(ev); err != nil { + return err + } + } + + var closeNotify <-chan bool + if closeNotifier, ok := w.(http.CloseNotifier); ok { + closeNotify = closeNotifier.CloseNotify() + } + + for { + select { + case ev := <-l: + jev, ok := ev.(*jsonmessage.JSONMessage) + if !ok { + continue + } + if err := sendEvent(jev); err != nil { + return err + } + case <-timer.C: + return nil + case <-closeNotify: + logrus.Debug("Client disconnected, stop sending events") + return nil + } + } +} + +func (s *Server) getImagesHistory(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + name := vars["name"] + history, err := s.daemon.Repositories().History(name) + if err != nil { + return err + } + + return writeJSON(w, http.StatusOK, history) +} + +func (s *Server) getContainersChanges(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + changes, err := s.daemon.ContainerChanges(vars["name"]) + if err != nil { + return err + } + + return writeJSON(w, http.StatusOK, changes) +} + +func (s *Server) getContainersTop(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + if err := parseForm(r); err != nil { + return err + } + + procList, err := s.daemon.ContainerTop(vars["name"], r.Form.Get("ps_args")) + if err != nil { + return err + } + + return writeJSON(w, http.StatusOK, procList) +} + +func (s *Server) getContainersJSON(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + + config := &daemon.ContainersConfig{ + All: boolValue(r, "all"), + Size: boolValue(r, "size"), + Since: r.Form.Get("since"), + Before: r.Form.Get("before"), + Filters: r.Form.Get("filters"), + } + + if tmpLimit := r.Form.Get("limit"); tmpLimit != "" { + limit, err := strconv.Atoi(tmpLimit) + if err != nil { + return err + } + config.Limit = limit + } + + containers, err := s.daemon.Containers(config) + if err != nil { + return err + } + + return writeJSON(w, http.StatusOK, containers) +} + +func (s *Server) getContainersStats(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + stream := boolValueOrDefault(r, "stream", true) + var out io.Writer + if !stream { + w.Header().Set("Content-Type", "application/json") + out = w + } else { + out = ioutils.NewWriteFlusher(w) + } + + var closeNotifier <-chan bool + if notifier, ok := w.(http.CloseNotifier); ok { + closeNotifier = notifier.CloseNotify() + } + + config := &daemon.ContainerStatsConfig{ + Stream: stream, + OutStream: out, + Stop: closeNotifier, + } + + return 
s.daemon.ContainerStats(vars["name"], config)
+}
+
+func (s *Server) getContainersLogs(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := parseForm(r); err != nil {
+		return err
+	}
+	if vars == nil {
+		return fmt.Errorf("Missing parameter")
+	}
+
+	// Validate args here: once the stream has started we can no longer
+	// return anything other than StatusOK.
+	stdout, stderr := boolValue(r, "stdout"), boolValue(r, "stderr")
+	if !(stdout || stderr) {
+		return fmt.Errorf("Bad parameters: you must choose at least one stream")
+	}
+
+	var since time.Time
+	if r.Form.Get("since") != "" {
+		s, err := strconv.ParseInt(r.Form.Get("since"), 10, 64)
+		if err != nil {
+			return err
+		}
+		since = time.Unix(s, 0)
+	}
+
+	var closeNotifier <-chan bool
+	if notifier, ok := w.(http.CloseNotifier); ok {
+		closeNotifier = notifier.CloseNotify()
+	}
+
+	c, err := s.daemon.Get(vars["name"])
+	if err != nil {
+		return err
+	}
+
+	outStream := ioutils.NewWriteFlusher(w)
+	// Write an empty chunk of data (this is to ensure that the
+	// HTTP Response is sent immediately, even if the container has
+	// not yet produced any data).
+	outStream.Write(nil)
+
+	logsConfig := &daemon.ContainerLogsConfig{
+		Follow:     boolValue(r, "follow"),
+		Timestamps: boolValue(r, "timestamps"),
+		Since:      since,
+		Tail:       r.Form.Get("tail"),
+		UseStdout:  stdout,
+		UseStderr:  stderr,
+		OutStream:  outStream,
+		Stop:       closeNotifier,
+	}
+
+	if err := s.daemon.ContainerLogs(c, logsConfig); err != nil {
+		fmt.Fprintf(w, "Error running logs job: %s\n", err)
+	}
+
+	return nil
+}
+
+func (s *Server) postImagesTag(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := parseForm(r); err != nil {
+		return err
+	}
+	if vars == nil {
+		return fmt.Errorf("Missing parameter")
+	}
+
+	repo := r.Form.Get("repo")
+	tag := r.Form.Get("tag")
+	force := boolValue(r, "force")
+	name := vars["name"]
+	if err := s.daemon.Repositories().Tag(repo, tag, name, force); err != nil {
+		return err
+	}
+	s.daemon.EventsService.Log("tag", utils.ImageReference(repo, tag), "")
+	w.WriteHeader(http.StatusCreated)
+	return nil
+}
+
+func (s *Server) postCommit(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := parseForm(r); err != nil {
+		return err
+	}
+
+	if err := checkForJson(r); err != nil {
+		return err
+	}
+
+	cname := r.Form.Get("container")
+
+	pause := boolValue(r, "pause")
+	if r.FormValue("pause") == "" && version.GreaterThanOrEqualTo("1.13") {
+		pause = true
+	}
+
+	c, _, err := runconfig.DecodeContainerConfig(r.Body)
+	if err != nil && err != io.EOF { // Do not fail if the body is empty.
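+		// Anything other than io.EOF is a genuine decode error; an empty
+		// body simply means the commit carries no config overrides.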
+ return err + } + + commitCfg := &builder.CommitConfig{ + Pause: pause, + Repo: r.Form.Get("repo"), + Tag: r.Form.Get("tag"), + Author: r.Form.Get("author"), + Comment: r.Form.Get("comment"), + Changes: r.Form["changes"], + Config: c, + } + + imgID, err := builder.Commit(cname, s.daemon, commitCfg) + if err != nil { + return err + } + + return writeJSON(w, http.StatusCreated, &types.ContainerCommitResponse{ + ID: imgID, + }) +} + +// Creates an image from Pull or from Import +func (s *Server) postImagesCreate(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + + var ( + image = r.Form.Get("fromImage") + repo = r.Form.Get("repo") + tag = r.Form.Get("tag") + ) + authEncoded := r.Header.Get("X-Registry-Auth") + authConfig := &cliconfig.AuthConfig{} + if authEncoded != "" { + authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + authConfig = &cliconfig.AuthConfig{} + } + } + + var ( + err error + output = ioutils.NewWriteFlusher(w) + ) + + w.Header().Set("Content-Type", "application/json") + + if image != "" { //pull + if tag == "" { + image, tag = parsers.ParseRepositoryTag(image) + } + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + + imagePullConfig := &graph.ImagePullConfig{ + MetaHeaders: metaHeaders, + AuthConfig: authConfig, + OutStream: output, + } + + err = s.daemon.Repositories().Pull(image, tag, imagePullConfig) + } else { //import + if tag == "" { + repo, tag = parsers.ParseRepositoryTag(repo) + } + + src := r.Form.Get("fromSrc") + imageImportConfig := &graph.ImageImportConfig{ + Changes: r.Form["changes"], + InConfig: r.Body, + OutStream: output, + } + + // 'err' MUST NOT be defined within this block, we need any error + // generated from the download to be available to the output + // stream processing below + var newConfig *runconfig.Config + newConfig, err = builder.BuildFromConfig(s.daemon, &runconfig.Config{}, imageImportConfig.Changes) + if err != nil { + return err + } + imageImportConfig.ContainerConfig = newConfig + + err = s.daemon.Repositories().Import(src, repo, tag, imageImportConfig) + } + if err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + + return nil + +} + +func (s *Server) getImagesSearch(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + var ( + config *cliconfig.AuthConfig + authEncoded = r.Header.Get("X-Registry-Auth") + headers = map[string][]string{} + ) + + if authEncoded != "" { + authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJson).Decode(&config); err != nil { + // for a search it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + config = &cliconfig.AuthConfig{} + } + } + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + headers[k] = v + } + } + query, err := s.daemon.RegistryService.Search(r.Form.Get("term"), config, headers) + if err != nil { + return err + } + return 
writeJSON(w, http.StatusOK, query.Results) +} + +func (s *Server) postImagesPush(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + if err := parseForm(r); err != nil { + return err + } + authConfig := &cliconfig.AuthConfig{} + + authEncoded := r.Header.Get("X-Registry-Auth") + if authEncoded != "" { + // the new format is to handle the authConfig as a header + authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { + // to increase compatibility to existing api it is defaulting to be empty + authConfig = &cliconfig.AuthConfig{} + } + } else { + // the old format is supported for compatibility if there was no authConfig header + if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil { + return fmt.Errorf("Bad parameters and missing X-Registry-Auth: %v", err) + } + } + + name := vars["name"] + output := ioutils.NewWriteFlusher(w) + imagePushConfig := &graph.ImagePushConfig{ + MetaHeaders: metaHeaders, + AuthConfig: authConfig, + Tag: r.Form.Get("tag"), + OutStream: output, + } + + w.Header().Set("Content-Type", "application/json") + + if err := s.daemon.Repositories().Push(name, imagePushConfig); err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + return nil + +} + +func (s *Server) getImagesGet(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := parseForm(r); err != nil { + return err + } + + w.Header().Set("Content-Type", "application/x-tar") + + output := ioutils.NewWriteFlusher(w) + imageExportConfig := &graph.ImageExportConfig{Outstream: output} + if name, ok := vars["name"]; ok { + imageExportConfig.Names = []string{name} + } else { + imageExportConfig.Names = r.Form["names"] + } + + if err := s.daemon.Repositories().ImageExport(imageExportConfig); err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + return nil + +} + +func (s *Server) postImagesLoad(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return s.daemon.Repositories().Load(r.Body, w) +} + +func (s *Server) postContainersCreate(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if err := checkForJson(r); err != nil { + return err + } + var ( + warnings []string + name = r.Form.Get("name") + ) + + config, hostConfig, err := runconfig.DecodeContainerConfig(r.Body) + if err != nil { + return err + } + adjustCpuShares(version, hostConfig) + + containerId, warnings, err := s.daemon.ContainerCreate(name, config, hostConfig) + if err != nil { + return err + } + + return writeJSON(w, http.StatusCreated, &types.ContainerCreateResponse{ + ID: containerId, + Warnings: warnings, + }) +} + +func (s *Server) postContainersRestart(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing 
parameter") + } + + timeout, _ := strconv.Atoi(r.Form.Get("t")) + + if err := s.daemon.ContainerRestart(vars["name"], timeout); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *Server) postContainerRename(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + name := vars["name"] + newName := r.Form.Get("name") + if err := s.daemon.ContainerRename(name, newName); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *Server) deleteContainers(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + name := vars["name"] + config := &daemon.ContainerRmConfig{ + ForceRemove: boolValue(r, "force"), + RemoveVolume: boolValue(r, "v"), + RemoveLink: boolValue(r, "link"), + } + + if err := s.daemon.ContainerRm(name, config); err != nil { + // Force a 404 for the empty string + if strings.Contains(strings.ToLower(err.Error()), "prefix can't be empty") { + return fmt.Errorf("no such id: \"\"") + } + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *Server) deleteImages(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + name := vars["name"] + force := boolValue(r, "force") + noprune := boolValue(r, "noprune") + + list, err := s.daemon.ImageDelete(name, force, noprune) + if err != nil { + return err + } + + return writeJSON(w, http.StatusOK, list) +} + +func (s *Server) postContainersStart(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + // If contentLength is -1, we can assumed chunked encoding + // or more technically that the length is unknown + // https://golang.org/src/pkg/net/http/request.go#L139 + // net/http otherwise seems to swallow any headers related to chunked encoding + // including r.TransferEncoding + // allow a nil body for backwards compatibility + var hostConfig *runconfig.HostConfig + if r.Body != nil && (r.ContentLength > 0 || r.ContentLength == -1) { + if err := checkForJson(r); err != nil { + return err + } + + c, err := runconfig.DecodeHostConfig(r.Body) + if err != nil { + return err + } + + hostConfig = c + } + + if err := s.daemon.ContainerStart(vars["name"], hostConfig); err != nil { + if err.Error() == "Container already started" { + w.WriteHeader(http.StatusNotModified) + return nil + } + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *Server) postContainersStop(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + seconds, _ := strconv.Atoi(r.Form.Get("t")) + + if err := s.daemon.ContainerStop(vars["name"], seconds); err != nil { + if err.Error() == "Container already stopped" { + w.WriteHeader(http.StatusNotModified) + return nil + } + return err + } + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *Server) 
postContainersWait(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + status, err := s.daemon.ContainerWait(vars["name"], -1*time.Second) + if err != nil { + return err + } + + return writeJSON(w, http.StatusOK, &types.ContainerWaitResponse{ + StatusCode: status, + }) +} + +func (s *Server) postContainersResize(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + height, err := strconv.Atoi(r.Form.Get("h")) + if err != nil { + return err + } + width, err := strconv.Atoi(r.Form.Get("w")) + if err != nil { + return err + } + + return s.daemon.ContainerResize(vars["name"], height, width) +} + +func (s *Server) postContainersAttach(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + cont, err := s.daemon.Get(vars["name"]) + if err != nil { + return err + } + + inStream, outStream, err := hijackServer(w) + if err != nil { + return err + } + defer closeStreams(inStream, outStream) + + if _, ok := r.Header["Upgrade"]; ok { + fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") + } else { + fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") + } + + attachWithLogsConfig := &daemon.ContainerAttachWithLogsConfig{ + InStream: inStream, + OutStream: outStream, + UseStdin: boolValue(r, "stdin"), + UseStdout: boolValue(r, "stdout"), + UseStderr: boolValue(r, "stderr"), + Logs: boolValue(r, "logs"), + Stream: boolValue(r, "stream"), + } + + if err := s.daemon.ContainerAttachWithLogs(cont, attachWithLogsConfig); err != nil { + fmt.Fprintf(outStream, "Error attaching: %s\n", err) + } + + return nil +} + +func (s *Server) wsContainersAttach(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + cont, err := s.daemon.Get(vars["name"]) + if err != nil { + return err + } + + h := websocket.Handler(func(ws *websocket.Conn) { + defer ws.Close() + + wsAttachWithLogsConfig := &daemon.ContainerWsAttachWithLogsConfig{ + InStream: ws, + OutStream: ws, + ErrStream: ws, + Logs: boolValue(r, "logs"), + Stream: boolValue(r, "stream"), + } + + if err := s.daemon.ContainerWsAttachWithLogs(cont, wsAttachWithLogsConfig); err != nil { + logrus.Errorf("Error attaching websocket: %s", err) + } + }) + h.ServeHTTP(w, r) + + return nil +} + +func (s *Server) getContainersByName(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + if version.LessThan("1.20") { + containerJSONRaw, err := s.daemon.ContainerInspectPre120(vars["name"]) + if err != nil { + return err + } + return writeJSON(w, http.StatusOK, containerJSONRaw) + } + + containerJSON, err := s.daemon.ContainerInspect(vars["name"]) + if err != nil { + return err + } + return writeJSON(w, http.StatusOK, containerJSON) +} + +func (s *Server) getExecByID(version version.Version, w http.ResponseWriter, r *http.Request, vars 
map[string]string) error {
+	if vars == nil {
+		return fmt.Errorf("Missing parameter 'id'")
+	}
+
+	eConfig, err := s.daemon.ContainerExecInspect(vars["id"])
+	if err != nil {
+		return err
+	}
+
+	return writeJSON(w, http.StatusOK, eConfig)
+}
+
+func (s *Server) getImagesByName(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if vars == nil {
+		return fmt.Errorf("Missing parameter")
+	}
+
+	imageInspect, err := s.daemon.Repositories().Lookup(vars["name"])
+	if err != nil {
+		return err
+	}
+
+	return writeJSON(w, http.StatusOK, imageInspect)
+}
+
+func (s *Server) postBuild(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	var (
+		authConfigs        = map[string]cliconfig.AuthConfig{}
+		authConfigsEncoded = r.Header.Get("X-Registry-Config")
+		buildConfig        = builder.NewBuildConfig()
+	)
+
+	if authConfigsEncoded != "" {
+		authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded))
+		if err := json.NewDecoder(authConfigsJSON).Decode(&authConfigs); err != nil {
+			// A failure to decode is not an error here: for compatibility
+			// with the existing API, missing auth simply defaults to empty.
+		}
+	}
+
+	w.Header().Set("Content-Type", "application/json")
+
+	if boolValue(r, "forcerm") && version.GreaterThanOrEqualTo("1.12") {
+		buildConfig.Remove = true
+	} else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") {
+		buildConfig.Remove = true
+	} else {
+		buildConfig.Remove = boolValue(r, "rm")
+	}
+	if boolValue(r, "pull") && version.GreaterThanOrEqualTo("1.16") {
+		buildConfig.Pull = true
+	}
+
+	output := ioutils.NewWriteFlusher(w)
+	buildConfig.Stdout = output
+	buildConfig.Context = r.Body
+
+	buildConfig.RemoteURL = r.FormValue("remote")
+	buildConfig.DockerfileName = r.FormValue("dockerfile")
+	buildConfig.RepoName = r.FormValue("t")
+	buildConfig.SuppressOutput = boolValue(r, "q")
+	buildConfig.NoCache = boolValue(r, "nocache")
+	buildConfig.ForceRemove = boolValue(r, "forcerm")
+	buildConfig.AuthConfigs = authConfigs
+	buildConfig.MemorySwap = int64ValueOrZero(r, "memswap")
+	buildConfig.Memory = int64ValueOrZero(r, "memory")
+	buildConfig.CPUShares = int64ValueOrZero(r, "cpushares")
+	buildConfig.CPUPeriod = int64ValueOrZero(r, "cpuperiod")
+	buildConfig.CPUQuota = int64ValueOrZero(r, "cpuquota")
+	buildConfig.CPUSetCpus = r.FormValue("cpusetcpus")
+	buildConfig.CPUSetMems = r.FormValue("cpusetmems")
+	buildConfig.CgroupParent = r.FormValue("cgroupparent")
+
+	var buildUlimits = []*ulimit.Ulimit{}
+	ulimitsJson := r.FormValue("ulimits")
+	if ulimitsJson != "" {
+		if err := json.NewDecoder(strings.NewReader(ulimitsJson)).Decode(&buildUlimits); err != nil {
+			return err
+		}
+		buildConfig.Ulimits = buildUlimits
+	}
+
+	// Job cancellation. Note: not all job types support this.
+	if closeNotifier, ok := w.(http.CloseNotifier); ok {
+		finished := make(chan struct{})
+		defer close(finished)
+		go func() {
+			select {
+			case <-finished:
+			case <-closeNotifier.CloseNotify():
+				logrus.Infof("Client disconnected, cancelling job: build")
+				buildConfig.Cancel()
+			}
+		}()
+	}
+
+	if err := builder.Build(s.daemon, buildConfig); err != nil {
+		// Do not write the error to the HTTP output if it is still empty.
+		// This prevents writing a 200 (OK) response when there is an
+		// internal error.
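+		// Once output has been flushed the status line is already sent, so
+		// the error has to travel in-band; the stream formatter encodes it
+		// roughly as {"errorDetail":{"message":"..."},"error":"..."}
+		// (a sketch of the wire format, not a verbatim quote).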
+		if !output.Flushed() {
+			return err
+		}
+		sf := streamformatter.NewJSONStreamFormatter()
+		w.Write(sf.FormatError(err))
+	}
+	return nil
+}
+
+// postContainersCopy is deprecated in favor of getContainersArchivePath.
+func (s *Server) postContainersCopy(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if vars == nil {
+		return fmt.Errorf("Missing parameter")
+	}
+
+	if err := checkForJson(r); err != nil {
+		return err
+	}
+
+	cfg := types.CopyConfig{}
+	if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil {
+		return err
+	}
+
+	if cfg.Resource == "" {
+		return fmt.Errorf("Path cannot be empty")
+	}
+
+	data, err := s.daemon.ContainerCopy(vars["name"], cfg.Resource)
+	if err != nil {
+		if strings.Contains(strings.ToLower(err.Error()), "no such id") {
+			w.WriteHeader(http.StatusNotFound)
+			return nil
+		}
+		if os.IsNotExist(err) {
+			return fmt.Errorf("Could not find the file %s in container %s", cfg.Resource, vars["name"])
+		}
+		return err
+	}
+	defer data.Close()
+
+	w.Header().Set("Content-Type", "application/x-tar")
+	if _, err := io.Copy(w, data); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Encode the stat to JSON, base64 encode, and place in a header.
+func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error {
+	statJSON, err := json.Marshal(stat)
+	if err != nil {
+		return err
+	}
+
+	header.Set(
+		"X-Docker-Container-Path-Stat",
+		base64.StdEncoding.EncodeToString(statJSON),
+	)
+
+	return nil
+}
+
+func (s *Server) headContainersArchive(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	v, err := archiveFormValues(r, vars)
+	if err != nil {
+		return err
+	}
+
+	stat, err := s.daemon.ContainerStatPath(v.name, v.path)
+	if err != nil {
+		return err
+	}
+
+	return setContainerPathStatHeader(stat, w.Header())
+}
+
+func (s *Server) getContainersArchive(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	v, err := archiveFormValues(r, vars)
+	if err != nil {
+		return err
+	}
+
+	tarArchive, stat, err := s.daemon.ContainerArchivePath(v.name, v.path)
+	if err != nil {
+		return err
+	}
+	defer tarArchive.Close()
+
+	if err := setContainerPathStatHeader(stat, w.Header()); err != nil {
+		return err
+	}
+
+	w.Header().Set("Content-Type", "application/x-tar")
+	_, err = io.Copy(w, tarArchive)
+
+	return err
+}
+
+func (s *Server) putContainersArchive(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	v, err := archiveFormValues(r, vars)
+	if err != nil {
+		return err
+	}
+
+	noOverwriteDirNonDir := boolValue(r, "noOverwriteDirNonDir")
+	return s.daemon.ContainerExtractToDir(v.name, v.path, noOverwriteDirNonDir, r.Body)
+}
+
+func (s *Server) postContainerExecCreate(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := parseForm(r); err != nil {
+		return err
+	}
+	if err := checkForJson(r); err != nil {
+		return err
+	}
+	name := vars["name"]
+
+	execConfig := &runconfig.ExecConfig{}
+	if err := json.NewDecoder(r.Body).Decode(execConfig); err != nil {
+		return err
+	}
+	execConfig.Container = name
+
+	if len(execConfig.Cmd) == 0 {
+		return fmt.Errorf("No exec command specified")
+	}
+
+	// Register an instance of Exec in container.
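+	// The returned ID is what clients later POST to /exec/{id}/start and
+	// /exec/{id}/resize; see the route table in createRouter.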
+ id, err := s.daemon.ContainerExecCreate(execConfig) + if err != nil { + logrus.Errorf("Error setting up exec command in container %s: %s", name, err) + return err + } + + return writeJSON(w, http.StatusCreated, &types.ContainerExecCreateResponse{ + ID: id, + }) +} + +// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start. +func (s *Server) postContainerExecStart(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + var ( + execName = vars["name"] + stdin io.ReadCloser + stdout io.Writer + stderr io.Writer + ) + + execStartCheck := &types.ExecStartCheck{} + if err := json.NewDecoder(r.Body).Decode(execStartCheck); err != nil { + return err + } + + if !execStartCheck.Detach { + // Setting up the streaming http interface. + inStream, outStream, err := hijackServer(w) + if err != nil { + return err + } + defer closeStreams(inStream, outStream) + + var errStream io.Writer + + if _, ok := r.Header["Upgrade"]; ok { + fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") + } else { + fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") + } + + if !execStartCheck.Tty { + errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + + stdin = inStream + stdout = outStream + stderr = errStream + } + // Now run the user process in container. + + if err := s.daemon.ContainerExecStart(execName, stdin, stdout, stderr); err != nil { + logrus.Errorf("Error starting exec command in container %s: %s", execName, err) + return err + } + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *Server) postContainerExecResize(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + height, err := strconv.Atoi(r.Form.Get("h")) + if err != nil { + return err + } + width, err := strconv.Atoi(r.Form.Get("w")) + if err != nil { + return err + } + + return s.daemon.ContainerExecResize(vars["name"], height, width) +} + +func (s *Server) optionsHandler(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.WriteHeader(http.StatusOK) + return nil +} +func writeCorsHeaders(w http.ResponseWriter, r *http.Request, corsHeaders string) { + logrus.Debugf("CORS header is enabled and set to: %s", corsHeaders) + w.Header().Add("Access-Control-Allow-Origin", corsHeaders) + w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth") + w.Header().Add("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS") +} + +func (s *Server) ping(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + _, err := w.Write([]byte{'O', 'K'}) + return err +} + +func (s *Server) initTcpSocket(addr string) (l net.Listener, err error) { + if s.cfg.TLSConfig == nil || s.cfg.TLSConfig.ClientAuth != tls.RequireAndVerifyClientCert { + logrus.Warn("/!\\ DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") + } + if l, err = sockets.NewTcpSocket(addr, s.cfg.TLSConfig, s.start); err != nil { + return nil, err + } + if err := 
allocateDaemonPort(addr); err != nil { + return nil, err + } + return +} + +func makeHttpHandler(logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, corsHeaders string, dockerVersion version.Version) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + // log the request + logrus.Debugf("Calling %s %s", localMethod, localRoute) + + if logging { + logrus.Infof("%s %s", r.Method, r.RequestURI) + } + + if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") { + userAgent := strings.Split(r.Header.Get("User-Agent"), "/") + + // v1.20 onwards includes the GOOS of the client after the version + // such as Docker/1.7.0 (linux) + if len(userAgent) == 2 && strings.Contains(userAgent[1], " ") { + userAgent[1] = strings.Split(userAgent[1], " ")[0] + } + + if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) { + logrus.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion) + } + } + version := version.Version(mux.Vars(r)["version"]) + if version == "" { + version = api.Version + } + if corsHeaders != "" { + writeCorsHeaders(w, r, corsHeaders) + } + + if version.GreaterThan(api.Version) { + http.Error(w, fmt.Errorf("client is newer than server (client API version: %s, server API version: %s)", version, api.Version).Error(), http.StatusBadRequest) + return + } + if version.LessThan(api.MinVersion) { + http.Error(w, fmt.Errorf("client is too old, minimum supported API version is %s, please upgrade your client to a newer version", api.MinVersion).Error(), http.StatusBadRequest) + return + } + + w.Header().Set("Server", "Docker/"+dockerversion.VERSION+" ("+runtime.GOOS+")") + + if err := handlerFunc(version, w, r, mux.Vars(r)); err != nil { + logrus.Errorf("Handler for %s %s returned error: %s", localMethod, localRoute, err) + httpError(w, err) + } + } +} + +// we keep enableCors just for legacy usage, need to be removed in the future +func createRouter(s *Server) *mux.Router { + r := mux.NewRouter() + if os.Getenv("DEBUG") != "" { + ProfilerSetup(r, "/debug/") + } + m := map[string]map[string]HttpApiFunc{ + "HEAD": { + "/containers/{name:.*}/archive": s.headContainersArchive, + }, + "GET": { + "/_ping": s.ping, + "/events": s.getEvents, + "/info": s.getInfo, + "/version": s.getVersion, + "/images/json": s.getImagesJSON, + "/images/search": s.getImagesSearch, + "/images/get": s.getImagesGet, + "/images/{name:.*}/get": s.getImagesGet, + "/images/{name:.*}/history": s.getImagesHistory, + "/images/{name:.*}/json": s.getImagesByName, + "/containers/ps": s.getContainersJSON, + "/containers/json": s.getContainersJSON, + "/containers/{name:.*}/export": s.getContainersExport, + "/containers/{name:.*}/changes": s.getContainersChanges, + "/containers/{name:.*}/json": s.getContainersByName, + "/containers/{name:.*}/top": s.getContainersTop, + "/containers/{name:.*}/logs": s.getContainersLogs, + "/containers/{name:.*}/stats": s.getContainersStats, + "/containers/{name:.*}/attach/ws": s.wsContainersAttach, + "/exec/{id:.*}/json": s.getExecByID, + "/containers/{name:.*}/archive": s.getContainersArchive, + }, + "POST": { + "/auth": s.postAuth, + "/commit": s.postCommit, + "/build": s.postBuild, + "/images/create": s.postImagesCreate, + "/images/load": s.postImagesLoad, + "/images/{name:.*}/push": s.postImagesPush, + "/images/{name:.*}/tag": s.postImagesTag, + "/containers/create": s.postContainersCreate, + "/containers/{name:.*}/kill": s.postContainersKill, + 
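// Note: every route in this table (the empty OPTIONS route aside) is
+			// also mounted under a /v{version} prefix when the router is
+			// assembled at the bottom of createRouter.
+			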
"/containers/{name:.*}/pause": s.postContainersPause, + "/containers/{name:.*}/unpause": s.postContainersUnpause, + "/containers/{name:.*}/restart": s.postContainersRestart, + "/containers/{name:.*}/start": s.postContainersStart, + "/containers/{name:.*}/stop": s.postContainersStop, + "/containers/{name:.*}/wait": s.postContainersWait, + "/containers/{name:.*}/resize": s.postContainersResize, + "/containers/{name:.*}/attach": s.postContainersAttach, + "/containers/{name:.*}/copy": s.postContainersCopy, + "/containers/{name:.*}/exec": s.postContainerExecCreate, + "/exec/{name:.*}/start": s.postContainerExecStart, + "/exec/{name:.*}/resize": s.postContainerExecResize, + "/containers/{name:.*}/rename": s.postContainerRename, + }, + "PUT": { + "/containers/{name:.*}/archive": s.putContainersArchive, + }, + "DELETE": { + "/containers/{name:.*}": s.deleteContainers, + "/images/{name:.*}": s.deleteImages, + }, + "OPTIONS": { + "": s.optionsHandler, + }, + } + + // If "api-cors-header" is not given, but "api-enable-cors" is true, we set cors to "*" + // otherwise, all head values will be passed to HTTP handler + corsHeaders := s.cfg.CorsHeaders + if corsHeaders == "" && s.cfg.EnableCors { + corsHeaders = "*" + } + + for method, routes := range m { + for route, fct := range routes { + logrus.Debugf("Registering %s, %s", method, route) + // NOTE: scope issue, make sure the variables are local and won't be changed + localRoute := route + localFct := fct + localMethod := method + + // build the handler function + f := makeHttpHandler(s.cfg.Logging, localMethod, localRoute, localFct, corsHeaders, version.Version(s.cfg.Version)) + + // add the new route + if localRoute == "" { + r.Methods(localMethod).HandlerFunc(f) + } else { + r.Path("/v{version:[0-9.]+}" + localRoute).Methods(localMethod).HandlerFunc(f) + r.Path(localRoute).Methods(localMethod).HandlerFunc(f) + } + } + } + + return r +} diff --git a/api/server/server_experimental.go b/api/server/server_experimental.go new file mode 100644 index 00000000..06f55013 --- /dev/null +++ b/api/server/server_experimental.go @@ -0,0 +1,17 @@ +// +build experimental + +package server + +func (s *Server) registerSubRouter() { + httpHandler := s.daemon.NetworkApiRouter() + + subrouter := s.router.PathPrefix("/v{version:[0-9.]+}/networks").Subrouter() + subrouter.Methods("GET", "POST", "PUT", "DELETE").HandlerFunc(httpHandler) + subrouter = s.router.PathPrefix("/networks").Subrouter() + subrouter.Methods("GET", "POST", "PUT", "DELETE").HandlerFunc(httpHandler) + + subrouter = s.router.PathPrefix("/v{version:[0-9.]+}/services").Subrouter() + subrouter.Methods("GET", "POST", "PUT", "DELETE").HandlerFunc(httpHandler) + subrouter = s.router.PathPrefix("/services").Subrouter() + subrouter.Methods("GET", "POST", "PUT", "DELETE").HandlerFunc(httpHandler) +} diff --git a/api/server/server_linux.go b/api/server/server_linux.go new file mode 100644 index 00000000..f6ad26a9 --- /dev/null +++ b/api/server/server_linux.go @@ -0,0 +1,123 @@ +// +build linux + +package server + +import ( + "fmt" + "net" + "net/http" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon" + "github.com/docker/docker/pkg/sockets" + "github.com/docker/docker/pkg/systemd" + "github.com/docker/docker/pkg/version" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork/portallocator" +) + +const ( + // See http://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269 + linuxMinCpuShares = 2 + 
linuxMaxCpuShares = 262144 +) + +// newServer sets up the required serverClosers and does protocol specific checking. +func (s *Server) newServer(proto, addr string) ([]serverCloser, error) { + var ( + err error + ls []net.Listener + ) + switch proto { + case "fd": + ls, err = systemd.ListenFD(addr) + if err != nil { + return nil, err + } + // We don't want to start serving on these sockets until the + // daemon is initialized and installed. Otherwise required handlers + // won't be ready. + <-s.start + case "tcp": + l, err := s.initTcpSocket(addr) + if err != nil { + return nil, err + } + ls = append(ls, l) + case "unix": + l, err := sockets.NewUnixSocket(addr, s.cfg.SocketGroup, s.start) + if err != nil { + return nil, err + } + ls = append(ls, l) + default: + return nil, fmt.Errorf("Invalid protocol format: %q", proto) + } + var res []serverCloser + for _, l := range ls { + res = append(res, &HttpServer{ + &http.Server{ + Addr: addr, + Handler: s.router, + }, + l, + }) + } + return res, nil +} + +func (s *Server) AcceptConnections(d *daemon.Daemon) { + // Tell the init daemon we are accepting requests + s.daemon = d + s.registerSubRouter() + go systemd.SdNotify("READY=1") + // close the lock so the listeners start accepting connections + select { + case <-s.start: + default: + close(s.start) + } +} + +func allocateDaemonPort(addr string) error { + host, port, err := net.SplitHostPort(addr) + if err != nil { + return err + } + + intPort, err := strconv.Atoi(port) + if err != nil { + return err + } + + var hostIPs []net.IP + if parsedIP := net.ParseIP(host); parsedIP != nil { + hostIPs = append(hostIPs, parsedIP) + } else if hostIPs, err = net.LookupIP(host); err != nil { + return fmt.Errorf("failed to lookup %s address in host specification", host) + } + + pa := portallocator.Get() + for _, hostIP := range hostIPs { + if _, err := pa.RequestPort(hostIP, "tcp", intPort); err != nil { + return fmt.Errorf("failed to allocate daemon listening port %d (err: %v)", intPort, err) + } + } + return nil +} + +func adjustCpuShares(version version.Version, hostConfig *runconfig.HostConfig) { + if version.LessThan("1.19") { + if hostConfig != nil && hostConfig.CpuShares > 0 { + // Handle unsupported CpuShares + if hostConfig.CpuShares < linuxMinCpuShares { + logrus.Warnf("Changing requested CpuShares of %d to minimum allowed of %d", hostConfig.CpuShares, linuxMinCpuShares) + hostConfig.CpuShares = linuxMinCpuShares + } else if hostConfig.CpuShares > linuxMaxCpuShares { + logrus.Warnf("Changing requested CpuShares of %d to maximum allowed of %d", hostConfig.CpuShares, linuxMaxCpuShares) + hostConfig.CpuShares = linuxMaxCpuShares + } + } + } +} diff --git a/api/server/server_linux_test.go b/api/server/server_linux_test.go new file mode 100644 index 00000000..47289653 --- /dev/null +++ b/api/server/server_linux_test.go @@ -0,0 +1,68 @@ +// +build linux + +package server + +import ( + "testing" + + "github.com/docker/docker/pkg/version" + "github.com/docker/docker/runconfig" +) + +func TestAdjustCpuSharesOldApi(t *testing.T) { + apiVersion := version.Version("1.18") + hostConfig := &runconfig.HostConfig{ + CpuShares: linuxMinCpuShares - 1, + } + adjustCpuShares(apiVersion, hostConfig) + if hostConfig.CpuShares != linuxMinCpuShares { + t.Errorf("Expected CpuShares to be %d", linuxMinCpuShares) + } + + hostConfig.CpuShares = linuxMaxCpuShares + 1 + adjustCpuShares(apiVersion, hostConfig) + if hostConfig.CpuShares != linuxMaxCpuShares { + t.Errorf("Expected CpuShares to be %d", linuxMaxCpuShares) + } + + 
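// In-range values, including the zero "unset" default, should pass
+	// through adjustCpuShares untouched even on the old API:
+	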
hostConfig.CpuShares = 0 + adjustCpuShares(apiVersion, hostConfig) + if hostConfig.CpuShares != 0 { + t.Error("Expected CpuShares to be unchanged") + } + + hostConfig.CpuShares = 1024 + adjustCpuShares(apiVersion, hostConfig) + if hostConfig.CpuShares != 1024 { + t.Error("Expected CpuShares to be unchanged") + } +} + +func TestAdjustCpuSharesNoAdjustment(t *testing.T) { + apiVersion := version.Version("1.19") + hostConfig := &runconfig.HostConfig{ + CpuShares: linuxMinCpuShares - 1, + } + adjustCpuShares(apiVersion, hostConfig) + if hostConfig.CpuShares != linuxMinCpuShares-1 { + t.Errorf("Expected CpuShares to be %d", linuxMinCpuShares-1) + } + + hostConfig.CpuShares = linuxMaxCpuShares + 1 + adjustCpuShares(apiVersion, hostConfig) + if hostConfig.CpuShares != linuxMaxCpuShares+1 { + t.Errorf("Expected CpuShares to be %d", linuxMaxCpuShares+1) + } + + hostConfig.CpuShares = 0 + adjustCpuShares(apiVersion, hostConfig) + if hostConfig.CpuShares != 0 { + t.Error("Expected CpuShares to be unchanged") + } + + hostConfig.CpuShares = 1024 + adjustCpuShares(apiVersion, hostConfig) + if hostConfig.CpuShares != 1024 { + t.Error("Expected CpuShares to be unchanged") + } +} diff --git a/api/server/server_stub.go b/api/server/server_stub.go new file mode 100644 index 00000000..160c2922 --- /dev/null +++ b/api/server/server_stub.go @@ -0,0 +1,6 @@ +// +build !experimental + +package server + +func (s *Server) registerSubRouter() { +} diff --git a/api/server/server_windows.go b/api/server/server_windows.go new file mode 100644 index 00000000..5105b9b8 --- /dev/null +++ b/api/server/server_windows.go @@ -0,0 +1,62 @@ +// +build windows + +package server + +import ( + "errors" + "net" + "net/http" + + "github.com/docker/docker/daemon" + "github.com/docker/docker/pkg/version" + "github.com/docker/docker/runconfig" +) + +// NewServer sets up the required Server and does protocol specific checking. +func (s *Server) newServer(proto, addr string) ([]serverCloser, error) { + var ( + ls []net.Listener + ) + switch proto { + case "tcp": + l, err := s.initTcpSocket(addr) + if err != nil { + return nil, err + } + ls = append(ls, l) + + default: + return nil, errors.New("Invalid protocol format. Windows only supports tcp.") + } + + var res []serverCloser + for _, l := range ls { + res = append(res, &HttpServer{ + &http.Server{ + Addr: addr, + Handler: s.router, + }, + l, + }) + } + return res, nil + +} + +func (s *Server) AcceptConnections(d *daemon.Daemon) { + s.daemon = d + s.registerSubRouter() + // close the lock so the listeners start accepting connections + select { + case <-s.start: + default: + close(s.start) + } +} + +func allocateDaemonPort(addr string) error { + return nil +} + +func adjustCpuShares(version version.Version, hostConfig *runconfig.HostConfig) { +} diff --git a/api/types/stats.go b/api/types/stats.go new file mode 100644 index 00000000..507830ce --- /dev/null +++ b/api/types/stats.go @@ -0,0 +1,91 @@ +// This package is used for API stability in the types and response to the +// consumers of the API stats endpoint. +package types + +import "time" + +type ThrottlingData struct { + // Number of periods with throttling active + Periods uint64 `json:"periods"` + // Number of periods when the container hit its throttling limit. + ThrottledPeriods uint64 `json:"throttled_periods"` + // Aggregate time the container was throttled for in nanoseconds. + ThrottledTime uint64 `json:"throttled_time"` +} + +// All CPU stats are aggregated since container inception. 
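+// Consumer-side sketch (not part of this API): a CPU percentage is
+// typically derived from two successive samples as
+//   (deltaTotalUsage / deltaSystemUsage) * len(PercpuUsage) * 100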
+type CpuUsage struct {
+	// Total CPU time consumed.
+	// Units: nanoseconds.
+	TotalUsage uint64 `json:"total_usage"`
+	// Total CPU time consumed per core.
+	// Units: nanoseconds.
+	PercpuUsage []uint64 `json:"percpu_usage"`
+	// Time spent by tasks of the cgroup in kernel mode.
+	// Units: nanoseconds.
+	UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
+	// Time spent by tasks of the cgroup in user mode.
+	// Units: nanoseconds.
+	UsageInUsermode uint64 `json:"usage_in_usermode"`
+}
+
+type CpuStats struct {
+	CpuUsage       CpuUsage       `json:"cpu_usage"`
+	SystemUsage    uint64         `json:"system_cpu_usage"`
+	ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
+}
+
+type MemoryStats struct {
+	// current res_counter usage for memory
+	Usage uint64 `json:"usage"`
+	// maximum usage ever recorded.
+	MaxUsage uint64 `json:"max_usage"`
+	// TODO(vishh): Export these as stronger types.
+	// all the stats exported via memory.stat.
+	Stats map[string]uint64 `json:"stats"`
+	// number of times memory usage hits limits.
+	Failcnt uint64 `json:"failcnt"`
+	Limit   uint64 `json:"limit"`
+}
+
+// TODO Windows: This can be factored out
+type BlkioStatEntry struct {
+	Major uint64 `json:"major"`
+	Minor uint64 `json:"minor"`
+	Op    string `json:"op"`
+	Value uint64 `json:"value"`
+}
+
+// TODO Windows: This can be factored out
+type BlkioStats struct {
+	// number of bytes transferred to and from the block device
+	IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"`
+	IoServicedRecursive     []BlkioStatEntry `json:"io_serviced_recursive"`
+	IoQueuedRecursive       []BlkioStatEntry `json:"io_queue_recursive"`
+	IoServiceTimeRecursive  []BlkioStatEntry `json:"io_service_time_recursive"`
+	IoWaitTimeRecursive     []BlkioStatEntry `json:"io_wait_time_recursive"`
+	IoMergedRecursive       []BlkioStatEntry `json:"io_merged_recursive"`
+	IoTimeRecursive         []BlkioStatEntry `json:"io_time_recursive"`
+	SectorsRecursive        []BlkioStatEntry `json:"sectors_recursive"`
+}
+
+// TODO Windows: This will require refactoring
+type Network struct {
+	RxBytes   uint64 `json:"rx_bytes"`
+	RxPackets uint64 `json:"rx_packets"`
+	RxErrors  uint64 `json:"rx_errors"`
+	RxDropped uint64 `json:"rx_dropped"`
+	TxBytes   uint64 `json:"tx_bytes"`
+	TxPackets uint64 `json:"tx_packets"`
+	TxErrors  uint64 `json:"tx_errors"`
+	TxDropped uint64 `json:"tx_dropped"`
+}
+
+type Stats struct {
+	Read        time.Time   `json:"read"`
+	Network     Network     `json:"network,omitempty"`
+	PreCpuStats CpuStats    `json:"precpu_stats,omitempty"`
+	CpuStats    CpuStats    `json:"cpu_stats,omitempty"`
+	MemoryStats MemoryStats `json:"memory_stats,omitempty"`
+	BlkioStats  BlkioStats  `json:"blkio_stats,omitempty"`
+}
diff --git a/api/types/types.go b/api/types/types.go
new file mode 100644
index 00000000..329ee96c
--- /dev/null
+++ b/api/types/types.go
@@ -0,0 +1,278 @@
+package types
+
+import (
+	"os"
+	"time"
+
+	"github.com/docker/docker/daemon/network"
+	"github.com/docker/docker/pkg/version"
+	"github.com/docker/docker/runconfig"
+)
+
+// ContainerCreateResponse contains the information returned to a client on the
+// creation of a new container.
+type ContainerCreateResponse struct {
+	// ID is the ID of the created container.
+	ID string `json:"Id"`
+
+	// Warnings are any warnings encountered during the creation of the container.
+	Warnings []string `json:"Warnings"`
+}
+
+// POST /containers/{name:.*}/exec
+type ContainerExecCreateResponse struct {
+	// ID is the exec ID.
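// [Editor's note] The Stats payload above carries both the current and the
// previous CPU sample (CpuStats/PreCpuStats) so a client can compute a usage
// percentage without keeping state between reads. A sketch of the common
// consumer-side calculation, essentially what the docker CLI of this era does:
//
//	cpuDelta := float64(s.CpuStats.CpuUsage.TotalUsage - s.PreCpuStats.CpuUsage.TotalUsage)
//	sysDelta := float64(s.CpuStats.SystemUsage - s.PreCpuStats.SystemUsage)
//	percent := 0.0
//	if sysDelta > 0.0 && cpuDelta > 0.0 {
//		percent = (cpuDelta / sysDelta) * float64(len(s.CpuStats.CpuUsage.PercpuUsage)) * 100.0
//	}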
+ ID string `json:"Id"` +} + +// POST /auth +type AuthResponse struct { + // Status is the authentication status + Status string `json:"Status"` +} + +// POST "/containers/"+containerID+"/wait" +type ContainerWaitResponse struct { + // StatusCode is the status code of the wait job + StatusCode int `json:"StatusCode"` +} + +// POST "/commit?container="+containerID +type ContainerCommitResponse struct { + ID string `json:"Id"` +} + +// GET "/containers/{name:.*}/changes" +type ContainerChange struct { + Kind int + Path string +} + +// GET "/images/{name:.*}/history" +type ImageHistory struct { + ID string `json:"Id"` + Created int64 + CreatedBy string + Tags []string + Size int64 + Comment string +} + +// DELETE "/images/{name:.*}" +type ImageDelete struct { + Untagged string `json:",omitempty"` + Deleted string `json:",omitempty"` +} + +// GET "/images/json" +type Image struct { + ID string `json:"Id"` + ParentId string + RepoTags []string + RepoDigests []string + Created int + Size int + VirtualSize int + Labels map[string]string +} + +type GraphDriverData struct { + Name string + Data map[string]string +} + +// GET "/images/{name:.*}/json" +type ImageInspect struct { + Id string + Parent string + Comment string + Created string + Container string + ContainerConfig *runconfig.Config + DockerVersion string + Author string + Config *runconfig.Config + Architecture string + Os string + Size int64 + VirtualSize int64 + GraphDriver GraphDriverData +} + +// GET "/containers/json" +type Port struct { + IP string `json:",omitempty"` + PrivatePort int + PublicPort int `json:",omitempty"` + Type string +} + +type Container struct { + ID string `json:"Id"` + Names []string + Image string + Command string + Created int + Ports []Port + SizeRw int `json:",omitempty"` + SizeRootFs int `json:",omitempty"` + Labels map[string]string + Status string + HostConfig struct { + NetworkMode string `json:",omitempty"` + } +} + +// POST "/containers/"+containerID+"/copy" +type CopyConfig struct { + Resource string +} + +// ContainerPathStat is used to encode the header from +// GET /containers/{name:.*}/archive +// "name" is basename of the resource. 
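// [Editor's note] These structs exist so API clients get a stable JSON shape.
// A typical client-side decode of the create endpoint, as a sketch; the
// httpClient and url names are hypothetical, not part of this patch:
//
//	resp, err := httpClient.Post(url+"/containers/create", "application/json", reqBody)
//	if err != nil {
//		return err
//	}
//	defer resp.Body.Close()
//	var created types.ContainerCreateResponse
//	if err := json.NewDecoder(resp.Body).Decode(&created); err != nil {
//		return err
//	}
//	fmt.Println(created.ID, created.Warnings)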
+type ContainerPathStat struct { + Name string `json:"name"` + Size int64 `json:"size"` + Mode os.FileMode `json:"mode"` + Mtime time.Time `json:"mtime"` + LinkTarget string `json:"linkTarget"` +} + +// GET "/containers/{name:.*}/top" +type ContainerProcessList struct { + Processes [][]string + Titles []string +} + +type Version struct { + Version string + ApiVersion version.Version + GitCommit string + GoVersion string + Os string + Arch string + KernelVersion string `json:",omitempty"` + Experimental bool `json:",omitempty"` + BuildTime string `json:",omitempty"` +} + +// GET "/info" +type Info struct { + ID string + Containers int + Images int + Driver string + DriverStatus [][2]string + MemoryLimit bool + SwapLimit bool + CpuCfsPeriod bool + CpuCfsQuota bool + IPv4Forwarding bool + BridgeNfIptables bool + BridgeNfIp6tables bool + Debug bool + NFd int + OomKillDisable bool + NGoroutines int + SystemTime string + ExecutionDriver string + LoggingDriver string + NEventsListener int + KernelVersion string + OperatingSystem string + IndexServerAddress string + RegistryConfig interface{} + InitSha1 string + InitPath string + NCPU int + MemTotal int64 + DockerRootDir string + HttpProxy string + HttpsProxy string + NoProxy string + Name string + Labels []string + ExperimentalBuild bool +} + +// This struct is a temp struct used by execStart +// Config fields is part of ExecConfig in runconfig package +type ExecStartCheck struct { + // ExecStart will first check if it's detached + Detach bool + // Check if there's a tty + Tty bool +} + +type ContainerState struct { + Running bool + Paused bool + Restarting bool + OOMKilled bool + Dead bool + Pid int + ExitCode int + Error string + StartedAt string + FinishedAt string +} + +// GET "/containers/{name:.*}/json" +type ContainerJSONBase struct { + Id string + Created string + Path string + Args []string + State *ContainerState + Image string + NetworkSettings *network.Settings + ResolvConfPath string + HostnamePath string + HostsPath string + LogPath string + Name string + RestartCount int + Driver string + ExecDriver string + MountLabel string + ProcessLabel string + AppArmorProfile string + ExecIDs []string + HostConfig *runconfig.HostConfig + GraphDriver GraphDriverData +} + +type ContainerJSON struct { + *ContainerJSONBase + Mounts []MountPoint + Config *runconfig.Config +} + +// backcompatibility struct along with ContainerConfig +type ContainerJSONPre120 struct { + *ContainerJSONBase + Volumes map[string]string + VolumesRW map[string]bool + Config *ContainerConfig +} + +type ContainerConfig struct { + *runconfig.Config + + // backward compatibility, they now live in HostConfig + Memory int64 + MemorySwap int64 + CpuShares int64 + Cpuset string +} + +// MountPoint represents a mount point configuration inside the container. 
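// [Editor's note] ContainerPathStat above is not returned as a JSON body: to
// my reading of the archive endpoint of this era it travels base64-encoded in
// the X-Docker-Container-Path-Stat response header (treat the header name as
// an assumption). Client-side decode sketch:
//
//	raw := resp.Header.Get("X-Docker-Container-Path-Stat")
//	data, err := base64.StdEncoding.DecodeString(raw)
//	if err != nil {
//		return err
//	}
//	var stat types.ContainerPathStat
//	if err := json.Unmarshal(data, &stat); err != nil {
//		return err
//	}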
+type MountPoint struct { + Name string `json:",omitempty"` + Source string + Destination string + Driver string `json:",omitempty"` + Mode string // this is internally named `Relabel` + RW bool +} diff --git a/builder/bflag.go b/builder/bflag.go new file mode 100644 index 00000000..2a121ec3 --- /dev/null +++ b/builder/bflag.go @@ -0,0 +1,164 @@ +package builder + +import ( + "fmt" + "strings" +) + +// FlagType is the type of the build flag +type FlagType int + +const ( + boolType FlagType = iota + stringType +) + +// BFlags contains all flags information for the builder +type BFlags struct { + Args []string // actual flags/args from cmd line + flags map[string]*Flag + used map[string]*Flag + Err error +} + +// Flag contains all information for a flag +type Flag struct { + bf *BFlags + name string + flagType FlagType + Value string +} + +// NewBFlags return the new BFlags struct +func NewBFlags() *BFlags { + return &BFlags{ + flags: make(map[string]*Flag), + used: make(map[string]*Flag), + } +} + +// AddBool adds a bool flag to BFlags +func (bf *BFlags) AddBool(name string, def bool) *Flag { + flag := bf.addFlag(name, boolType) + if flag == nil { + return nil + } + if def { + flag.Value = "true" + } else { + flag.Value = "false" + } + return flag +} + +// AddString adds a string flag to BFlags +func (bf *BFlags) AddString(name string, def string) *Flag { + flag := bf.addFlag(name, stringType) + if flag == nil { + return nil + } + flag.Value = def + return flag +} + +func (bf *BFlags) addFlag(name string, flagType FlagType) *Flag { + if _, ok := bf.flags[name]; ok { + bf.Err = fmt.Errorf("Duplicate flag defined: %s", name) + return nil + } + + newFlag := &Flag{ + bf: bf, + name: name, + flagType: flagType, + } + bf.flags[name] = newFlag + + return newFlag +} + +// IsUsed checks if the flag is used +func (fl *Flag) IsUsed() bool { + if _, ok := fl.bf.used[fl.name]; ok { + return true + } + return false +} + +// IsTrue checks if a bool flag is true +func (fl *Flag) IsTrue() bool { + if fl.flagType != boolType { + // Should never get here + panic(fmt.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name)) + } + return fl.Value == "true" +} + +// Parse parses and checks if the BFlags is valid +func (bf *BFlags) Parse() error { + // If there was an error while defining the possible flags + // go ahead and bubble it back up here since we didn't do it + // earlier in the processing + if bf.Err != nil { + return fmt.Errorf("Error setting up flags: %s", bf.Err) + } + + for _, arg := range bf.Args { + if !strings.HasPrefix(arg, "--") { + return fmt.Errorf("Arg should start with -- : %s", arg) + } + + if arg == "--" { + return nil + } + + arg = arg[2:] + value := "" + + index := strings.Index(arg, "=") + if index >= 0 { + value = arg[index+1:] + arg = arg[:index] + } + + flag, ok := bf.flags[arg] + if !ok { + return fmt.Errorf("Unknown flag: %s", arg) + } + + if _, ok = bf.used[arg]; ok { + return fmt.Errorf("Duplicate flag specified: %s", arg) + } + + bf.used[arg] = flag + + switch flag.flagType { + case boolType: + // value == "" is only ok if no "=" was specified + if index >= 0 && value == "" { + return fmt.Errorf("Missing a value on flag: %s", arg) + } + + lower := strings.ToLower(value) + if lower == "" { + flag.Value = "true" + } else if lower == "true" || lower == "false" { + flag.Value = lower + } else { + return fmt.Errorf("Expecting boolean value for flag %s, not: %s", arg, value) + } + + case stringType: + if index < 0 { + return fmt.Errorf("Missing a value on flag: %s", arg) + } + 
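// [Editor's note] End-to-end BFlags usage as a dispatcher would drive it; the
// flag names here are hypothetical, chosen only for the sketch:
//
//	bf := NewBFlags()
//	force := bf.AddBool("force", false)
//	out := bf.AddString("output", "")
//	bf.Args = []string{"--force", "--output=/tmp/x"}
//	if err := bf.Parse(); err != nil {
//		return err
//	}
//	force.IsTrue() // true: a bare bool flag parses as "true"
//	out.IsUsed()   // true
//	out.Value      // "/tmp/x"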
flag.Value = value + + default: + panic(fmt.Errorf("No idea what kind of flag we have! Should never get here!")) + } + + } + + return nil +} diff --git a/builder/bflag_test.go b/builder/bflag_test.go new file mode 100644 index 00000000..960725dc --- /dev/null +++ b/builder/bflag_test.go @@ -0,0 +1,187 @@ +package builder + +import ( + "testing" +) + +func TestBuilderFlags(t *testing.T) { + var expected string + var err error + + // --- + + bf := NewBFlags() + bf.Args = []string{} + if err := bf.Parse(); err != nil { + t.Fatalf("Test1 of %q was supposed to work: %s", bf.Args, err) + } + + // --- + + bf = NewBFlags() + bf.Args = []string{"--"} + if err := bf.Parse(); err != nil { + t.Fatalf("Test2 of %q was supposed to work: %s", bf.Args, err) + } + + // --- + + bf = NewBFlags() + flStr1 := bf.AddString("str1", "") + flBool1 := bf.AddBool("bool1", false) + bf.Args = []string{} + if err = bf.Parse(); err != nil { + t.Fatalf("Test3 of %q was supposed to work: %s", bf.Args, err) + } + + if flStr1.IsUsed() == true { + t.Fatalf("Test3 - str1 was not used!") + } + if flBool1.IsUsed() == true { + t.Fatalf("Test3 - bool1 was not used!") + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test4 of %q was supposed to work: %s", bf.Args, err) + } + + if flStr1.Value != "HI" { + t.Fatalf("Str1 was supposed to default to: HI") + } + if flBool1.IsTrue() { + t.Fatalf("Bool1 was supposed to default to: false") + } + if flStr1.IsUsed() == true { + t.Fatalf("Str1 was not used!") + } + if flBool1.IsUsed() == true { + t.Fatalf("Bool1 was not used!") + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + bf.Args = []string{"--str1"} + + if err = bf.Parse(); err == nil { + t.Fatalf("Test %q was supposed to fail", bf.Args) + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + bf.Args = []string{"--str1="} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + expected = "" + if flStr1.Value != expected { + t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected) + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + bf.Args = []string{"--str1=BYE"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + expected = "BYE" + if flStr1.Value != expected { + t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected) + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + if !flBool1.IsTrue() { + t.Fatalf("Test-b1 Bool1 was supposed to be true") + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1=true"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + if !flBool1.IsTrue() { + t.Fatalf("Test-b2 Bool1 was supposed to be true") + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1=false"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + if flBool1.IsTrue() { + t.Fatalf("Test-b3 Bool1 was supposed to be false") + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1=false1"} + + if err = bf.Parse(); err == 
nil {
+		t.Fatalf("Test %q was supposed to fail", bf.Args)
+	}
+
+	// ---
+
+	bf = NewBFlags()
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{"--bool2"}
+
+	if err = bf.Parse(); err == nil {
+		t.Fatalf("Test %q was supposed to fail", bf.Args)
+	}
+
+	// ---
+
+	bf = NewBFlags()
+	flStr1 = bf.AddString("str1", "HI")
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{"--bool1", "--str1=BYE"}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+	}
+
+	if flStr1.Value != "BYE" {
+		t.Fatalf("Test %s, str1 should be BYE", bf.Args)
+	}
+	if !flBool1.IsTrue() {
+		t.Fatalf("Test %s, bool1 should be true", bf.Args)
+	}
+}
diff --git a/builder/command/command.go b/builder/command/command.go
new file mode 100644
index 00000000..93b1c8bd
--- /dev/null
+++ b/builder/command/command.go
@@ -0,0 +1,38 @@
+// Package command contains the set of Dockerfile commands.
+package command
+
+// Define constants for the command strings
+const (
+	Env        = "env"
+	Label      = "label"
+	Maintainer = "maintainer"
+	Add        = "add"
+	Copy       = "copy"
+	From       = "from"
+	Onbuild    = "onbuild"
+	Workdir    = "workdir"
+	Run        = "run"
+	Cmd        = "cmd"
+	Entrypoint = "entrypoint"
+	Expose     = "expose"
+	Volume     = "volume"
+	User       = "user"
+)
+
+// Commands is the list of all Dockerfile commands
+var Commands = map[string]struct{}{
+	Env:        {},
+	Label:      {},
+	Maintainer: {},
+	Add:        {},
+	Copy:       {},
+	From:       {},
+	Onbuild:    {},
+	Workdir:    {},
+	Run:        {},
+	Cmd:        {},
+	Entrypoint: {},
+	Expose:     {},
+	Volume:     {},
+	User:       {},
+}
diff --git a/builder/dispatchers.go b/builder/dispatchers.go
new file mode 100644
index 00000000..a810e2cc
--- /dev/null
+++ b/builder/dispatchers.go
@@ -0,0 +1,554 @@
+package builder
+
+// This file contains the dispatchers for each command. Note that
+// `nullDispatch` is not actually a command, but support for commands we parse
+// but do nothing with.
+//
+// See evaluator.go for a higher level discussion of the whole evaluator
+// package.
+
+import (
+	"fmt"
+	"io/ioutil"
+	"path"
+	"path/filepath"
+	"regexp"
+	"runtime"
+	"sort"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/nat"
+	"github.com/docker/docker/runconfig"
+)
+
+const (
+	// NoBaseImageSpecifier is the symbol used by the FROM
+	// command to specify that no base image is to be used.
+	NoBaseImageSpecifier string = "scratch"
+)
+
+// dispatch with no layer / parsing. This is effectively not a command.
+func nullDispatch(b *builder, args []string, attributes map[string]bool, original string) error {
+	return nil
+}
+
+// ENV foo bar
+//
+// Sets the environment variable foo to bar and also makes interpolation
+// in the dockerfile available from the next statement on via ${foo}.
+//
+func env(b *builder, args []string, attributes map[string]bool, original string) error {
+	if len(args) == 0 {
+		return fmt.Errorf("ENV requires at least one argument")
+	}
+
+	if len(args)%2 != 0 {
+		// should never get here, but just in case
+		return fmt.Errorf("Bad input to ENV, too many args")
+	}
+
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
+	}
+
+	// TODO/FIXME/NOT USED
+	// Just here to show how to use the builder flags stuff within the
+	// context of a builder command. Will remove once we actually add
+	// a builder command to something!
+ /* + flBool1 := b.BuilderFlags.AddBool("bool1", false) + flStr1 := b.BuilderFlags.AddString("str1", "HI") + + if err := b.BuilderFlags.Parse(); err != nil { + return err + } + + fmt.Printf("Bool1:%v\n", flBool1) + fmt.Printf("Str1:%v\n", flStr1) + */ + + commitStr := "ENV" + + for j := 0; j < len(args); j++ { + // name ==> args[j] + // value ==> args[j+1] + newVar := args[j] + "=" + args[j+1] + "" + commitStr += " " + newVar + + gotOne := false + for i, envVar := range b.Config.Env { + envParts := strings.SplitN(envVar, "=", 2) + if envParts[0] == args[j] { + b.Config.Env[i] = newVar + gotOne = true + break + } + } + if !gotOne { + b.Config.Env = append(b.Config.Env, newVar) + } + j++ + } + + return b.commit("", b.Config.Cmd, commitStr) +} + +// MAINTAINER some text +// +// Sets the maintainer metadata. +func maintainer(b *builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return fmt.Errorf("MAINTAINER requires exactly one argument") + } + + if err := b.BuilderFlags.Parse(); err != nil { + return err + } + + b.maintainer = args[0] + return b.commit("", b.Config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer)) +} + +// LABEL some json data describing the image +// +// Sets the Label variable foo to bar, +// +func label(b *builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return fmt.Errorf("LABEL requires at least one argument") + } + if len(args)%2 != 0 { + // should never get here, but just in case + return fmt.Errorf("Bad input to LABEL, too many args") + } + + if err := b.BuilderFlags.Parse(); err != nil { + return err + } + + commitStr := "LABEL" + + if b.Config.Labels == nil { + b.Config.Labels = map[string]string{} + } + + for j := 0; j < len(args); j++ { + // name ==> args[j] + // value ==> args[j+1] + newVar := args[j] + "=" + args[j+1] + "" + commitStr += " " + newVar + + b.Config.Labels[args[j]] = args[j+1] + j++ + } + return b.commit("", b.Config.Cmd, commitStr) +} + +// ADD foo /path +// +// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling +// exist here. If you do not wish to have this automatic handling, use COPY. +// +func add(b *builder, args []string, attributes map[string]bool, original string) error { + if len(args) < 2 { + return fmt.Errorf("ADD requires at least two arguments") + } + + if err := b.BuilderFlags.Parse(); err != nil { + return err + } + + return b.runContextCommand(args, true, true, "ADD") +} + +// COPY foo /path +// +// Same as 'ADD' but without the tar and remote url handling. +// +func dispatchCopy(b *builder, args []string, attributes map[string]bool, original string) error { + if len(args) < 2 { + return fmt.Errorf("COPY requires at least two arguments") + } + + if err := b.BuilderFlags.Parse(); err != nil { + return err + } + + return b.runContextCommand(args, false, false, "COPY") +} + +// FROM imagename +// +// This sets the image the dockerfile will build on top of. 
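// [Editor's note] By the time env() and label() above run, the parser has
// already flattened both the `ENV k v` and the `ENV k1=v1 k2=v2` forms into an
// even-length list of alternating names and values, which is what the j += 2
// loops walk. Traced on an example input:
//
//	args := []string{"PATH", "/usr/local/bin", "DEBUG", "1"}
//	// j=0: newVar = "PATH=/usr/local/bin"; j=2: newVar = "DEBUG=1"
//	// resulting commit string: "ENV PATH=/usr/local/bin DEBUG=1"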
+// +func from(b *builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return fmt.Errorf("FROM requires one argument") + } + + if err := b.BuilderFlags.Parse(); err != nil { + return err + } + + name := args[0] + + if name == NoBaseImageSpecifier { + b.image = "" + b.noBaseImage = true + return nil + } + + image, err := b.Daemon.Repositories().LookupImage(name) + if b.Pull { + image, err = b.pullImage(name) + if err != nil { + return err + } + } + if err != nil { + if b.Daemon.Graph().IsNotExist(err, name) { + image, err = b.pullImage(name) + } + + // note that the top level err will still be !nil here if IsNotExist is + // not the error. This approach just simplifies the logic a bit. + if err != nil { + return err + } + } + + return b.processImageFrom(image) +} + +// ONBUILD RUN echo yo +// +// ONBUILD triggers run when the image is used in a FROM statement. +// +// ONBUILD handling has a lot of special-case functionality, the heading in +// evaluator.go and comments around dispatch() in the same file explain the +// special cases. search for 'OnBuild' in internals.go for additional special +// cases. +// +func onbuild(b *builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return fmt.Errorf("ONBUILD requires at least one argument") + } + + if err := b.BuilderFlags.Parse(); err != nil { + return err + } + + triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0])) + switch triggerInstruction { + case "ONBUILD": + return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) + } + + original = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(original, "") + + b.Config.OnBuild = append(b.Config.OnBuild, original) + return b.commit("", b.Config.Cmd, fmt.Sprintf("ONBUILD %s", original)) +} + +// WORKDIR /tmp +// +// Set the working directory for future RUN/CMD/etc statements. +// +func workdir(b *builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return fmt.Errorf("WORKDIR requires exactly one argument") + } + + if err := b.BuilderFlags.Parse(); err != nil { + return err + } + + // Note that workdir passed comes from the Dockerfile. Hence it is in + // Linux format using forward-slashes, even on Windows. However, + // b.Config.WorkingDir is in platform-specific notation (in other words + // on Windows will use `\` + workdir := args[0] + + isAbs := false + if runtime.GOOS == "windows" { + // Alternate processing for Windows here is necessary as we can't call + // filepath.IsAbs(workDir) as that would verify Windows style paths, + // along with drive-letters (eg c:\pathto\file.txt). We (arguably + // correctly or not) check for both forward and back slashes as this + // is what the 1.4.2 GoLang implementation of IsAbs() does in the + // isSlash() function. 
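// [Editor's note] The trigger-stripping regexp in onbuild() above removes the
// leading keyword case-insensitively, keeping only the instruction to replay
// later when the image is used as a base:
//
//	re := regexp.MustCompile(`(?i)^\s*ONBUILD\s*`)
//	re.ReplaceAllString("  onbuild RUN make", "") // "RUN make"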
+ isAbs = workdir[0] == '\\' || workdir[0] == '/' + } else { + isAbs = filepath.IsAbs(workdir) + } + + if !isAbs { + current := b.Config.WorkingDir + if runtime.GOOS == "windows" { + // Convert to Linux format before join + current = strings.Replace(current, "\\", "/", -1) + } + // Must use path.Join so works correctly on Windows, not filepath + workdir = path.Join("/", current, workdir) + } + + // Convert to platform specific format + if runtime.GOOS == "windows" { + workdir = strings.Replace(workdir, "/", "\\", -1) + } + b.Config.WorkingDir = workdir + + return b.commit("", b.Config.Cmd, fmt.Sprintf("WORKDIR %v", workdir)) +} + +// RUN some command yo +// +// run a command and commit the image. Args are automatically prepended with +// 'sh -c' under linux or 'cmd /S /C' under Windows, in the event there is +// only one argument. The difference in processing: +// +// RUN echo hi # sh -c echo hi (Linux) +// RUN echo hi # cmd /S /C echo hi (Windows) +// RUN [ "echo", "hi" ] # echo hi +// +func run(b *builder, args []string, attributes map[string]bool, original string) error { + if b.image == "" && !b.noBaseImage { + return fmt.Errorf("Please provide a source image with `from` prior to run") + } + + if err := b.BuilderFlags.Parse(); err != nil { + return err + } + + args = handleJSONArgs(args, attributes) + + if !attributes["json"] { + if runtime.GOOS != "windows" { + args = append([]string{"/bin/sh", "-c"}, args...) + } else { + args = append([]string{"cmd", "/S /C"}, args...) + } + } + + runCmd := flag.NewFlagSet("run", flag.ContinueOnError) + runCmd.SetOutput(ioutil.Discard) + runCmd.Usage = nil + + config, _, _, err := runconfig.Parse(runCmd, append([]string{b.image}, args...)) + if err != nil { + return err + } + + cmd := b.Config.Cmd + // set Cmd manually, this is special case only for Dockerfiles + b.Config.Cmd = config.Cmd + runconfig.Merge(b.Config, config) + + defer func(cmd *runconfig.Command) { b.Config.Cmd = cmd }(cmd) + + logrus.Debugf("[BUILDER] Command to be executed: %v", b.Config.Cmd) + + hit, err := b.probeCache() + if err != nil { + return err + } + if hit { + return nil + } + + c, err := b.create() + if err != nil { + return err + } + + // Ensure that we keep the container mounted until the commit + // to avoid unmounting and then mounting directly again + c.Mount() + defer c.Unmount() + + err = b.run(c) + if err != nil { + return err + } + if err := b.commit(c.ID, cmd, "run"); err != nil { + return err + } + + return nil +} + +// CMD foo +// +// Set the default command to run in the container (which may be empty). +// Argument handling is the same as RUN. +// +func cmd(b *builder, args []string, attributes map[string]bool, original string) error { + if err := b.BuilderFlags.Parse(); err != nil { + return err + } + + cmdSlice := handleJSONArgs(args, attributes) + + if !attributes["json"] { + if runtime.GOOS != "windows" { + cmdSlice = append([]string{"/bin/sh", "-c"}, cmdSlice...) + } else { + cmdSlice = append([]string{"cmd", "/S /C"}, cmdSlice...) + } + } + + b.Config.Cmd = runconfig.NewCommand(cmdSlice...) + + if err := b.commit("", b.Config.Cmd, fmt.Sprintf("CMD %q", cmdSlice)); err != nil { + return err + } + + if len(args) != 0 { + b.cmdSet = true + } + + return nil +} + +// ENTRYPOINT /usr/sbin/nginx +// +// Set the entrypoint (which defaults to sh -c on linux, or cmd /S /C on Windows) to +// /usr/sbin/nginx. Will accept the CMD as the arguments to /usr/sbin/nginx. 
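// [Editor's note] How workdir() above resolves a relative WORKDIR against the
// current one on Linux; path.Join also collapses duplicate slashes:
//
//	// with b.Config.WorkingDir == "/usr/src":
//	path.Join("/", "/usr/src", "app") // "/usr/src/app"
//	// an absolute argument skips the join and replaces the value wholesale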
+// +// Handles command processing similar to CMD and RUN, only b.Config.Entrypoint +// is initialized at NewBuilder time instead of through argument parsing. +// +func entrypoint(b *builder, args []string, attributes map[string]bool, original string) error { + if err := b.BuilderFlags.Parse(); err != nil { + return err + } + + parsed := handleJSONArgs(args, attributes) + + switch { + case attributes["json"]: + // ENTRYPOINT ["echo", "hi"] + b.Config.Entrypoint = runconfig.NewEntrypoint(parsed...) + case len(parsed) == 0: + // ENTRYPOINT [] + b.Config.Entrypoint = nil + default: + // ENTRYPOINT echo hi + if runtime.GOOS != "windows" { + b.Config.Entrypoint = runconfig.NewEntrypoint("/bin/sh", "-c", parsed[0]) + } else { + b.Config.Entrypoint = runconfig.NewEntrypoint("cmd", "/S /C", parsed[0]) + } + } + + // when setting the entrypoint if a CMD was not explicitly set then + // set the command to nil + if !b.cmdSet { + b.Config.Cmd = nil + } + + if err := b.commit("", b.Config.Cmd, fmt.Sprintf("ENTRYPOINT %q", b.Config.Entrypoint)); err != nil { + return err + } + + return nil +} + +// EXPOSE 6667/tcp 7000/tcp +// +// Expose ports for links and port mappings. This all ends up in +// b.Config.ExposedPorts for runconfig. +// +func expose(b *builder, args []string, attributes map[string]bool, original string) error { + portsTab := args + + if len(args) == 0 { + return fmt.Errorf("EXPOSE requires at least one argument") + } + + if err := b.BuilderFlags.Parse(); err != nil { + return err + } + + if b.Config.ExposedPorts == nil { + b.Config.ExposedPorts = make(nat.PortSet) + } + + ports, _, err := nat.ParsePortSpecs(portsTab) + if err != nil { + return err + } + + // instead of using ports directly, we build a list of ports and sort it so + // the order is consistent. This prevents cache burst where map ordering + // changes between builds + portList := make([]string, len(ports)) + var i int + for port := range ports { + if _, exists := b.Config.ExposedPorts[port]; !exists { + b.Config.ExposedPorts[port] = struct{}{} + } + portList[i] = string(port) + i++ + } + sort.Strings(portList) + return b.commit("", b.Config.Cmd, fmt.Sprintf("EXPOSE %s", strings.Join(portList, " "))) +} + +// USER foo +// +// Set the user to 'foo' for future commands and when running the +// ENTRYPOINT/CMD at container run time. +// +func user(b *builder, args []string, attributes map[string]bool, original string) error { + if runtime.GOOS == "windows" { + return fmt.Errorf("USER is not supported on Windows") + } + + if len(args) != 1 { + return fmt.Errorf("USER requires exactly one argument") + } + + if err := b.BuilderFlags.Parse(); err != nil { + return err + } + + b.Config.User = args[0] + return b.commit("", b.Config.Cmd, fmt.Sprintf("USER %v", args)) +} + +// VOLUME /foo +// +// Expose the volume /foo for use. Will also accept the JSON array form. 
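// [Editor's note] The sort in expose() above is what keeps the commit string,
// and therefore the build-cache key, deterministic: Go randomizes map
// iteration order, so without it the same Dockerfile line could produce
// differently ordered "EXPOSE ..." strings and bust the cache. Sketch:
//
//	ports, _, _ := nat.ParsePortSpecs([]string{"7000/tcp", "6667/tcp"})
//	list := make([]string, 0, len(ports))
//	for p := range ports {
//		list = append(list, string(p)) // arbitrary order
//	}
//	sort.Strings(list) // ["6667/tcp" "7000/tcp"], every build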
+//
+func volume(b *builder, args []string, attributes map[string]bool, original string) error {
+	if runtime.GOOS == "windows" {
+		return fmt.Errorf("VOLUME is not supported on Windows")
+	}
+	if len(args) == 0 {
+		return fmt.Errorf("VOLUME requires at least one argument")
+	}
+
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
+	}
+
+	if b.Config.Volumes == nil {
+		b.Config.Volumes = map[string]struct{}{}
+	}
+	for _, v := range args {
+		v = strings.TrimSpace(v)
+		if v == "" {
+			return fmt.Errorf("Volume specified can not be an empty string")
+		}
+		b.Config.Volumes[v] = struct{}{}
+	}
+	if err := b.commit("", b.Config.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/builder/evaluator.go b/builder/evaluator.go
new file mode 100644
index 00000000..0a59a05d
--- /dev/null
+++ b/builder/evaluator.go
@@ -0,0 +1,351 @@
+// Package builder is the evaluation step in the Dockerfile parse/evaluate pipeline.
+//
+// It incorporates a dispatch table based on the parser.Node values (see the
+// parser package for more information) that are yielded from the parser itself.
+// Calling NewBuilder with the BuildOpts struct can be used to customize the
+// experience for execution purposes only. Parsing is controlled in the parser
+// package, and this division of responsibility should be respected.
+//
+// Please see the jump table targets for the actual invocations, most of which
+// will call out to the functions in internals.go to deal with their tasks.
+//
+// ONBUILD is a special case, which is covered in the onbuild() func in
+// dispatchers.go.
+//
+// The evaluator uses the concept of "steps", which are usually each processable
+// line in the Dockerfile. Each step is numbered and certain actions are taken
+// before and after each step, such as creating an image ID and removing temporary
+// containers and images. Note that ONBUILD creates a kinda-sorta "sub run" which
+// includes its own set of steps (usually only one of them).
+package builder
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api"
+	"github.com/docker/docker/builder/command"
+	"github.com/docker/docker/builder/parser"
+	"github.com/docker/docker/cliconfig"
+	"github.com/docker/docker/daemon"
+	"github.com/docker/docker/pkg/fileutils"
+	"github.com/docker/docker/pkg/streamformatter"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/docker/pkg/symlink"
+	"github.com/docker/docker/pkg/tarsum"
+	"github.com/docker/docker/pkg/ulimit"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/utils"
+)
+
+// Environment variable interpolation will happen on these statements only.
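// [Editor's note] replaceEnvAllowed below is consulted in dispatch(): only the
// listed instructions get build-time ${VAR} interpolation via ProcessWord,
// while RUN/CMD/ENTRYPOINT are deliberately absent so variables reach the
// shell unexpanded at run time. A sketch, assuming ProcessWord keeps its
// (word, env) signature from this package:
//
//	out, _ := ProcessWord("$GOPATH/bin", []string{"GOPATH=/go"})
//	// out == "/go/bin"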
+var replaceEnvAllowed = map[string]struct{}{ + command.Env: {}, + command.Label: {}, + command.Add: {}, + command.Copy: {}, + command.Workdir: {}, + command.Expose: {}, + command.Volume: {}, + command.User: {}, +} + +var evaluateTable map[string]func(*builder, []string, map[string]bool, string) error + +func init() { + evaluateTable = map[string]func(*builder, []string, map[string]bool, string) error{ + command.Env: env, + command.Label: label, + command.Maintainer: maintainer, + command.Add: add, + command.Copy: dispatchCopy, // copy() is a go builtin + command.From: from, + command.Onbuild: onbuild, + command.Workdir: workdir, + command.Run: run, + command.Cmd: cmd, + command.Entrypoint: entrypoint, + command.Expose: expose, + command.Volume: volume, + command.User: user, + } +} + +// builder is an internal struct, used to maintain configuration of the Dockerfile's +// processing as it evaluates the parsing result. +type builder struct { + Daemon *daemon.Daemon + + // effectively stdio for the run. Because it is not stdio, I said + // "Effectively". Do not use stdio anywhere in this package for any reason. + OutStream io.Writer + ErrStream io.Writer + + Verbose bool + UtilizeCache bool + cacheBusted bool + + // controls how images and containers are handled between steps. + Remove bool + ForceRemove bool + Pull bool + + // set this to true if we want the builder to not commit between steps. + // This is useful when we only want to use the evaluator table to generate + // the final configs of the Dockerfile but dont want the layers + disableCommit bool + + // Registry server auth configs used to pull images when handling `FROM`. + AuthConfigs map[string]cliconfig.AuthConfig + + // Deprecated, original writer used for ImagePull. To be removed. + OutOld io.Writer + StreamFormatter *streamformatter.StreamFormatter + + Config *runconfig.Config // runconfig for cmd, run, entrypoint etc. + + // both of these are controlled by the Remove and ForceRemove options in BuildOpts + TmpContainers map[string]struct{} // a map of containers used for removes + + dockerfileName string // name of Dockerfile + dockerfile *parser.Node // the syntax tree of the dockerfile + image string // image name for commit processing + maintainer string // maintainer name. could probably be removed. + cmdSet bool // indicates is CMD was set in current Dockerfile + BuilderFlags *BFlags // current cmd's BuilderFlags - temporary + context tarsum.TarSum // the context is a tarball that is uploaded by the client + contextPath string // the path of the temporary directory the local context is unpacked to (server side) + noBaseImage bool // indicates that this build does not start from any base image, but is being built from an empty file system. + + // Set resource restrictions for build containers + cpuSetCpus string + cpuSetMems string + cpuShares int64 + cpuPeriod int64 + cpuQuota int64 + cgroupParent string + memory int64 + memorySwap int64 + ulimits []*ulimit.Ulimit + + cancelled <-chan struct{} // When closed, job was cancelled. + + activeImages []string + id string // Used to hold reference images +} + +// Run the builder with the context. This is the lynchpin of this package. This +// will (barring errors): +// +// * call readContext() which will set up the temporary directory and unpack +// the context into it. +// * read the dockerfile +// * parse the dockerfile +// * walk the parse tree and execute it by dispatching to handlers. 
If Remove +// or ForceRemove is set, additional cleanup around containers happens after +// processing. +// * Print a happy message and return the image ID. +// +func (b *builder) Run(context io.Reader) (string, error) { + if err := b.readContext(context); err != nil { + return "", err + } + + defer func() { + if err := os.RemoveAll(b.contextPath); err != nil { + logrus.Debugf("[BUILDER] failed to remove temporary context: %s", err) + } + }() + + if err := b.readDockerfile(); err != nil { + return "", err + } + + // some initializations that would not have been supplied by the caller. + b.Config = &runconfig.Config{} + + b.TmpContainers = map[string]struct{}{} + + for i, n := range b.dockerfile.Children { + select { + case <-b.cancelled: + logrus.Debug("Builder: build cancelled!") + fmt.Fprintf(b.OutStream, "Build cancelled") + return "", fmt.Errorf("Build cancelled") + default: + // Not cancelled yet, keep going... + } + if err := b.dispatch(i, n); err != nil { + if b.ForceRemove { + b.clearTmp() + } + return "", err + } + fmt.Fprintf(b.OutStream, " ---> %s\n", stringid.TruncateID(b.image)) + if b.Remove { + b.clearTmp() + } + } + + if b.image == "" { + return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?") + } + + fmt.Fprintf(b.OutStream, "Successfully built %s\n", stringid.TruncateID(b.image)) + return b.image, nil +} + +// Reads a Dockerfile from the current context. It assumes that the +// 'filename' is a relative path from the root of the context +func (b *builder) readDockerfile() error { + // If no -f was specified then look for 'Dockerfile'. If we can't find + // that then look for 'dockerfile'. If neither are found then default + // back to 'Dockerfile' and use that in the error message. + if b.dockerfileName == "" { + b.dockerfileName = api.DefaultDockerfileName + tmpFN := filepath.Join(b.contextPath, api.DefaultDockerfileName) + if _, err := os.Lstat(tmpFN); err != nil { + tmpFN = filepath.Join(b.contextPath, strings.ToLower(api.DefaultDockerfileName)) + if _, err := os.Lstat(tmpFN); err == nil { + b.dockerfileName = strings.ToLower(api.DefaultDockerfileName) + } + } + } + + origFile := b.dockerfileName + + filename, err := symlink.FollowSymlinkInScope(filepath.Join(b.contextPath, origFile), b.contextPath) + if err != nil { + return fmt.Errorf("The Dockerfile (%s) must be within the build context", origFile) + } + + fi, err := os.Lstat(filename) + if os.IsNotExist(err) { + return fmt.Errorf("Cannot locate specified Dockerfile: %s", origFile) + } + if fi.Size() == 0 { + return fmt.Errorf("The Dockerfile (%s) cannot be empty", origFile) + } + + f, err := os.Open(filename) + if err != nil { + return err + } + + b.dockerfile, err = parser.Parse(f) + f.Close() + + if err != nil { + return err + } + + // After the Dockerfile has been parsed, we need to check the .dockerignore + // file for either "Dockerfile" or ".dockerignore", and if either are + // present then erase them from the build context. These files should never + // have been sent from the client but we did send them to make sure that + // we had the Dockerfile to actually parse, and then we also need the + // .dockerignore file to know whether either file should be removed. + // Note that this assumes the Dockerfile has been read into memory and + // is now safe to be removed. 
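// [Editor's note] The whole pipeline Run() above drives, reduced to its core
// as a sketch with error handling elided:
//
//	ast, _ := parser.Parse(strings.NewReader("FROM busybox\nRUN echo hi\n"))
//	for i, n := range ast.Children {
//		_ = b.dispatch(i, n) // each child node is one Dockerfile line
//	}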
+ + excludes, _ := utils.ReadDockerIgnore(filepath.Join(b.contextPath, ".dockerignore")) + if rm, _ := fileutils.Matches(".dockerignore", excludes); rm == true { + os.Remove(filepath.Join(b.contextPath, ".dockerignore")) + b.context.(tarsum.BuilderContext).Remove(".dockerignore") + } + if rm, _ := fileutils.Matches(b.dockerfileName, excludes); rm == true { + os.Remove(filepath.Join(b.contextPath, b.dockerfileName)) + b.context.(tarsum.BuilderContext).Remove(b.dockerfileName) + } + + return nil +} + +// This method is the entrypoint to all statement handling routines. +// +// Almost all nodes will have this structure: +// Child[Node, Node, Node] where Child is from parser.Node.Children and each +// node comes from parser.Node.Next. This forms a "line" with a statement and +// arguments and we process them in this normalized form by hitting +// evaluateTable with the leaf nodes of the command and the Builder object. +// +// ONBUILD is a special case; in this case the parser will emit: +// Child[Node, Child[Node, Node...]] where the first node is the literal +// "onbuild" and the child entrypoint is the command of the ONBUILD statmeent, +// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to +// deal with that, at least until it becomes more of a general concern with new +// features. +func (b *builder) dispatch(stepN int, ast *parser.Node) error { + cmd := ast.Value + attrs := ast.Attributes + original := ast.Original + flags := ast.Flags + strs := []string{} + msg := fmt.Sprintf("Step %d : %s", stepN, strings.ToUpper(cmd)) + + if len(ast.Flags) > 0 { + msg += " " + strings.Join(ast.Flags, " ") + } + + if cmd == "onbuild" { + if ast.Next == nil { + return fmt.Errorf("ONBUILD requires at least one argument") + } + ast = ast.Next.Children[0] + strs = append(strs, ast.Value) + msg += " " + ast.Value + + if len(ast.Flags) > 0 { + msg += " " + strings.Join(ast.Flags, " ") + } + + } + + // count the number of nodes that we are going to traverse first + // so we can pre-create the argument and message array. This speeds up the + // allocation of those list a lot when they have a lot of arguments + cursor := ast + var n int + for cursor.Next != nil { + cursor = cursor.Next + n++ + } + l := len(strs) + strList := make([]string, n+l) + copy(strList, strs) + msgList := make([]string, n) + + var i int + for ast.Next != nil { + ast = ast.Next + var str string + str = ast.Value + if _, ok := replaceEnvAllowed[cmd]; ok { + var err error + str, err = ProcessWord(ast.Value, b.Config.Env) + if err != nil { + return err + } + } + strList[i+l] = str + msgList[i] = ast.Value + i++ + } + + msg += " " + strings.Join(msgList, " ") + fmt.Fprintln(b.OutStream, msg) + + // XXX yes, we skip any cmds that are not valid; the parser should have + // picked these out already. + if f, ok := evaluateTable[cmd]; ok { + b.BuilderFlags = NewBFlags() + b.BuilderFlags.Args = flags + return f(b, strList, attrs, original) + } + + return fmt.Errorf("Unknown instruction: %s", strings.ToUpper(cmd)) +} diff --git a/builder/internals.go b/builder/internals.go new file mode 100644 index 00000000..a8e14160 --- /dev/null +++ b/builder/internals.go @@ -0,0 +1,808 @@ +package builder + +// internals for handling commands. Covers many areas and a lot of +// non-contiguous functionality. Please read the comments. 
+ +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "runtime" + "sort" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/builder/parser" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/daemon" + "github.com/docker/docker/graph" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/progressreader" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/tarsum" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" +) + +func (b *builder) readContext(context io.Reader) (err error) { + tmpdirPath, err := ioutil.TempDir("", "docker-build") + if err != nil { + return + } + + // Make sure we clean-up upon error. In the happy case the caller + // is expected to manage the clean-up + defer func() { + if err != nil { + if e := os.RemoveAll(tmpdirPath); e != nil { + logrus.Debugf("[BUILDER] failed to remove temporary context: %s", e) + } + } + }() + + decompressedStream, err := archive.DecompressStream(context) + if err != nil { + return + } + + if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version1); err != nil { + return + } + + if err = chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil { + return + } + + b.contextPath = tmpdirPath + return +} + +func (b *builder) commit(id string, autoCmd *runconfig.Command, comment string) error { + if b.disableCommit { + return nil + } + if b.image == "" && !b.noBaseImage { + return fmt.Errorf("Please provide a source image with `from` prior to commit") + } + b.Config.Image = b.image + if id == "" { + cmd := b.Config.Cmd + if runtime.GOOS != "windows" { + b.Config.Cmd = runconfig.NewCommand("/bin/sh", "-c", "#(nop) "+comment) + } else { + b.Config.Cmd = runconfig.NewCommand("cmd", "/S /C", "REM (nop) "+comment) + } + defer func(cmd *runconfig.Command) { b.Config.Cmd = cmd }(cmd) + + hit, err := b.probeCache() + if err != nil { + return err + } + if hit { + return nil + } + + container, err := b.create() + if err != nil { + return err + } + id = container.ID + + if err := container.Mount(); err != nil { + return err + } + defer container.Unmount() + } + container, err := b.Daemon.Get(id) + if err != nil { + return err + } + + // Note: Actually copy the struct + autoConfig := *b.Config + autoConfig.Cmd = autoCmd + + commitCfg := &daemon.ContainerCommitConfig{ + Author: b.maintainer, + Pause: true, + Config: &autoConfig, + } + + // Commit the container + image, err := b.Daemon.Commit(container, commitCfg) + if err != nil { + return err + } + b.Daemon.Graph().Retain(b.id, image.ID) + b.activeImages = append(b.activeImages, image.ID) + b.image = image.ID + return nil +} + +type copyInfo struct { + origPath string + destPath string + hash string + decompress bool + tmpDir string +} + +func (b *builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error { + if b.context == nil { + return fmt.Errorf("No context given. 
Impossible to use %s", cmdName) + } + + if len(args) < 2 { + return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName) + } + + // Work in daemon-specific filepath semantics + dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest + + copyInfos := []*copyInfo{} + + b.Config.Image = b.image + + defer func() { + for _, ci := range copyInfos { + if ci.tmpDir != "" { + os.RemoveAll(ci.tmpDir) + } + } + }() + + // Loop through each src file and calculate the info we need to + // do the copy (e.g. hash value if cached). Don't actually do + // the copy until we've looked at all src files + for _, orig := range args[0 : len(args)-1] { + if err := calcCopyInfo( + b, + cmdName, + ©Infos, + orig, + dest, + allowRemote, + allowDecompression, + true, + ); err != nil { + return err + } + } + + if len(copyInfos) == 0 { + return fmt.Errorf("No source files were specified") + } + if len(copyInfos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) { + return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName) + } + + // For backwards compat, if there's just one CI then use it as the + // cache look-up string, otherwise hash 'em all into one + var srcHash string + var origPaths string + + if len(copyInfos) == 1 { + srcHash = copyInfos[0].hash + origPaths = copyInfos[0].origPath + } else { + var hashs []string + var origs []string + for _, ci := range copyInfos { + hashs = append(hashs, ci.hash) + origs = append(origs, ci.origPath) + } + hasher := sha256.New() + hasher.Write([]byte(strings.Join(hashs, ","))) + srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil)) + origPaths = strings.Join(origs, " ") + } + + cmd := b.Config.Cmd + if runtime.GOOS != "windows" { + b.Config.Cmd = runconfig.NewCommand("/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest)) + } else { + b.Config.Cmd = runconfig.NewCommand("cmd", "/S /C", fmt.Sprintf("REM (nop) %s %s in %s", cmdName, srcHash, dest)) + } + defer func(cmd *runconfig.Command) { b.Config.Cmd = cmd }(cmd) + + hit, err := b.probeCache() + if err != nil { + return err + } + + if hit { + return nil + } + + container, _, err := b.Daemon.Create(b.Config, nil, "") + if err != nil { + return err + } + b.TmpContainers[container.ID] = struct{}{} + + if err := container.Mount(); err != nil { + return err + } + defer container.Unmount() + + if err := container.PrepareStorage(); err != nil { + return err + } + + for _, ci := range copyInfos { + if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil { + return err + } + } + + if err := container.CleanupStorage(); err != nil { + return err + } + + if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil { + return err + } + return nil +} + +func calcCopyInfo(b *builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool, allowWildcards bool) error { + + // Work in daemon-specific OS filepath semantics. However, we save + // the the origPath passed in here, as it might also be a URL which + // we need to check for in this function. 
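// [Editor's note] The cache key runContextCommand above synthesizes for a
// multi-source ADD/COPY is order-sensitive: it joins the per-source hashes
// with "," and fingerprints the result, then commits a no-op command built
// from it. Traced on hypothetical hash values:
//
//	hasher := sha256.New()
//	hasher.Write([]byte(strings.Join([]string{"file:aaa", "file:bbb"}, ",")))
//	srcHash := "multi:" + hex.EncodeToString(hasher.Sum(nil))
//	// probe command recorded for caching, e.g.:
//	//   /bin/sh -c #(nop) ADD multi:<hex> in /dest/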
+ passedInOrigPath := origPath + origPath = filepath.FromSlash(origPath) + destPath = filepath.FromSlash(destPath) + + if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 { + origPath = origPath[1:] + } + origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator)) + + // Twiddle the destPath when its a relative path - meaning, make it + // relative to the WORKINGDIR + if !filepath.IsAbs(destPath) { + hasSlash := strings.HasSuffix(destPath, string(os.PathSeparator)) + destPath = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.Config.WorkingDir), destPath) + + // Make sure we preserve any trailing slash + if hasSlash { + destPath += string(os.PathSeparator) + } + } + + // In the remote/URL case, download it and gen its hashcode + if urlutil.IsURL(passedInOrigPath) { + + // As it's a URL, we go back to processing on what was passed in + // to this function + origPath = passedInOrigPath + + if !allowRemote { + return fmt.Errorf("Source can't be a URL for %s", cmdName) + } + + ci := copyInfo{} + ci.origPath = origPath + ci.hash = origPath // default to this but can change + ci.destPath = destPath + ci.decompress = false + *cInfos = append(*cInfos, &ci) + + // Initiate the download + resp, err := httputils.Download(ci.origPath) + if err != nil { + return err + } + + // Create a tmp dir + tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote") + if err != nil { + return err + } + ci.tmpDir = tmpDirName + + // Create a tmp file within our tmp dir + tmpFileName := filepath.Join(tmpDirName, "tmp") + tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return err + } + + // Download and dump result to tmp file + if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{ + In: resp.Body, + Out: b.OutOld, + Formatter: b.StreamFormatter, + Size: int(resp.ContentLength), + NewLines: true, + ID: "", + Action: "Downloading", + })); err != nil { + tmpFile.Close() + return err + } + fmt.Fprintf(b.OutStream, "\n") + tmpFile.Close() + + // Set the mtime to the Last-Modified header value if present + // Otherwise just remove atime and mtime + times := make([]syscall.Timespec, 2) + + lastMod := resp.Header.Get("Last-Modified") + if lastMod != "" { + mTime, err := http.ParseTime(lastMod) + // If we can't parse it then just let it default to 'zero' + // otherwise use the parsed time value + if err == nil { + times[1] = syscall.NsecToTimespec(mTime.UnixNano()) + } + } + + if err := system.UtimesNano(tmpFileName, times); err != nil { + return err + } + + ci.origPath = filepath.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName)) + + // If the destination is a directory, figure out the filename. 
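// [Editor's note] The filename derivation implemented just below, traced on a
// Unix daemon where os.PathSeparator is '/'; a bare host URL (path "/")
// yields "" and is rejected with an error:
//
//	u, _ := url.Parse("https://example.com/downloads/app.tar.gz")
//	parts := strings.Split(strings.TrimSuffix(u.Path, "/"), "/")
//	filename := parts[len(parts)-1] // "app.tar.gz"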
+	if strings.HasSuffix(ci.destPath, string(os.PathSeparator)) {
+		u, err := url.Parse(origPath)
+		if err != nil {
+			return err
+		}
+		path := u.Path
+		if strings.HasSuffix(path, string(os.PathSeparator)) {
+			path = path[:len(path)-1]
+		}
+		parts := strings.Split(path, string(os.PathSeparator))
+		filename := parts[len(parts)-1]
+		if filename == "" {
+			return fmt.Errorf("cannot determine filename from url: %s", u)
+		}
+		ci.destPath = ci.destPath + filename
+	}
+
+	// Calc the checksum, even if we're using the cache
+	r, err := archive.Tar(tmpFileName, archive.Uncompressed)
+	if err != nil {
+		return err
+	}
+	tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1)
+	if err != nil {
+		return err
+	}
+	if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
+		return err
+	}
+	ci.hash = tarSum.Sum(nil)
+	r.Close()
+
+	return nil
+	}
+
+	// Deal with wildcards
+	if allowWildcards && containsWildcards(origPath) {
+		for _, fileInfo := range b.context.GetSums() {
+			if fileInfo.Name() == "" {
+				continue
+			}
+			match, _ := filepath.Match(origPath, fileInfo.Name())
+			if !match {
+				continue
+			}
+
+			// Note we set allowWildcards to false in case the name has
+			// a * in it
+			calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression, false)
+		}
+		return nil
+	}
+
+	// Must be a dir or a file
+
+	if err := b.checkPathForAddition(origPath); err != nil {
+		return err
+	}
+	fi, _ := os.Stat(filepath.Join(b.contextPath, origPath))
+
+	ci := copyInfo{}
+	ci.origPath = origPath
+	ci.hash = origPath
+	ci.destPath = destPath
+	ci.decompress = allowDecompression
+	*cInfos = append(*cInfos, &ci)
+
+	// Deal with the single file case
+	if !fi.IsDir() {
+		// This will match first file in sums of the archive
+		fis := b.context.GetSums().GetFile(ci.origPath)
+		if fis != nil {
+			ci.hash = "file:" + fis.Sum()
+		}
+		return nil
+	}
+
+	// Must be a dir
+	var subfiles []string
+	absOrigPath := filepath.Join(b.contextPath, ci.origPath)
+
+	// Add a trailing / to make sure we only pick up nested files under
+	// the dir and not sibling files of the dir that just happen to
+	// start with the same chars
+	if !strings.HasSuffix(absOrigPath, string(os.PathSeparator)) {
+		absOrigPath += string(os.PathSeparator)
+	}
+
+	// Need path w/o slash too to find matching dir w/o trailing slash
+	absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1]
+
+	for _, fileInfo := range b.context.GetSums() {
+		absFile := filepath.Join(b.contextPath, fileInfo.Name())
+		// Any file in the context that starts with the given path will be
+		// picked up and its hashcode used. However, we'll exclude the
+		// root dir itself. We do this for a couple of reasons:
+		// 1 - ADD/COPY will not copy the dir itself, just its children
+		//     so there's no reason to include it in the hash calc
+		// 2 - the metadata on the dir will change when any child file
+		//     changes. This will lead to a miss in the cache check if that
+		//     child file is in the .dockerignore list.
+		if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash {
+			subfiles = append(subfiles, fileInfo.Sum())
+		}
+	}
+	sort.Strings(subfiles)
+	hasher := sha256.New()
+	hasher.Write([]byte(strings.Join(subfiles, ",")))
+	ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))
+
+	return nil
+}
+
+func containsWildcards(name string) bool {
+	for i := 0; i < len(name); i++ {
+		ch := name[i]
+		if ch == '\\' {
+			i++
+		} else if ch == '*' || ch == '?' 
|| ch == '[' { + return true + } + } + return false +} + +func (b *builder) pullImage(name string) (*image.Image, error) { + remote, tag := parsers.ParseRepositoryTag(name) + if tag == "" { + tag = "latest" + } + + pullRegistryAuth := &cliconfig.AuthConfig{} + if len(b.AuthConfigs) > 0 { + // The request came with a full auth config file, we prefer to use that + repoInfo, err := b.Daemon.RegistryService.ResolveRepository(remote) + if err != nil { + return nil, err + } + + resolvedConfig := registry.ResolveAuthConfig( + &cliconfig.ConfigFile{AuthConfigs: b.AuthConfigs}, + repoInfo.Index, + ) + pullRegistryAuth = &resolvedConfig + } + + imagePullConfig := &graph.ImagePullConfig{ + AuthConfig: pullRegistryAuth, + OutStream: ioutils.NopWriteCloser(b.OutOld), + } + + if err := b.Daemon.Repositories().Pull(remote, tag, imagePullConfig); err != nil { + return nil, err + } + + image, err := b.Daemon.Repositories().LookupImage(name) + if err != nil { + return nil, err + } + + return image, nil +} + +func (b *builder) processImageFrom(img *image.Image) error { + b.image = img.ID + + if img.Config != nil { + b.Config = img.Config + } + + // The default path will be blank on Windows (set by HCS) + if len(b.Config.Env) == 0 && daemon.DefaultPathEnv != "" { + b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv) + } + + // Process ONBUILD triggers if they exist + if nTriggers := len(b.Config.OnBuild); nTriggers != 0 { + fmt.Fprintf(b.ErrStream, "# Executing %d build triggers\n", nTriggers) + } + + // Copy the ONBUILD triggers, and remove them from the config, since the config will be committed. + onBuildTriggers := b.Config.OnBuild + b.Config.OnBuild = []string{} + + // parse the ONBUILD triggers by invoking the parser + for stepN, step := range onBuildTriggers { + ast, err := parser.Parse(strings.NewReader(step)) + if err != nil { + return err + } + + for i, n := range ast.Children { + switch strings.ToUpper(n.Value) { + case "ONBUILD": + return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value) + } + + fmt.Fprintf(b.OutStream, "Trigger %d, %s\n", stepN, step) + + if err := b.dispatch(i, n); err != nil { + return err + } + } + } + + return nil +} + +// probeCache checks to see if image-caching is enabled (`b.UtilizeCache`) +// and if so attempts to look up the current `b.image` and `b.Config` pair +// in the current server `b.Daemon`. If an image is found, probeCache returns +// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there +// is any error, it returns `(false, err)`. 
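+//
+// A hedged usage sketch, mirroring the call sites elsewhere in this file:
+//
+//	if hit, err := b.probeCache(); err != nil {
+//		return err
+//	} else if hit {
+//		return nil // cache hit: b.image now points at the cached image ID
+//	}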
+func (b *builder) probeCache() (bool, error) { + if !b.UtilizeCache || b.cacheBusted { + return false, nil + } + + cache, err := b.Daemon.ImageGetCached(b.image, b.Config) + if err != nil { + return false, err + } + if cache == nil { + logrus.Debugf("[BUILDER] Cache miss") + b.cacheBusted = true + return false, nil + } + + fmt.Fprintf(b.OutStream, " ---> Using cache\n") + logrus.Debugf("[BUILDER] Use cached version") + b.image = cache.ID + b.Daemon.Graph().Retain(b.id, cache.ID) + b.activeImages = append(b.activeImages, cache.ID) + return true, nil +} + +func (b *builder) create() (*daemon.Container, error) { + if b.image == "" && !b.noBaseImage { + return nil, fmt.Errorf("Please provide a source image with `from` prior to run") + } + b.Config.Image = b.image + + hostConfig := &runconfig.HostConfig{ + CpuShares: b.cpuShares, + CpuPeriod: b.cpuPeriod, + CpuQuota: b.cpuQuota, + CpusetCpus: b.cpuSetCpus, + CpusetMems: b.cpuSetMems, + CgroupParent: b.cgroupParent, + Memory: b.memory, + MemorySwap: b.memorySwap, + Ulimits: b.ulimits, + } + + config := *b.Config + + // Create the container + c, warnings, err := b.Daemon.Create(b.Config, hostConfig, "") + if err != nil { + return nil, err + } + for _, warning := range warnings { + fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning) + } + + b.TmpContainers[c.ID] = struct{}{} + fmt.Fprintf(b.OutStream, " ---> Running in %s\n", stringid.TruncateID(c.ID)) + + if config.Cmd.Len() > 0 { + // override the entry point that may have been picked up from the base image + s := config.Cmd.Slice() + c.Path = s[0] + c.Args = s[1:] + } else { + config.Cmd = runconfig.NewCommand() + } + + return c, nil +} + +func (b *builder) run(c *daemon.Container) error { + var errCh chan error + if b.Verbose { + errCh = c.Attach(nil, b.OutStream, b.ErrStream) + } + + //start the container + if err := c.Start(); err != nil { + return err + } + + finished := make(chan struct{}) + defer close(finished) + go func() { + select { + case <-b.cancelled: + logrus.Debugln("Build cancelled, killing container:", c.ID) + c.Kill() + case <-finished: + } + }() + + if b.Verbose { + // Block on reading output from container, stop on err or chan closed + if err := <-errCh; err != nil { + return err + } + } + + // Wait for it to finish + if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 { + return &jsonmessage.JSONError{ + Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", b.Config.Cmd.ToString(), ret), + Code: ret, + } + } + + return nil +} + +func (b *builder) checkPathForAddition(orig string) error { + origPath := filepath.Join(b.contextPath, orig) + origPath, err := filepath.EvalSymlinks(origPath) + if err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("%s: no such file or directory", orig) + } + return err + } + contextPath, err := filepath.EvalSymlinks(b.contextPath) + if err != nil { + return err + } + if !strings.HasPrefix(origPath, contextPath) { + return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath) + } + if _, err := os.Stat(origPath); err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("%s: no such file or directory", orig) + } + return err + } + return nil +} + +func (b *builder) addContext(container *daemon.Container, orig, dest string, decompress bool) error { + var ( + err error + destExists = true + origPath = filepath.Join(b.contextPath, orig) + destPath string + ) + + // Work in daemon-local OS specific file paths + dest = filepath.FromSlash(dest) + + destPath, err = container.GetResourcePath(dest) + 
if err != nil { + return err + } + + // Preserve the trailing slash + if strings.HasSuffix(dest, string(os.PathSeparator)) || dest == "." { + destPath = destPath + string(os.PathSeparator) + } + + destStat, err := os.Stat(destPath) + if err != nil { + if !os.IsNotExist(err) { + logrus.Errorf("Error performing os.Stat on %s. %s", destPath, err) + return err + } + destExists = false + } + + fi, err := os.Stat(origPath) + if err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("%s: no such file or directory", orig) + } + return err + } + + if fi.IsDir() { + return copyAsDirectory(origPath, destPath, destExists) + } + + // If we are adding a remote file (or we've been told not to decompress), do not try to untar it + if decompress { + // First try to unpack the source as an archive + // to support the untar feature we need to clean up the path a little bit + // because tar is very forgiving. First we need to strip off the archive's + // filename from the path but this is only added if it does not end in slash + tarDest := destPath + if strings.HasSuffix(tarDest, string(os.PathSeparator)) { + tarDest = filepath.Dir(destPath) + } + + // try to successfully untar the orig + if err := chrootarchive.UntarPath(origPath, tarDest); err == nil { + return nil + } else if err != io.EOF { + logrus.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err) + } + } + + if err := system.MkdirAll(filepath.Dir(destPath), 0755); err != nil { + return err + } + if err := chrootarchive.CopyWithTar(origPath, destPath); err != nil { + return err + } + + resPath := destPath + if destExists && destStat.IsDir() { + resPath = filepath.Join(destPath, filepath.Base(origPath)) + } + + return fixPermissions(origPath, resPath, 0, 0, destExists) +} + +func copyAsDirectory(source, destination string, destExisted bool) error { + if err := chrootarchive.CopyWithTar(source, destination); err != nil { + return err + } + return fixPermissions(source, destination, 0, 0, destExisted) +} + +func (b *builder) clearTmp() { + for c := range b.TmpContainers { + rmConfig := &daemon.ContainerRmConfig{ + ForceRemove: true, + RemoveVolume: true, + } + if err := b.Daemon.ContainerRm(c, rmConfig); err != nil { + fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err) + return + } + delete(b.TmpContainers, c) + fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", stringid.TruncateID(c)) + } +} diff --git a/builder/internals_linux.go b/builder/internals_linux.go new file mode 100644 index 00000000..76308c68 --- /dev/null +++ b/builder/internals_linux.go @@ -0,0 +1,40 @@ +// +build linux + +package builder + +import ( + "os" + "path/filepath" +) + +func fixPermissions(source, destination string, uid, gid int, destExisted bool) error { + // If the destination didn't already exist, or the destination isn't a + // directory, then we should Lchown the destination. Otherwise, we shouldn't + // Lchown the destination. + destStat, err := os.Stat(destination) + if err != nil { + // This should *never* be reached, because the destination must've already + // been created while untar-ing the context. + return err + } + doChownDestination := !destExisted || !destStat.IsDir() + + // We Walk on the source rather than on the destination because we don't + // want to change permissions on things we haven't created or modified. + return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error { + // Do not alter the walk root iff. 
it existed before, as it doesn't fall under + // the domain of "things we should chown". + if !doChownDestination && (source == fullpath) { + return nil + } + + // Path is prefixed by source: substitute with destination instead. + cleaned, err := filepath.Rel(source, fullpath) + if err != nil { + return err + } + + fullpath = filepath.Join(destination, cleaned) + return os.Lchown(fullpath, uid, gid) + }) +} diff --git a/builder/internals_windows.go b/builder/internals_windows.go new file mode 100644 index 00000000..5d9d35e3 --- /dev/null +++ b/builder/internals_windows.go @@ -0,0 +1,8 @@ +// +build windows + +package builder + +func fixPermissions(source, destination string, uid, gid int, destExisted bool) error { + // chown is not supported on Windows + return nil +} diff --git a/builder/job.go b/builder/job.go new file mode 100644 index 00000000..38b7ddcd --- /dev/null +++ b/builder/job.go @@ -0,0 +1,355 @@ +package builder + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "sync" + + "github.com/docker/docker/api" + "github.com/docker/docker/builder/parser" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/daemon" + "github.com/docker/docker/graph/tags" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/progressreader" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/ulimit" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" +) + +// When downloading remote contexts, limit the amount (in bytes) +// to be read from the response body in order to detect its Content-Type +const maxPreambleLength = 100 + +// whitelist of commands allowed for a commit/import +var validCommitCommands = map[string]bool{ + "cmd": true, + "entrypoint": true, + "env": true, + "expose": true, + "label": true, + "onbuild": true, + "user": true, + "volume": true, + "workdir": true, +} + +// Config contains all configs for a build job +type Config struct { + DockerfileName string + RemoteURL string + RepoName string + SuppressOutput bool + NoCache bool + Remove bool + ForceRemove bool + Pull bool + Memory int64 + MemorySwap int64 + CPUShares int64 + CPUPeriod int64 + CPUQuota int64 + CPUSetCpus string + CPUSetMems string + CgroupParent string + Ulimits []*ulimit.Ulimit + AuthConfigs map[string]cliconfig.AuthConfig + + Stdout io.Writer + Context io.ReadCloser + // When closed, the job has been cancelled. + // Note: not all jobs implement cancellation. + // See Job.Cancel() and Job.WaitCancelled() + cancelled chan struct{} + cancelOnce sync.Once +} + +// Cancel signals the build job to cancel +func (b *Config) Cancel() { + b.cancelOnce.Do(func() { + close(b.cancelled) + }) +} + +// WaitCancelled returns a channel which is closed ("never blocks") when +// the job is cancelled. +func (b *Config) WaitCancelled() <-chan struct{} { + return b.cancelled +} + +// NewBuildConfig returns a new Config struct +func NewBuildConfig() *Config { + return &Config{ + AuthConfigs: map[string]cliconfig.AuthConfig{}, + cancelled: make(chan struct{}), + } +} + +// Build is the main interface of the package, it gathers the Builder +// struct and calls builder.Run() to do all the real build job. 
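+//
+// A minimal, hedged invocation sketch; `d` and `contextTar` (a tar stream
+// of the build context) are assumed to be prepared by the caller, and the
+// repository name is illustrative:
+//
+//	cfg := NewBuildConfig()
+//	cfg.RepoName = "example/app:latest"
+//	cfg.Context = contextTar
+//	cfg.Stdout = os.Stdout
+//	if err := Build(d, cfg); err != nil {
+//		// handle the build error
+//	}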
+func Build(d *daemon.Daemon, buildConfig *Config) error { + var ( + repoName string + tag string + context io.ReadCloser + ) + sf := streamformatter.NewJSONStreamFormatter() + + repoName, tag = parsers.ParseRepositoryTag(buildConfig.RepoName) + if repoName != "" { + if err := registry.ValidateRepositoryName(repoName); err != nil { + return err + } + if len(tag) > 0 { + if err := tags.ValidateTagName(tag); err != nil { + return err + } + } + } + + if buildConfig.RemoteURL == "" { + context = ioutil.NopCloser(buildConfig.Context) + } else if urlutil.IsGitURL(buildConfig.RemoteURL) { + root, err := utils.GitClone(buildConfig.RemoteURL) + if err != nil { + return err + } + defer os.RemoveAll(root) + + c, err := archive.Tar(root, archive.Uncompressed) + if err != nil { + return err + } + context = c + } else if urlutil.IsURL(buildConfig.RemoteURL) { + f, err := httputils.Download(buildConfig.RemoteURL) + if err != nil { + return fmt.Errorf("Error downloading remote context %s: %v", buildConfig.RemoteURL, err) + } + defer f.Body.Close() + ct := f.Header.Get("Content-Type") + clen := int(f.ContentLength) + contentType, bodyReader, err := inspectResponse(ct, f.Body, clen) + + defer bodyReader.Close() + + if err != nil { + return fmt.Errorf("Error detecting content type for remote %s: %v", buildConfig.RemoteURL, err) + } + if contentType == httputils.MimeTypes.TextPlain { + dockerFile, err := ioutil.ReadAll(bodyReader) + if err != nil { + return err + } + + // When we're downloading just a Dockerfile put it in + // the default name - don't allow the client to move/specify it + buildConfig.DockerfileName = api.DefaultDockerfileName + + c, err := archive.Generate(buildConfig.DockerfileName, string(dockerFile)) + if err != nil { + return err + } + context = c + } else { + // Pass through - this is a pre-packaged context, presumably + // with a Dockerfile with the right name inside it. + prCfg := progressreader.Config{ + In: bodyReader, + Out: buildConfig.Stdout, + Formatter: sf, + Size: clen, + NewLines: true, + ID: "Downloading context", + Action: buildConfig.RemoteURL, + } + context = progressreader.New(prCfg) + } + } + + defer context.Close() + + builder := &builder{ + Daemon: d, + OutStream: &streamformatter.StdoutFormater{ + Writer: buildConfig.Stdout, + StreamFormatter: sf, + }, + ErrStream: &streamformatter.StderrFormater{ + Writer: buildConfig.Stdout, + StreamFormatter: sf, + }, + Verbose: !buildConfig.SuppressOutput, + UtilizeCache: !buildConfig.NoCache, + Remove: buildConfig.Remove, + ForceRemove: buildConfig.ForceRemove, + Pull: buildConfig.Pull, + OutOld: buildConfig.Stdout, + StreamFormatter: sf, + AuthConfigs: buildConfig.AuthConfigs, + dockerfileName: buildConfig.DockerfileName, + cpuShares: buildConfig.CPUShares, + cpuPeriod: buildConfig.CPUPeriod, + cpuQuota: buildConfig.CPUQuota, + cpuSetCpus: buildConfig.CPUSetCpus, + cpuSetMems: buildConfig.CPUSetMems, + cgroupParent: buildConfig.CgroupParent, + memory: buildConfig.Memory, + memorySwap: buildConfig.MemorySwap, + ulimits: buildConfig.Ulimits, + cancelled: buildConfig.WaitCancelled(), + id: stringid.GenerateRandomID(), + } + + defer func() { + builder.Daemon.Graph().Release(builder.id, builder.activeImages...) 
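+		// Hedged note: this Release balances the Graph().Retain calls made
+		// as images are recorded in activeImages (see probeCache), so the
+		// daemon can reclaim intermediate images once the build finishes.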
+	}()
+
+	id, err := builder.Run(context)
+	if err != nil {
+		return err
+	}
+	if repoName != "" {
+		return d.Repositories().Tag(repoName, tag, id, true)
+	}
+	return nil
+}
+
+// BuildFromConfig builds directly from the parameter 'changes', which comes
+// from Dockerfile entries. It will:
+//
+// - call parser.Parse() to get the AST root from the Dockerfile entries
+// - build by calling builder.dispatch() to invoke each entry's handling routine
+func BuildFromConfig(d *daemon.Daemon, c *runconfig.Config, changes []string) (*runconfig.Config, error) {
+	ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n")))
+	if err != nil {
+		return nil, err
+	}
+
+	// ensure that the commands are valid
+	for _, n := range ast.Children {
+		if !validCommitCommands[n.Value] {
+			return nil, fmt.Errorf("%s is not a valid change command", n.Value)
+		}
+	}
+
+	builder := &builder{
+		Daemon:        d,
+		Config:        c,
+		OutStream:     ioutil.Discard,
+		ErrStream:     ioutil.Discard,
+		disableCommit: true,
+	}
+
+	for i, n := range ast.Children {
+		if err := builder.dispatch(i, n); err != nil {
+			return nil, err
+		}
+	}
+
+	return builder.Config, nil
+}
+
+// CommitConfig contains build configs for commit operation
+type CommitConfig struct {
+	Pause   bool
+	Repo    string
+	Tag     string
+	Author  string
+	Comment string
+	Changes []string
+	Config  *runconfig.Config
+}
+
+// Commit will create a new image from a container's changes
+func Commit(name string, d *daemon.Daemon, c *CommitConfig) (string, error) {
+	container, err := d.Get(name)
+	if err != nil {
+		return "", err
+	}
+
+	if c.Config == nil {
+		c.Config = &runconfig.Config{}
+	}
+
+	newConfig, err := BuildFromConfig(d, c.Config, c.Changes)
+	if err != nil {
+		return "", err
+	}
+
+	if err := runconfig.Merge(newConfig, container.Config); err != nil {
+		return "", err
+	}
+
+	commitCfg := &daemon.ContainerCommitConfig{
+		Pause:   c.Pause,
+		Repo:    c.Repo,
+		Tag:     c.Tag,
+		Author:  c.Author,
+		Comment: c.Comment,
+		Config:  newConfig,
+	}
+
+	img, err := d.Commit(container, commitCfg)
+	if err != nil {
+		return "", err
+	}
+
+	return img.ID, nil
+}
+
+// inspectResponse looks into the http response data at r to determine whether its
+// content-type is on the list of acceptable content types for remote build contexts.
+// This function returns:
+//    - a string representation of the detected content-type
+//    - an io.ReadCloser for the response body
+//    - an error value which will be non-nil either when something goes wrong while
+//      reading bytes from r or when the detected content-type is not acceptable.
+func inspectResponse(ct string, r io.ReadCloser, clen int) (string, io.ReadCloser, error) {
+	plen := clen
+	if plen <= 0 || plen > maxPreambleLength {
+		plen = maxPreambleLength
+	}
+
+	preamble := make([]byte, plen)
+	rlen, err := r.Read(preamble)
+	if rlen == 0 {
+		return ct, r, errors.New("Empty response")
+	}
+	if err != nil && err != io.EOF {
+		return ct, r, err
+	}
+
+	preambleR := bytes.NewReader(preamble)
+	bodyReader := ioutil.NopCloser(io.MultiReader(preambleR, r))
+	// Some web servers will use application/octet-stream as the default
+	// content type for files without an extension (e.g.
'Dockerfile') + // so if we receive this value we better check for text content + contentType := ct + if len(ct) == 0 || ct == httputils.MimeTypes.OctetStream { + contentType, _, err = httputils.DetectContentType(preamble) + if err != nil { + return contentType, bodyReader, err + } + } + + contentType = selectAcceptableMIME(contentType) + var cterr error + if len(contentType) == 0 { + cterr = fmt.Errorf("unsupported Content-Type %q", ct) + contentType = ct + } + + return contentType, bodyReader, cterr +} diff --git a/builder/job_test.go b/builder/job_test.go new file mode 100644 index 00000000..9ce74b22 --- /dev/null +++ b/builder/job_test.go @@ -0,0 +1,113 @@ +package builder + +import ( + "bytes" + "io/ioutil" + "testing" +) + +var textPlainDockerfile = "FROM busybox" +var binaryContext = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} //xz magic + +func TestInspectEmptyResponse(t *testing.T) { + ct := "application/octet-stream" + br := ioutil.NopCloser(bytes.NewReader([]byte(""))) + contentType, bReader, err := inspectResponse(ct, br, 0) + if err == nil { + t.Fatalf("Should have generated an error for an empty response") + } + if contentType != "application/octet-stream" { + t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if len(body) != 0 { + t.Fatal("response body should remain empty") + } +} + +func TestInspectResponseBinary(t *testing.T) { + ct := "application/octet-stream" + br := ioutil.NopCloser(bytes.NewReader(binaryContext)) + contentType, bReader, err := inspectResponse(ct, br, len(binaryContext)) + if err != nil { + t.Fatal(err) + } + if contentType != "application/octet-stream" { + t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if len(body) != len(binaryContext) { + t.Fatalf("Wrong response size %d, should be == len(binaryContext)", len(body)) + } + for i := range body { + if body[i] != binaryContext[i] { + t.Fatalf("Corrupted response body at byte index %d", i) + } + } +} + +func TestResponseUnsupportedContentType(t *testing.T) { + content := []byte(textPlainDockerfile) + ct := "application/json" + br := ioutil.NopCloser(bytes.NewReader(content)) + contentType, bReader, err := inspectResponse(ct, br, len(textPlainDockerfile)) + + if err == nil { + t.Fatal("Should have returned an error on content-type 'application/json'") + } + if contentType != ct { + t.Fatalf("Should not have altered content-type: orig: %s, altered: %s", ct, contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if string(body) != textPlainDockerfile { + t.Fatalf("Corrupted response body %s", body) + } +} + +func TestInspectResponseTextSimple(t *testing.T) { + content := []byte(textPlainDockerfile) + ct := "text/plain" + br := ioutil.NopCloser(bytes.NewReader(content)) + contentType, bReader, err := inspectResponse(ct, br, len(content)) + if err != nil { + t.Fatal(err) + } + if contentType != "text/plain" { + t.Fatalf("Content type should be 'text/plain' but is %q", contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if string(body) != textPlainDockerfile { + t.Fatalf("Corrupted response body %s", body) + } +} + +func TestInspectResponseEmptyContentType(t *testing.T) { + content := []byte(textPlainDockerfile) + br := ioutil.NopCloser(bytes.NewReader(content)) + contentType, bodyReader, err := 
inspectResponse("", br, len(content)) + if err != nil { + t.Fatal(err) + } + if contentType != "text/plain" { + t.Fatalf("Content type should be 'text/plain' but is %q", contentType) + } + body, err := ioutil.ReadAll(bodyReader) + if err != nil { + t.Fatal(err) + } + if string(body) != textPlainDockerfile { + t.Fatalf("Corrupted response body %s", body) + } +} diff --git a/builder/parser/dumper/main.go b/builder/parser/dumper/main.go new file mode 100644 index 00000000..33202b70 --- /dev/null +++ b/builder/parser/dumper/main.go @@ -0,0 +1,32 @@ +package main + +import ( + "fmt" + "os" + + "github.com/docker/docker/builder/parser" +) + +func main() { + var f *os.File + var err error + + if len(os.Args) < 2 { + fmt.Println("please supply filename(s)") + os.Exit(1) + } + + for _, fn := range os.Args[1:] { + f, err = os.Open(fn) + if err != nil { + panic(err) + } + + ast, err := parser.Parse(f) + if err != nil { + panic(err) + } else { + fmt.Println(ast.Dump()) + } + } +} diff --git a/builder/parser/json_test.go b/builder/parser/json_test.go new file mode 100644 index 00000000..a256f845 --- /dev/null +++ b/builder/parser/json_test.go @@ -0,0 +1,55 @@ +package parser + +import ( + "testing" +) + +var invalidJSONArraysOfStrings = []string{ + `["a",42,"b"]`, + `["a",123.456,"b"]`, + `["a",{},"b"]`, + `["a",{"c": "d"},"b"]`, + `["a",["c"],"b"]`, + `["a",true,"b"]`, + `["a",false,"b"]`, + `["a",null,"b"]`, +} + +var validJSONArraysOfStrings = map[string][]string{ + `[]`: {}, + `[""]`: {""}, + `["a"]`: {"a"}, + `["a","b"]`: {"a", "b"}, + `[ "a", "b" ]`: {"a", "b"}, + `[ "a", "b" ]`: {"a", "b"}, + ` [ "a", "b" ] `: {"a", "b"}, + `["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"]`: {"abc 123", "♥", "☃", "\" \\ / \b \f \n \r \t \u0000"}, +} + +func TestJSONArraysOfStrings(t *testing.T) { + for json, expected := range validJSONArraysOfStrings { + if node, _, err := parseJSON(json); err != nil { + t.Fatalf("%q should be a valid JSON array of strings, but wasn't! (err: %q)", json, err) + } else { + i := 0 + for node != nil { + if i >= len(expected) { + t.Fatalf("expected result is shorter than parsed result (%d vs %d+) in %q", len(expected), i+1, json) + } + if node.Value != expected[i] { + t.Fatalf("expected %q (not %q) in %q at pos %d", expected[i], node.Value, json, i) + } + node = node.Next + i++ + } + if i != len(expected) { + t.Fatalf("expected result is longer than parsed result (%d vs %d) in %q", len(expected), i+1, json) + } + } + } + for _, json := range invalidJSONArraysOfStrings { + if _, _, err := parseJSON(json); err != errDockerfileNotStringArray { + t.Fatalf("%q should be an invalid JSON array of strings, but wasn't!", json) + } + } +} diff --git a/builder/parser/line_parsers.go b/builder/parser/line_parsers.go new file mode 100644 index 00000000..9bea21a9 --- /dev/null +++ b/builder/parser/line_parsers.go @@ -0,0 +1,295 @@ +package parser + +// line parsers are dispatch calls that parse a single unit of text into a +// Node object which contains the whole statement. Dockerfiles have varied +// (but not usually unique, see ONBUILD for a unique example) parsing rules +// per-command, and these unify the processing in a way that makes it +// manageable. + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + "unicode" +) + +var ( + errDockerfileNotStringArray = errors.New("When using JSON array syntax, arrays must be comprised of strings only.") +) + +// ignore the current argument. This will still leave a command parsed, but +// will not incorporate the arguments into the ast. 
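+//
+// Illustrative behavior: parseIgnore("any args at all") yields an empty
+// *Node, so the statement survives in the AST while its arguments are
+// dropped.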
+func parseIgnore(rest string) (*Node, map[string]bool, error) { + return &Node{}, nil, nil +} + +// used for onbuild. Could potentially be used for anything that represents a +// statement with sub-statements. +// +// ONBUILD RUN foo bar -> (onbuild (run foo bar)) +// +func parseSubCommand(rest string) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + + _, child, err := parseLine(rest) + if err != nil { + return nil, nil, err + } + + return &Node{Children: []*Node{child}}, nil, nil +} + +// parse environment like statements. Note that this does *not* handle +// variable interpolation, which will be handled in the evaluator. +func parseNameVal(rest string, key string) (*Node, map[string]bool, error) { + // This is kind of tricky because we need to support the old + // variant: KEY name value + // as well as the new one: KEY name=value ... + // The trigger to know which one is being used will be whether we hit + // a space or = first. space ==> old, "=" ==> new + + const ( + inSpaces = iota // looking for start of a word + inWord + inQuote + ) + + words := []string{} + phase := inSpaces + word := "" + quote := '\000' + blankOK := false + var ch rune + + for pos := 0; pos <= len(rest); pos++ { + if pos != len(rest) { + ch = rune(rest[pos]) + } + + if phase == inSpaces { // Looking for start of word + if pos == len(rest) { // end of input + break + } + if unicode.IsSpace(ch) { // skip spaces + continue + } + phase = inWord // found it, fall thru + } + if (phase == inWord || phase == inQuote) && (pos == len(rest)) { + if blankOK || len(word) > 0 { + words = append(words, word) + } + break + } + if phase == inWord { + if unicode.IsSpace(ch) { + phase = inSpaces + if blankOK || len(word) > 0 { + words = append(words, word) + + // Look for = and if not there assume + // we're doing the old stuff and + // just read the rest of the line + if !strings.Contains(word, "=") { + word = strings.TrimSpace(rest[pos:]) + words = append(words, word) + break + } + } + word = "" + blankOK = false + continue + } + if ch == '\'' || ch == '"' { + quote = ch + blankOK = true + phase = inQuote + } + if ch == '\\' { + if pos+1 == len(rest) { + continue // just skip \ at end + } + // If we're not quoted and we see a \, then always just + // add \ plus the char to the word, even if the char + // is a quote. + word += string(ch) + pos++ + ch = rune(rest[pos]) + } + word += string(ch) + continue + } + if phase == inQuote { + if ch == quote { + phase = inWord + } + // \ is special except for ' quotes - can't escape anything for ' + if ch == '\\' && quote != '\'' { + if pos+1 == len(rest) { + phase = inWord + continue // just skip \ at end + } + pos++ + nextCh := rune(rest[pos]) + word += string(ch) + ch = nextCh + } + word += string(ch) + } + } + + if len(words) == 0 { + return nil, nil, nil + } + + // Old format (KEY name value) + var rootnode *Node + + if !strings.Contains(words[0], "=") { + node := &Node{} + rootnode = node + strs := tokenWhitespace.Split(rest, 2) + + if len(strs) < 2 { + return nil, nil, fmt.Errorf(key + " must have two arguments") + } + + node.Value = strs[0] + node.Next = &Node{} + node.Next.Value = strs[1] + } else { + var prevNode *Node + for i, word := range words { + if !strings.Contains(word, "=") { + return nil, nil, fmt.Errorf("Syntax error - can't find = in %q. 
Must be of the form: name=value", word)
+			}
+			parts := strings.SplitN(word, "=", 2)
+
+			name := &Node{}
+			value := &Node{}
+
+			name.Next = value
+			name.Value = parts[0]
+			value.Value = parts[1]
+
+			if i == 0 {
+				rootnode = name
+			} else {
+				prevNode.Next = name
+			}
+			prevNode = value
+		}
+	}
+
+	return rootnode, nil, nil
+}
+
+func parseEnv(rest string) (*Node, map[string]bool, error) {
+	return parseNameVal(rest, "ENV")
+}
+
+func parseLabel(rest string) (*Node, map[string]bool, error) {
+	return parseNameVal(rest, "LABEL")
+}
+
+// parses a whitespace-delimited set of arguments. The result is effectively a
+// linked list of string arguments.
+func parseStringsWhitespaceDelimited(rest string) (*Node, map[string]bool, error) {
+	if rest == "" {
+		return nil, nil, nil
+	}
+
+	node := &Node{}
+	rootnode := node
+	prevnode := node
+	for _, str := range tokenWhitespace.Split(rest, -1) { // use regexp
+		prevnode = node
+		node.Value = str
+		node.Next = &Node{}
+		node = node.Next
+	}
+
+	// XXX to get around regexp.Split *always* providing an empty string at the
+	// end due to how our loop is constructed, nil out the last node in the
+	// chain.
+	prevnode.Next = nil
+
+	return rootnode, nil, nil
+}
+
+// parseString wraps the whole argument in a single node and returns it
+// without further parsing.
+func parseString(rest string) (*Node, map[string]bool, error) {
+	if rest == "" {
+		return nil, nil, nil
+	}
+	n := &Node{}
+	n.Value = rest
+	return n, nil, nil
+}
+
+// parseJSON converts JSON arrays to an AST.
+func parseJSON(rest string) (*Node, map[string]bool, error) {
+	var myJSON []interface{}
+	if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil {
+		return nil, nil, err
+	}
+
+	var top, prev *Node
+	for _, str := range myJSON {
+		s, ok := str.(string)
+		if !ok {
+			return nil, nil, errDockerfileNotStringArray
+		}
+
+		node := &Node{Value: s}
+		if prev == nil {
+			top = node
+		} else {
+			prev.Next = node
+		}
+		prev = node
+	}
+
+	return top, map[string]bool{"json": true}, nil
+}
+
+// parseMaybeJSON determines if the argument appears to be a JSON array. If
+// so, passes to parseJSON; if not, quotes the result and returns a single
+// node.
+func parseMaybeJSON(rest string) (*Node, map[string]bool, error) {
+	if rest == "" {
+		return nil, nil, nil
+	}
+
+	node, attrs, err := parseJSON(rest)
+
+	if err == nil {
+		return node, attrs, nil
+	}
+	if err == errDockerfileNotStringArray {
+		return nil, nil, err
+	}
+
+	node = &Node{}
+	node.Value = rest
+	return node, nil, nil
+}
+
+// parseMaybeJSONToList determines if the argument appears to be a JSON array. If
+// so, passes to parseJSON; if not, attempts to parse it as a
+// whitespace-delimited string.
+func parseMaybeJSONToList(rest string) (*Node, map[string]bool, error) {
+	node, attrs, err := parseJSON(rest)
+
+	if err == nil {
+		return node, attrs, nil
+	}
+	if err == errDockerfileNotStringArray {
+		return nil, nil, err
+	}
+
+	return parseStringsWhitespaceDelimited(rest)
+}
diff --git a/builder/parser/parser.go b/builder/parser/parser.go
new file mode 100644
index 00000000..c126b27a
--- /dev/null
+++ b/builder/parser/parser.go
@@ -0,0 +1,143 @@
+// Package parser implements a parser and parse tree dumper for Dockerfiles.
+package parser
+
+import (
+	"bufio"
+	"io"
+	"regexp"
+	"strings"
+	"unicode"
+
+	"github.com/docker/docker/builder/command"
+)
+
+// Node is a structure used to represent a parse tree.
+//
+// In the node there are three fields, Value, Next, and Children. Value is the
+// current token's string value.
Next is always the next non-child token, and
+// children contains all the children. Here's an example:
+//
+// (value next (child child-next child-next-next) next-next)
+//
+// This data structure is frankly pretty lousy for handling complex languages,
+// but lucky for us the Dockerfile isn't very complicated. This structure
+// works a little more effectively than a "proper" parse tree for our needs.
+//
+type Node struct {
+	Value      string          // actual content
+	Next       *Node           // the next item in the current sexp
+	Children   []*Node         // the children of this sexp
+	Attributes map[string]bool // special attributes for this node
+	Original   string          // original line used before parsing
+	Flags      []string        // only top Node should have this set
+}
+
+var (
+	dispatch              map[string]func(string) (*Node, map[string]bool, error)
+	tokenWhitespace       = regexp.MustCompile(`[\t\v\f\r ]+`)
+	tokenLineContinuation = regexp.MustCompile(`\\[ \t]*$`)
+	tokenComment          = regexp.MustCompile(`^#.*$`)
+)
+
+func init() {
+	// Dispatch Table. See line_parsers.go for the parse functions.
+	// The command is parsed and mapped to the line parser. The line parser
+	// receives the arguments but not the command, and returns an AST after
+	// reformulating the arguments according to the rules in the parser
+	// functions. Errors are propagated up by Parse() and the resulting AST can
+	// be incorporated directly into the existing AST as a next.
+	dispatch = map[string]func(string) (*Node, map[string]bool, error){
+		command.User:       parseString,
+		command.Onbuild:    parseSubCommand,
+		command.Workdir:    parseString,
+		command.Env:        parseEnv,
+		command.Label:      parseLabel,
+		command.Maintainer: parseString,
+		command.From:       parseString,
+		command.Add:        parseMaybeJSONToList,
+		command.Copy:       parseMaybeJSONToList,
+		command.Run:        parseMaybeJSON,
+		command.Cmd:        parseMaybeJSON,
+		command.Entrypoint: parseMaybeJSON,
+		command.Expose:     parseStringsWhitespaceDelimited,
+		command.Volume:     parseMaybeJSONToList,
+	}
+}
+
+// parse a line and return the remainder.
+func parseLine(line string) (string, *Node, error) {
+	if line = stripComments(line); line == "" {
+		return "", nil, nil
+	}
+
+	if tokenLineContinuation.MatchString(line) {
+		line = tokenLineContinuation.ReplaceAllString(line, "")
+		return line, nil, nil
+	}
+
+	cmd, flags, args, err := splitCommand(line)
+	if err != nil {
+		return "", nil, err
+	}
+
+	node := &Node{}
+	node.Value = cmd
+
+	sexp, attrs, err := fullDispatch(cmd, args)
+	if err != nil {
+		return "", nil, err
+	}
+
+	node.Next = sexp
+	node.Attributes = attrs
+	node.Original = line
+	node.Flags = flags
+
+	return "", node, nil
+}
+
+// Parse is the main parse routine.
+// It reads from an io.Reader and returns the root of the AST.
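+//
+// Illustrative usage (the Dockerfile text here is made up; the testfiles
+// below hold real fixtures):
+//
+//	ast, err := Parse(strings.NewReader("FROM busybox\nRUN echo hi\n"))
+//	if err != nil {
+//		// handle the parse error
+//	}
+//	fmt.Println(ast.Dump()) // s-expression dump, e.g. (from "busybox")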
+func Parse(rwc io.Reader) (*Node, error) { + root := &Node{} + scanner := bufio.NewScanner(rwc) + + for scanner.Scan() { + scannedLine := strings.TrimLeftFunc(scanner.Text(), unicode.IsSpace) + line, child, err := parseLine(scannedLine) + if err != nil { + return nil, err + } + + if line != "" && child == nil { + for scanner.Scan() { + newline := scanner.Text() + + if stripComments(strings.TrimSpace(newline)) == "" { + continue + } + + line, child, err = parseLine(line + newline) + if err != nil { + return nil, err + } + + if child != nil { + break + } + } + if child == nil && line != "" { + line, child, err = parseLine(line) + if err != nil { + return nil, err + } + } + } + + if child != nil { + root.Children = append(root.Children, child) + } + } + + return root, nil +} diff --git a/builder/parser/parser_test.go b/builder/parser/parser_test.go new file mode 100644 index 00000000..6b55a611 --- /dev/null +++ b/builder/parser/parser_test.go @@ -0,0 +1,75 @@ +package parser + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +const testDir = "testfiles" +const negativeTestDir = "testfiles-negative" + +func getDirs(t *testing.T, dir string) []string { + f, err := os.Open(dir) + if err != nil { + t.Fatal(err) + } + + defer f.Close() + + dirs, err := f.Readdirnames(0) + if err != nil { + t.Fatal(err) + } + + return dirs +} + +func TestTestNegative(t *testing.T) { + for _, dir := range getDirs(t, negativeTestDir) { + dockerfile := filepath.Join(negativeTestDir, dir, "Dockerfile") + + df, err := os.Open(dockerfile) + if err != nil { + t.Fatalf("Dockerfile missing for %s: %v", dir, err) + } + + _, err = Parse(df) + if err == nil { + t.Fatalf("No error parsing broken dockerfile for %s", dir) + } + + df.Close() + } +} + +func TestTestData(t *testing.T) { + for _, dir := range getDirs(t, testDir) { + dockerfile := filepath.Join(testDir, dir, "Dockerfile") + resultfile := filepath.Join(testDir, dir, "result") + + df, err := os.Open(dockerfile) + if err != nil { + t.Fatalf("Dockerfile missing for %s: %v", dir, err) + } + defer df.Close() + + ast, err := Parse(df) + if err != nil { + t.Fatalf("Error parsing %s's dockerfile: %v", dir, err) + } + + content, err := ioutil.ReadFile(resultfile) + if err != nil { + t.Fatalf("Error reading %s's result file: %v", dir, err) + } + + if ast.Dump()+"\n" != string(content) { + fmt.Fprintln(os.Stderr, "Result:\n"+ast.Dump()) + fmt.Fprintln(os.Stderr, "Expected:\n"+string(content)) + t.Fatalf("%s: AST dump of dockerfile does not match result", dir) + } + } +} diff --git a/builder/parser/testfiles-negative/env_no_value/Dockerfile b/builder/parser/testfiles-negative/env_no_value/Dockerfile new file mode 100644 index 00000000..1d655787 --- /dev/null +++ b/builder/parser/testfiles-negative/env_no_value/Dockerfile @@ -0,0 +1,3 @@ +FROM busybox + +ENV PATH diff --git a/builder/parser/testfiles-negative/shykes-nested-json/Dockerfile b/builder/parser/testfiles-negative/shykes-nested-json/Dockerfile new file mode 100644 index 00000000..d1be4596 --- /dev/null +++ b/builder/parser/testfiles-negative/shykes-nested-json/Dockerfile @@ -0,0 +1 @@ +CMD [ "echo", [ "nested json" ] ] diff --git a/builder/parser/testfiles/ADD-COPY-with-JSON/Dockerfile b/builder/parser/testfiles/ADD-COPY-with-JSON/Dockerfile new file mode 100644 index 00000000..49372b06 --- /dev/null +++ b/builder/parser/testfiles/ADD-COPY-with-JSON/Dockerfile @@ -0,0 +1,9 @@ +FROM ubuntu:14.04 +MAINTAINER Seongyeol Lim + +COPY . /go/src/github.com/docker/docker +ADD . 
/ +ADD [ "vimrc", "/tmp" ] +COPY [ "bashrc", "/tmp" ] +COPY [ "test file", "/tmp" ] +ADD [ "test file", "/tmp/test file" ] diff --git a/builder/parser/testfiles/ADD-COPY-with-JSON/result b/builder/parser/testfiles/ADD-COPY-with-JSON/result new file mode 100644 index 00000000..86c3fef7 --- /dev/null +++ b/builder/parser/testfiles/ADD-COPY-with-JSON/result @@ -0,0 +1,8 @@ +(from "ubuntu:14.04") +(maintainer "Seongyeol Lim ") +(copy "." "/go/src/github.com/docker/docker") +(add "." "/") +(add "vimrc" "/tmp") +(copy "bashrc" "/tmp") +(copy "test file" "/tmp") +(add "test file" "/tmp/test file") diff --git a/builder/parser/testfiles/brimstone-consuldock/Dockerfile b/builder/parser/testfiles/brimstone-consuldock/Dockerfile new file mode 100644 index 00000000..5c75a2e0 --- /dev/null +++ b/builder/parser/testfiles/brimstone-consuldock/Dockerfile @@ -0,0 +1,25 @@ +FROM brimstone/ubuntu:14.04 + +MAINTAINER brimstone@the.narro.ws + +# TORUN -v /var/run/docker.sock:/var/run/docker.sock + +ENV GOPATH /go + +# Set our command +ENTRYPOINT ["/usr/local/bin/consuldock"] + +# Install the packages we need, clean up after them and us +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && apt-get install -y --no-install-recommends git golang ca-certificates \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && go get -v github.com/brimstone/consuldock \ + && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* \ + && rm -rf $GOPATH diff --git a/builder/parser/testfiles/brimstone-consuldock/result b/builder/parser/testfiles/brimstone-consuldock/result new file mode 100644 index 00000000..227f748c --- /dev/null +++ b/builder/parser/testfiles/brimstone-consuldock/result @@ -0,0 +1,5 @@ +(from "brimstone/ubuntu:14.04") +(maintainer "brimstone@the.narro.ws") +(env "GOPATH" "/go") +(entrypoint "/usr/local/bin/consuldock") +(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/brimstone/consuldock && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH") diff --git a/builder/parser/testfiles/brimstone-docker-consul/Dockerfile b/builder/parser/testfiles/brimstone-docker-consul/Dockerfile new file mode 100644 index 00000000..25ae3521 --- /dev/null +++ b/builder/parser/testfiles/brimstone-docker-consul/Dockerfile @@ -0,0 +1,52 @@ +FROM brimstone/ubuntu:14.04 + +CMD [] + +ENTRYPOINT ["/usr/bin/consul", "agent", "-server", "-data-dir=/consul", "-client=0.0.0.0", "-ui-dir=/webui"] + +EXPOSE 8500 8600 8400 8301 8302 + +RUN apt-get update \ + && apt-get install -y unzip wget \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists + +RUN cd /tmp \ + && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ + -O web_ui.zip \ + && unzip web_ui.zip \ + && mv dist /webui \ + && rm web_ui.zip + +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && apt-get install -y --no-install-recommends unzip wget \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && cd /tmp \ + && wget 
https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ + -O web_ui.zip \ + && unzip web_ui.zip \ + && mv dist /webui \ + && rm web_ui.zip \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* + +ENV GOPATH /go + +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && apt-get install -y --no-install-recommends git golang ca-certificates build-essential \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && go get -v github.com/hashicorp/consul \ + && mv $GOPATH/bin/consul /usr/bin/consul \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* \ + && rm -rf $GOPATH diff --git a/builder/parser/testfiles/brimstone-docker-consul/result b/builder/parser/testfiles/brimstone-docker-consul/result new file mode 100644 index 00000000..16492e51 --- /dev/null +++ b/builder/parser/testfiles/brimstone-docker-consul/result @@ -0,0 +1,9 @@ +(from "brimstone/ubuntu:14.04") +(cmd) +(entrypoint "/usr/bin/consul" "agent" "-server" "-data-dir=/consul" "-client=0.0.0.0" "-ui-dir=/webui") +(expose "8500" "8600" "8400" "8301" "8302") +(run "apt-get update && apt-get install -y unzip wget \t&& apt-get clean \t&& rm -rf /var/lib/apt/lists") +(run "cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip") +(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends unzip wget && apt-get clean && rm -rf /var/lib/apt/lists && cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.*") +(env "GOPATH" "/go") +(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates build-essential && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/hashicorp/consul \t&& mv $GOPATH/bin/consul /usr/bin/consul \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH") diff --git a/builder/parser/testfiles/continueIndent/Dockerfile b/builder/parser/testfiles/continueIndent/Dockerfile new file mode 100644 index 00000000..42b324e7 --- /dev/null +++ b/builder/parser/testfiles/continueIndent/Dockerfile @@ -0,0 +1,36 @@ +FROM ubuntu:14.04 + +RUN echo hello\ + world\ + goodnight \ + moon\ + light\ +ning +RUN echo hello \ + world +RUN echo hello \ +world +RUN echo hello \ +goodbye\ +frog +RUN echo hello \ +world +RUN echo hi \ + \ + world \ +\ + good\ +\ +night +RUN echo goodbye\ +frog +RUN echo good\ +bye\ +frog + +RUN echo hello \ +# this is a comment + +# this is a comment with a blank line surrounding it + +this is some more useful stuff diff --git a/builder/parser/testfiles/continueIndent/result b/builder/parser/testfiles/continueIndent/result new file mode 100644 index 00000000..268ae073 --- /dev/null +++ b/builder/parser/testfiles/continueIndent/result @@ -0,0 +1,10 @@ +(from "ubuntu:14.04") +(run "echo hello world 
goodnight moon lightning") +(run "echo hello world") +(run "echo hello world") +(run "echo hello goodbyefrog") +(run "echo hello world") +(run "echo hi world goodnight") +(run "echo goodbyefrog") +(run "echo goodbyefrog") +(run "echo hello this is some more useful stuff") diff --git a/builder/parser/testfiles/cpuguy83-nagios/Dockerfile b/builder/parser/testfiles/cpuguy83-nagios/Dockerfile new file mode 100644 index 00000000..8ccb71a5 --- /dev/null +++ b/builder/parser/testfiles/cpuguy83-nagios/Dockerfile @@ -0,0 +1,54 @@ +FROM cpuguy83/ubuntu +ENV NAGIOS_HOME /opt/nagios +ENV NAGIOS_USER nagios +ENV NAGIOS_GROUP nagios +ENV NAGIOS_CMDUSER nagios +ENV NAGIOS_CMDGROUP nagios +ENV NAGIOSADMIN_USER nagiosadmin +ENV NAGIOSADMIN_PASS nagios +ENV APACHE_RUN_USER nagios +ENV APACHE_RUN_GROUP nagios +ENV NAGIOS_TIMEZONE UTC + +RUN sed -i 's/universe/universe multiverse/' /etc/apt/sources.list +RUN apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx +RUN ( egrep -i "^${NAGIOS_GROUP}" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i "^${NAGIOS_CMDGROUP}" /etc/group || groupadd $NAGIOS_CMDGROUP ) +RUN ( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER ) + +ADD http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3 /tmp/nagios.tar.gz +RUN cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf +ADD http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz /tmp/ +RUN cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install + +RUN sed -i.bak 's/.*\=www\-data//g' /etc/apache2/envvars +RUN export DOC_ROOT="DocumentRoot $(echo $NAGIOS_HOME/share)"; sed -i "s,DocumentRoot.*,$DOC_ROOT," /etc/apache2/sites-enabled/000-default + +RUN ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo + +RUN echo "use_timezone=$NAGIOS_TIMEZONE" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo "SetEnv TZ \"${NAGIOS_TIMEZONE}\"" >> /etc/apache2/conf.d/nagios.conf + +RUN mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs +RUN echo "cfg_dir=${NAGIOS_HOME}/etc/conf.d" >> ${NAGIOS_HOME}/etc/nagios.cfg +RUN echo "cfg_dir=${NAGIOS_HOME}/etc/monitor" >> ${NAGIOS_HOME}/etc/nagios.cfg +RUN download-mibs && echo "mibs +ALL" > /etc/snmp/snmp.conf + +RUN sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && \ + sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg +RUN cp /etc/services /var/spool/postfix/etc/ + +RUN mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix +ADD nagios.init /etc/sv/nagios/run +ADD apache.init 
/etc/sv/apache/run +ADD postfix.init /etc/sv/postfix/run +ADD postfix.stop /etc/sv/postfix/finish + +ADD start.sh /usr/local/bin/start_nagios + +ENV APACHE_LOCK_DIR /var/run +ENV APACHE_LOG_DIR /var/log/apache2 + +EXPOSE 80 + +VOLUME ["/opt/nagios/var", "/opt/nagios/etc", "/opt/nagios/libexec", "/var/log/apache2", "/usr/share/snmp/mibs"] + +CMD ["/usr/local/bin/start_nagios"] diff --git a/builder/parser/testfiles/cpuguy83-nagios/result b/builder/parser/testfiles/cpuguy83-nagios/result new file mode 100644 index 00000000..25dd3ddf --- /dev/null +++ b/builder/parser/testfiles/cpuguy83-nagios/result @@ -0,0 +1,40 @@ +(from "cpuguy83/ubuntu") +(env "NAGIOS_HOME" "/opt/nagios") +(env "NAGIOS_USER" "nagios") +(env "NAGIOS_GROUP" "nagios") +(env "NAGIOS_CMDUSER" "nagios") +(env "NAGIOS_CMDGROUP" "nagios") +(env "NAGIOSADMIN_USER" "nagiosadmin") +(env "NAGIOSADMIN_PASS" "nagios") +(env "APACHE_RUN_USER" "nagios") +(env "APACHE_RUN_GROUP" "nagios") +(env "NAGIOS_TIMEZONE" "UTC") +(run "sed -i 's/universe/universe multiverse/' /etc/apt/sources.list") +(run "apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx") +(run "( egrep -i \"^${NAGIOS_GROUP}\" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i \"^${NAGIOS_CMDGROUP}\" /etc/group || groupadd $NAGIOS_CMDGROUP )") +(run "( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )") +(add "http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3" "/tmp/nagios.tar.gz") +(run "cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf") +(add "http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz" "/tmp/") +(run "cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install") +(run "sed -i.bak 's/.*\\=www\\-data//g' /etc/apache2/envvars") +(run "export DOC_ROOT=\"DocumentRoot $(echo $NAGIOS_HOME/share)\"; sed -i \"s,DocumentRoot.*,$DOC_ROOT,\" /etc/apache2/sites-enabled/000-default") +(run "ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo") +(run "echo \"use_timezone=$NAGIOS_TIMEZONE\" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo \"SetEnv TZ \\\"${NAGIOS_TIMEZONE}\\\"\" >> /etc/apache2/conf.d/nagios.conf") +(run "mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs") +(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/conf.d\" >> ${NAGIOS_HOME}/etc/nagios.cfg") +(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/monitor\" >> ${NAGIOS_HOME}/etc/nagios.cfg") +(run "download-mibs && echo \"mibs +ALL\" > /etc/snmp/snmp.conf") +(run "sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && sed -i 's,/usr/usr,/usr,' 
/opt/nagios/etc/objects/commands.cfg") +(run "cp /etc/services /var/spool/postfix/etc/") +(run "mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix") +(add "nagios.init" "/etc/sv/nagios/run") +(add "apache.init" "/etc/sv/apache/run") +(add "postfix.init" "/etc/sv/postfix/run") +(add "postfix.stop" "/etc/sv/postfix/finish") +(add "start.sh" "/usr/local/bin/start_nagios") +(env "APACHE_LOCK_DIR" "/var/run") +(env "APACHE_LOG_DIR" "/var/log/apache2") +(expose "80") +(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") +(cmd "/usr/local/bin/start_nagios") diff --git a/builder/parser/testfiles/docker/Dockerfile b/builder/parser/testfiles/docker/Dockerfile new file mode 100644 index 00000000..4a356254 --- /dev/null +++ b/builder/parser/testfiles/docker/Dockerfile @@ -0,0 +1,104 @@ +# This file describes the standard way to build Docker, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test +# +# # Publish a release: +# docker run --privileged \ +# -e AWS_S3_BUCKET=baz \ +# -e AWS_ACCESS_KEY=foo \ +# -e AWS_SECRET_KEY=bar \ +# -e GPG_PASSPHRASE=gloubiboulga \ +# docker hack/release.sh +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. +# + +FROM ubuntu:14.04 +MAINTAINER Tianon Gravi (@tianon) + +# Packaged dependencies +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ + apt-utils \ + aufs-tools \ + automake \ + btrfs-tools \ + build-essential \ + curl \ + dpkg-sig \ + git \ + iptables \ + libapparmor-dev \ + libcap-dev \ + libsqlite3-dev \ + lxc=1.0* \ + mercurial \ + pandoc \ + parallel \ + reprepro \ + ruby1.9.1 \ + ruby1.9.1-dev \ + s3cmd=1.1.0* \ + --no-install-recommends + +# Get lvm2 source for compiling statically +RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103 +# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags +# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper +# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install Go +RUN curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz +ENV PATH /usr/local/go/bin:$PATH +ENV GOPATH /go:/go/src/github.com/docker/docker/vendor +RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 + +# Compile Go for cross compilation +ENV DOCKER_CROSSPLATFORMS \ + linux/386 linux/arm \ + darwin/amd64 darwin/386 \ + freebsd/amd64 freebsd/386 freebsd/arm +# (set an explicit GOARM of 5 for maximum compatibility) +ENV GOARM 5 +RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done' + +# Grab Go's cover tool for dead-simple code coverage testing +RUN go get golang.org/x/tools/cmd/cover + +# TODO replace 
FPM with some very minimal debhelper stuff +RUN gem install --no-rdoc --no-ri fpm --version 1.0.2 + +# Get the "busybox" image source so we can build locally instead of pulling +RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox + +# Setup s3cmd config +RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor selinux + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/builder/parser/testfiles/docker/result b/builder/parser/testfiles/docker/result new file mode 100644 index 00000000..773b640a --- /dev/null +++ b/builder/parser/testfiles/docker/result @@ -0,0 +1,24 @@ +(from "ubuntu:14.04") +(maintainer "Tianon Gravi (@tianon)") +(run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \tapt-utils \taufs-tools \tautomake \tbtrfs-tools \tbuild-essential \tcurl \tdpkg-sig \tgit \tiptables \tlibapparmor-dev \tlibcap-dev \tlibsqlite3-dev \tlxc=1.0* \tmercurial \tpandoc \tparallel \treprepro \truby1.9.1 \truby1.9.1-dev \ts3cmd=1.1.0* \t--no-install-recommends") +(run "git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103") +(run "cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper") +(run "curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz") +(env "PATH" "/usr/local/go/bin:$PATH") +(env "GOPATH" "/go:/go/src/github.com/docker/docker/vendor") +(run "cd /usr/local/go/src && ./make.bash --no-clean 2>&1") +(env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm \tdarwin/amd64 darwin/386 \tfreebsd/amd64 freebsd/386 freebsd/arm") +(env "GOARM" "5") +(run "cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'") +(run "go get golang.org/x/tools/cmd/cover") +(run "gem install --no-rdoc --no-ri fpm --version 1.0.2") +(run "git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox") +(run "/bin/echo -e '[default]\\naccess_key=$AWS_ACCESS_KEY\\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg") +(run "git config --global user.email 'docker-dummy@example.com'") +(run "groupadd -r docker") +(run "useradd --create-home --gid docker unprivilegeduser") +(volume "/var/lib/docker") +(workdir "/go/src/github.com/docker/docker") +(env "DOCKER_BUILDTAGS" "apparmor selinux") +(entrypoint "hack/dind") +(copy "." 
"/go/src/github.com/docker/docker") diff --git a/builder/parser/testfiles/env/Dockerfile b/builder/parser/testfiles/env/Dockerfile new file mode 100644 index 00000000..08fa18ac --- /dev/null +++ b/builder/parser/testfiles/env/Dockerfile @@ -0,0 +1,23 @@ +FROM ubuntu +ENV name value +ENV name=value +ENV name=value name2=value2 +ENV name="value value1" +ENV name=value\ value2 +ENV name="value'quote space'value2" +ENV name='value"double quote"value2' +ENV name=value\ value2 name2=value2\ value3 +ENV name="a\"b" +ENV name="a\'b" +ENV name='a\'b' +ENV name='a\'b'' +ENV name='a\"b' +ENV name="''" +# don't put anything after the next line - it must be the last line of the +# Dockerfile and it must end with \ +ENV name=value \ + name1=value1 \ + name2="value2a \ + value2b" \ + name3="value3a\n\"value3b\"" \ + name4="value4a\\nvalue4b" \ diff --git a/builder/parser/testfiles/env/result b/builder/parser/testfiles/env/result new file mode 100644 index 00000000..ba0a6dd7 --- /dev/null +++ b/builder/parser/testfiles/env/result @@ -0,0 +1,16 @@ +(from "ubuntu") +(env "name" "value") +(env "name" "value") +(env "name" "value" "name2" "value2") +(env "name" "\"value value1\"") +(env "name" "value\\ value2") +(env "name" "\"value'quote space'value2\"") +(env "name" "'value\"double quote\"value2'") +(env "name" "value\\ value2" "name2" "value2\\ value3") +(env "name" "\"a\\\"b\"") +(env "name" "\"a\\'b\"") +(env "name" "'a\\'b'") +(env "name" "'a\\'b''") +(env "name" "'a\\\"b'") +(env "name" "\"''\"") +(env "name" "value" "name1" "value1" "name2" "\"value2a value2b\"" "name3" "\"value3a\\n\\\"value3b\\\"\"" "name4" "\"value4a\\\\nvalue4b\"") diff --git a/builder/parser/testfiles/escapes/Dockerfile b/builder/parser/testfiles/escapes/Dockerfile new file mode 100644 index 00000000..1ffb17ef --- /dev/null +++ b/builder/parser/testfiles/escapes/Dockerfile @@ -0,0 +1,14 @@ +FROM ubuntu:14.04 +MAINTAINER Erik \\Hollensbe \" + +RUN apt-get \update && \ + apt-get \"install znc -y +ADD \conf\\" /.znc + +RUN foo \ + +bar \ + +baz + +CMD [ "\/usr\\\"/bin/znc", "-f", "-r" ] diff --git a/builder/parser/testfiles/escapes/result b/builder/parser/testfiles/escapes/result new file mode 100644 index 00000000..13e409cb --- /dev/null +++ b/builder/parser/testfiles/escapes/result @@ -0,0 +1,6 @@ +(from "ubuntu:14.04") +(maintainer "Erik \\\\Hollensbe \\\"") +(run "apt-get \\update && apt-get \\\"install znc -y") +(add "\\conf\\\\\"" "/.znc") +(run "foo bar baz") +(cmd "/usr\\\"/bin/znc" "-f" "-r") diff --git a/builder/parser/testfiles/flags/Dockerfile b/builder/parser/testfiles/flags/Dockerfile new file mode 100644 index 00000000..2418e0f0 --- /dev/null +++ b/builder/parser/testfiles/flags/Dockerfile @@ -0,0 +1,10 @@ +FROM scratch +COPY foo /tmp/ +COPY --user=me foo /tmp/ +COPY --doit=true foo /tmp/ +COPY --user=me --doit=true foo /tmp/ +COPY --doit=true -- foo /tmp/ +COPY -- foo /tmp/ +CMD --doit [ "a", "b" ] +CMD --doit=true -- [ "a", "b" ] +CMD --doit -- [ ] diff --git a/builder/parser/testfiles/flags/result b/builder/parser/testfiles/flags/result new file mode 100644 index 00000000..4578f4cb --- /dev/null +++ b/builder/parser/testfiles/flags/result @@ -0,0 +1,10 @@ +(from "scratch") +(copy "foo" "/tmp/") +(copy ["--user=me"] "foo" "/tmp/") +(copy ["--doit=true"] "foo" "/tmp/") +(copy ["--user=me" "--doit=true"] "foo" "/tmp/") +(copy ["--doit=true"] "foo" "/tmp/") +(copy "foo" "/tmp/") +(cmd ["--doit"] "a" "b") +(cmd ["--doit=true"] "a" "b") +(cmd ["--doit"]) diff --git a/builder/parser/testfiles/influxdb/Dockerfile 
b/builder/parser/testfiles/influxdb/Dockerfile new file mode 100644 index 00000000..587fb9b5 --- /dev/null +++ b/builder/parser/testfiles/influxdb/Dockerfile @@ -0,0 +1,15 @@ +FROM ubuntu:14.04 + +RUN apt-get update && apt-get install wget -y +RUN wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb +RUN dpkg -i influxdb_latest_amd64.deb +RUN rm -r /opt/influxdb/shared + +VOLUME /opt/influxdb/shared + +CMD /usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml + +EXPOSE 8083 +EXPOSE 8086 +EXPOSE 8090 +EXPOSE 8099 diff --git a/builder/parser/testfiles/influxdb/result b/builder/parser/testfiles/influxdb/result new file mode 100644 index 00000000..0998e87e --- /dev/null +++ b/builder/parser/testfiles/influxdb/result @@ -0,0 +1,11 @@ +(from "ubuntu:14.04") +(run "apt-get update && apt-get install wget -y") +(run "wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb") +(run "dpkg -i influxdb_latest_amd64.deb") +(run "rm -r /opt/influxdb/shared") +(volume "/opt/influxdb/shared") +(cmd "/usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml") +(expose "8083") +(expose "8086") +(expose "8090") +(expose "8099") diff --git a/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile new file mode 100644 index 00000000..39fe27d9 --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile @@ -0,0 +1 @@ +CMD "[\"echo\", \"Phew, I just managed to escaped those double quotes\"]" diff --git a/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result new file mode 100644 index 00000000..afc220c2 --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result @@ -0,0 +1 @@ +(cmd "\"[\\\"echo\\\", \\\"Phew, I just managed to escaped those double quotes\\\"]\"") diff --git a/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile new file mode 100644 index 00000000..eaae081a --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile @@ -0,0 +1 @@ +CMD '["echo", "Well, JSON in a string is JSON too?"]' diff --git a/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/result b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/result new file mode 100644 index 00000000..484804e2 --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/result @@ -0,0 +1 @@ +(cmd "'[\"echo\", \"Well, JSON in a string is JSON too?\"]'") diff --git a/builder/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile b/builder/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile new file mode 100644 index 00000000..c3ac63c0 --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile @@ -0,0 +1 @@ +CMD ['echo','single quotes are invalid JSON'] diff --git a/builder/parser/testfiles/jeztah-invalid-json-single-quotes/result b/builder/parser/testfiles/jeztah-invalid-json-single-quotes/result new file mode 100644 index 00000000..61478912 --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-single-quotes/result @@ -0,0 +1 @@ +(cmd "['echo','single quotes are invalid JSON']") diff --git 
a/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile b/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile new file mode 100644 index 00000000..5fd4afa5 --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile @@ -0,0 +1 @@ +CMD ["echo", "Please, close the brackets when you're done" diff --git a/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result b/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result new file mode 100644 index 00000000..1ffbb8ff --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result @@ -0,0 +1 @@ +(cmd "[\"echo\", \"Please, close the brackets when you're done\"") diff --git a/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile b/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile new file mode 100644 index 00000000..30cc4bb4 --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile @@ -0,0 +1 @@ +CMD ["echo", "look ma, no quote!] diff --git a/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/result b/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/result new file mode 100644 index 00000000..32048147 --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/result @@ -0,0 +1 @@ +(cmd "[\"echo\", \"look ma, no quote!]") diff --git a/builder/parser/testfiles/json/Dockerfile b/builder/parser/testfiles/json/Dockerfile new file mode 100644 index 00000000..a5869171 --- /dev/null +++ b/builder/parser/testfiles/json/Dockerfile @@ -0,0 +1,8 @@ +CMD [] +CMD [""] +CMD ["a"] +CMD ["a","b"] +CMD [ "a", "b" ] +CMD [ "a", "b" ] +CMD [ "a", "b" ] +CMD ["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"] diff --git a/builder/parser/testfiles/json/result b/builder/parser/testfiles/json/result new file mode 100644 index 00000000..c6553e6e --- /dev/null +++ b/builder/parser/testfiles/json/result @@ -0,0 +1,8 @@ +(cmd) +(cmd "") +(cmd "a") +(cmd "a" "b") +(cmd "a" "b") +(cmd "a" "b") +(cmd "a" "b") +(cmd "abc 123" "♥" "☃" "\" \\ / \b \f \n \r \t \x00") diff --git a/builder/parser/testfiles/kartar-entrypoint-oddities/Dockerfile b/builder/parser/testfiles/kartar-entrypoint-oddities/Dockerfile new file mode 100644 index 00000000..35f9c24a --- /dev/null +++ b/builder/parser/testfiles/kartar-entrypoint-oddities/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 +MAINTAINER James Turnbull "james@example.com" +ENV REFRESHED_AT 2014-06-01 +RUN apt-get update +RUN apt-get -y install redis-server redis-tools +EXPOSE 6379 +ENTRYPOINT [ "/usr/bin/redis-server" ] diff --git a/builder/parser/testfiles/kartar-entrypoint-oddities/result b/builder/parser/testfiles/kartar-entrypoint-oddities/result new file mode 100644 index 00000000..b5ac6fe4 --- /dev/null +++ b/builder/parser/testfiles/kartar-entrypoint-oddities/result @@ -0,0 +1,7 @@ +(from "ubuntu:14.04") +(maintainer "James Turnbull \"james@example.com\"") +(env "REFRESHED_AT" "2014-06-01") +(run "apt-get update") +(run "apt-get -y install redis-server redis-tools") +(expose "6379") +(entrypoint "/usr/bin/redis-server") diff --git a/builder/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile b/builder/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile new file mode 100644 index 00000000..188395fe --- /dev/null +++ b/builder/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile @@ -0,0 +1,48 @@ +FROM busybox:buildroot-2014.02 + 
+MAINTAINER docker + +ONBUILD RUN ["echo", "test"] +ONBUILD RUN echo test +ONBUILD COPY . / + + +# RUN Commands \ +# linebreak in comment \ +RUN ["ls", "-la"] +RUN ["echo", "'1234'"] +RUN echo "1234" +RUN echo 1234 +RUN echo '1234' && \ + echo "456" && \ + echo 789 +RUN sh -c 'echo root:testpass \ + > /tmp/passwd' +RUN mkdir -p /test /test2 /test3/test + +# ENV \ +ENV SCUBA 1 DUBA 3 +ENV SCUBA "1 DUBA 3" + +# CMD \ +CMD ["echo", "test"] +CMD echo test +CMD echo "test" +CMD echo 'test' +CMD echo 'test' | wc - + +#EXPOSE\ +EXPOSE 3000 +EXPOSE 9000 5000 6000 + +USER docker +USER docker:root + +VOLUME ["/test"] +VOLUME ["/test", "/test2"] +VOLUME /test3 + +WORKDIR /test + +ADD . / +COPY . copy diff --git a/builder/parser/testfiles/lk4d4-the-edge-case-generator/result b/builder/parser/testfiles/lk4d4-the-edge-case-generator/result new file mode 100644 index 00000000..6f7d57a3 --- /dev/null +++ b/builder/parser/testfiles/lk4d4-the-edge-case-generator/result @@ -0,0 +1,29 @@ +(from "busybox:buildroot-2014.02") +(maintainer "docker ") +(onbuild (run "echo" "test")) +(onbuild (run "echo test")) +(onbuild (copy "." "/")) +(run "ls" "-la") +(run "echo" "'1234'") +(run "echo \"1234\"") +(run "echo 1234") +(run "echo '1234' && echo \"456\" && echo 789") +(run "sh -c 'echo root:testpass > /tmp/passwd'") +(run "mkdir -p /test /test2 /test3/test") +(env "SCUBA" "1 DUBA 3") +(env "SCUBA" "\"1 DUBA 3\"") +(cmd "echo" "test") +(cmd "echo test") +(cmd "echo \"test\"") +(cmd "echo 'test'") +(cmd "echo 'test' | wc -") +(expose "3000") +(expose "9000" "5000" "6000") +(user "docker") +(user "docker:root") +(volume "/test") +(volume "/test" "/test2") +(volume "/test3") +(workdir "/test") +(add "." "/") +(copy "." "copy") diff --git a/builder/parser/testfiles/mail/Dockerfile b/builder/parser/testfiles/mail/Dockerfile new file mode 100644 index 00000000..f64c1168 --- /dev/null +++ b/builder/parser/testfiles/mail/Dockerfile @@ -0,0 +1,16 @@ +FROM ubuntu:14.04 + +RUN apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y +ADD .muttrc / +ADD .offlineimaprc / +ADD .tmux.conf / +ADD mutt /.mutt +ADD vim /.vim +ADD vimrc /.vimrc +ADD crontab /etc/crontab +RUN chmod 644 /etc/crontab +RUN mkdir /Mail +RUN mkdir /.offlineimap +RUN echo "export TERM=screen-256color" >/.zshenv + +CMD setsid cron; tmux -2 diff --git a/builder/parser/testfiles/mail/result b/builder/parser/testfiles/mail/result new file mode 100644 index 00000000..a0efcf04 --- /dev/null +++ b/builder/parser/testfiles/mail/result @@ -0,0 +1,14 @@ +(from "ubuntu:14.04") +(run "apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y") +(add ".muttrc" "/") +(add ".offlineimaprc" "/") +(add ".tmux.conf" "/") +(add "mutt" "/.mutt") +(add "vim" "/.vim") +(add "vimrc" "/.vimrc") +(add "crontab" "/etc/crontab") +(run "chmod 644 /etc/crontab") +(run "mkdir /Mail") +(run "mkdir /.offlineimap") +(run "echo \"export TERM=screen-256color\" >/.zshenv") +(cmd "setsid cron; tmux -2") diff --git a/builder/parser/testfiles/multiple-volumes/Dockerfile b/builder/parser/testfiles/multiple-volumes/Dockerfile new file mode 100644 index 00000000..57bb5976 --- /dev/null +++ b/builder/parser/testfiles/multiple-volumes/Dockerfile @@ -0,0 +1,3 @@ +FROM foo + +VOLUME /opt/nagios/var /opt/nagios/etc /opt/nagios/libexec /var/log/apache2 /usr/share/snmp/mibs diff --git a/builder/parser/testfiles/multiple-volumes/result b/builder/parser/testfiles/multiple-volumes/result new file mode 100644 index 
00000000..18dbdeea --- /dev/null +++ b/builder/parser/testfiles/multiple-volumes/result @@ -0,0 +1,2 @@ +(from "foo") +(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") diff --git a/builder/parser/testfiles/mumble/Dockerfile b/builder/parser/testfiles/mumble/Dockerfile new file mode 100644 index 00000000..5b9ec06a --- /dev/null +++ b/builder/parser/testfiles/mumble/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 + +RUN apt-get update && apt-get install libcap2-bin mumble-server -y + +ADD ./mumble-server.ini /etc/mumble-server.ini + +CMD /usr/sbin/murmurd diff --git a/builder/parser/testfiles/mumble/result b/builder/parser/testfiles/mumble/result new file mode 100644 index 00000000..a0036a94 --- /dev/null +++ b/builder/parser/testfiles/mumble/result @@ -0,0 +1,4 @@ +(from "ubuntu:14.04") +(run "apt-get update && apt-get install libcap2-bin mumble-server -y") +(add "./mumble-server.ini" "/etc/mumble-server.ini") +(cmd "/usr/sbin/murmurd") diff --git a/builder/parser/testfiles/nginx/Dockerfile b/builder/parser/testfiles/nginx/Dockerfile new file mode 100644 index 00000000..bf8368e1 --- /dev/null +++ b/builder/parser/testfiles/nginx/Dockerfile @@ -0,0 +1,14 @@ +FROM ubuntu:14.04 +MAINTAINER Erik Hollensbe + +RUN apt-get update && apt-get install nginx-full -y +RUN rm -rf /etc/nginx +ADD etc /etc/nginx +RUN chown -R root:root /etc/nginx +RUN /usr/sbin/nginx -qt +RUN mkdir /www + +CMD ["/usr/sbin/nginx"] + +VOLUME /www +EXPOSE 80 diff --git a/builder/parser/testfiles/nginx/result b/builder/parser/testfiles/nginx/result new file mode 100644 index 00000000..56ddb6f2 --- /dev/null +++ b/builder/parser/testfiles/nginx/result @@ -0,0 +1,11 @@ +(from "ubuntu:14.04") +(maintainer "Erik Hollensbe ") +(run "apt-get update && apt-get install nginx-full -y") +(run "rm -rf /etc/nginx") +(add "etc" "/etc/nginx") +(run "chown -R root:root /etc/nginx") +(run "/usr/sbin/nginx -qt") +(run "mkdir /www") +(cmd "/usr/sbin/nginx") +(volume "/www") +(expose "80") diff --git a/builder/parser/testfiles/tf2/Dockerfile b/builder/parser/testfiles/tf2/Dockerfile new file mode 100644 index 00000000..72b79bdd --- /dev/null +++ b/builder/parser/testfiles/tf2/Dockerfile @@ -0,0 +1,23 @@ +FROM ubuntu:12.04 + +EXPOSE 27015 +EXPOSE 27005 +EXPOSE 26901 +EXPOSE 27020 + +RUN apt-get update && apt-get install libc6-dev-i386 curl unzip -y +RUN mkdir -p /steam +RUN curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam +ADD ./script /steam/script +RUN /steam/steamcmd.sh +runscript /steam/script +RUN curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf +RUN curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf +ADD ./server.cfg /steam/tf2/tf/cfg/server.cfg +ADD ./ctf_2fort.cfg /steam/tf2/tf/cfg/ctf_2fort.cfg +ADD ./sourcemod.cfg /steam/tf2/tf/cfg/sourcemod/sourcemod.cfg +RUN rm -r /steam/tf2/tf/addons/sourcemod/configs +ADD ./configs /steam/tf2/tf/addons/sourcemod/configs +RUN mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en +RUN cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en + +CMD cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill diff --git a/builder/parser/testfiles/tf2/result b/builder/parser/testfiles/tf2/result new file mode 100644 index 00000000..d4f94cd8 --- 
/dev/null +++ b/builder/parser/testfiles/tf2/result @@ -0,0 +1,20 @@ +(from "ubuntu:12.04") +(expose "27015") +(expose "27005") +(expose "26901") +(expose "27020") +(run "apt-get update && apt-get install libc6-dev-i386 curl unzip -y") +(run "mkdir -p /steam") +(run "curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam") +(add "./script" "/steam/script") +(run "/steam/steamcmd.sh +runscript /steam/script") +(run "curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf") +(run "curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf") +(add "./server.cfg" "/steam/tf2/tf/cfg/server.cfg") +(add "./ctf_2fort.cfg" "/steam/tf2/tf/cfg/ctf_2fort.cfg") +(add "./sourcemod.cfg" "/steam/tf2/tf/cfg/sourcemod/sourcemod.cfg") +(run "rm -r /steam/tf2/tf/addons/sourcemod/configs") +(add "./configs" "/steam/tf2/tf/addons/sourcemod/configs") +(run "mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en") +(run "cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en") +(cmd "cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill") diff --git a/builder/parser/testfiles/weechat/Dockerfile b/builder/parser/testfiles/weechat/Dockerfile new file mode 100644 index 00000000..48420881 --- /dev/null +++ b/builder/parser/testfiles/weechat/Dockerfile @@ -0,0 +1,9 @@ +FROM ubuntu:14.04 + +RUN apt-get update -qy && apt-get install tmux zsh weechat-curses -y + +ADD .weechat /.weechat +ADD .tmux.conf / +RUN echo "export TERM=screen-256color" >/.zshenv + +CMD zsh -c weechat diff --git a/builder/parser/testfiles/weechat/result b/builder/parser/testfiles/weechat/result new file mode 100644 index 00000000..c3abb4c5 --- /dev/null +++ b/builder/parser/testfiles/weechat/result @@ -0,0 +1,6 @@ +(from "ubuntu:14.04") +(run "apt-get update -qy && apt-get install tmux zsh weechat-curses -y") +(add ".weechat" "/.weechat") +(add ".tmux.conf" "/") +(run "echo \"export TERM=screen-256color\" >/.zshenv") +(cmd "zsh -c weechat") diff --git a/builder/parser/testfiles/znc/Dockerfile b/builder/parser/testfiles/znc/Dockerfile new file mode 100644 index 00000000..3a4da6e9 --- /dev/null +++ b/builder/parser/testfiles/znc/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 +MAINTAINER Erik Hollensbe + +RUN apt-get update && apt-get install znc -y +ADD conf /.znc + +CMD [ "/usr/bin/znc", "-f", "-r" ] diff --git a/builder/parser/testfiles/znc/result b/builder/parser/testfiles/znc/result new file mode 100644 index 00000000..5493b255 --- /dev/null +++ b/builder/parser/testfiles/znc/result @@ -0,0 +1,5 @@ +(from "ubuntu:14.04") +(maintainer "Erik Hollensbe ") +(run "apt-get update && apt-get install znc -y") +(add "conf" "/.znc") +(cmd "/usr/bin/znc" "-f" "-r") diff --git a/builder/parser/utils.go b/builder/parser/utils.go new file mode 100644 index 00000000..352d7a7e --- /dev/null +++ b/builder/parser/utils.go @@ -0,0 +1,176 @@ +package parser + +import ( + "fmt" + "strconv" + "strings" + "unicode" +) + +// Dump dumps the AST defined by `node` as a list of sexps. +// Returns a string suitable for printing. 
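The Dockerfile/result pairs above are fixtures for this parser: each result file is exactly the sexp dump of the corresponding Dockerfile's AST. As a minimal sketch — assuming the exported parser.Parse(io.Reader) entry point from parser.go, which sits outside this hunk — regenerating one result file could look like:

package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/builder/parser"
)

func main() {
	// Parse a Dockerfile into an AST, then print it in the same
	// sexp form stored in the testfiles/*/result fixtures above.
	f, err := os.Open("Dockerfile")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	ast, err := parser.Parse(f)
	if err != nil {
		panic(err)
	}
	fmt.Println(ast.Dump())
}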
+func (node *Node) Dump() string { + str := "" + str += node.Value + + if len(node.Flags) > 0 { + str += fmt.Sprintf(" %q", node.Flags) + } + + for _, n := range node.Children { + str += "(" + n.Dump() + ")\n" + } + + if node.Next != nil { + for n := node.Next; n != nil; n = n.Next { + if len(n.Children) > 0 { + str += " " + n.Dump() + } else { + str += " " + strconv.Quote(n.Value) + } + } + } + + return strings.TrimSpace(str) +} + +// performs the dispatch based on the two primal strings, cmd and args. Please +// look at the dispatch table in parser.go to see how these dispatchers work. +func fullDispatch(cmd, args string) (*Node, map[string]bool, error) { + fn := dispatch[cmd] + + // Ignore invalid Dockerfile instructions + if fn == nil { + fn = parseIgnore + } + + sexp, attrs, err := fn(args) + if err != nil { + return nil, nil, err + } + + return sexp, attrs, nil +} + +// splitCommand takes a single line of text and parses out the cmd and args, +// which are used for dispatching to more exact parsing functions. +func splitCommand(line string) (string, []string, string, error) { + var args string + var flags []string + + // Make sure we get the same results irrespective of leading/trailing spaces + cmdline := tokenWhitespace.Split(strings.TrimSpace(line), 2) + cmd := strings.ToLower(cmdline[0]) + + if len(cmdline) == 2 { + var err error + args, flags, err = extractBuilderFlags(cmdline[1]) + if err != nil { + return "", nil, "", err + } + } + + return cmd, flags, strings.TrimSpace(args), nil +} + +// covers comments and empty lines. Lines should be trimmed before passing to +// this function. +func stripComments(line string) string { + // string is already trimmed at this point + if tokenComment.MatchString(line) { + return tokenComment.ReplaceAllString(line, "") + } + + return line +} + +func extractBuilderFlags(line string) (string, []string, error) { + // Parses the BuilderFlags and returns the remaining part of the line + + const ( + inSpaces = iota // looking for start of a word + inWord + inQuote + ) + + words := []string{} + phase := inSpaces + word := "" + quote := '\000' + blankOK := false + var ch rune + + for pos := 0; pos <= len(line); pos++ { + if pos != len(line) { + ch = rune(line[pos]) + } + + if phase == inSpaces { // Looking for start of word + if pos == len(line) { // end of input + break + } + if unicode.IsSpace(ch) { // skip spaces + continue + } + + // Only keep going if the next word starts with -- + if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' { + return line[pos:], words, nil + } + + phase = inWord // found someting with "--", fall thru + } + if (phase == inWord || phase == inQuote) && (pos == len(line)) { + if word != "--" && (blankOK || len(word) > 0) { + words = append(words, word) + } + break + } + if phase == inWord { + if unicode.IsSpace(ch) { + phase = inSpaces + if word == "--" { + return line[pos:], words, nil + } + if blankOK || len(word) > 0 { + words = append(words, word) + } + word = "" + blankOK = false + continue + } + if ch == '\'' || ch == '"' { + quote = ch + blankOK = true + phase = inQuote + continue + } + if ch == '\\' { + if pos+1 == len(line) { + continue // just skip \ at end + } + pos++ + ch = rune(line[pos]) + } + word += string(ch) + continue + } + if phase == inQuote { + if ch == quote { + phase = inWord + continue + } + if ch == '\\' { + if pos+1 == len(line) { + phase = inWord + continue // just skip \ at end + } + pos++ + ch = rune(line[pos]) + } + word += string(ch) + } + } + + return "", words, nil +} diff --git 
a/builder/shell_parser.go b/builder/shell_parser.go new file mode 100644 index 00000000..c2ffcc96 --- /dev/null +++ b/builder/shell_parser.go @@ -0,0 +1,243 @@ +package builder + +// This will take a single word and an array of env variables and +// process all quotes (" and ') as well as $xxx and ${xxx} env variable +// tokens. Tries to mimic bash shell process. +// It doesn't support all flavors of ${xx:...} formats but new ones can +// be added by adding code to the "special ${} format processing" section + +import ( + "fmt" + "strings" + "unicode" +) + +type shellWord struct { + word string + envs []string + pos int +} + +// ProcessWord will use the 'env' list of environment variables, +// and replace any env var references in 'word'. +func ProcessWord(word string, env []string) (string, error) { + sw := &shellWord{ + word: word, + envs: env, + pos: 0, + } + return sw.process() +} + +func (sw *shellWord) process() (string, error) { + return sw.processStopOn('\000') +} + +// Process the word, starting at 'pos', and stop when we get to the +// end of the word or the 'stopChar' character +func (sw *shellWord) processStopOn(stopChar rune) (string, error) { + var result string + var charFuncMapping = map[rune]func() (string, error){ + '\'': sw.processSingleQuote, + '"': sw.processDoubleQuote, + '$': sw.processDollar, + } + + for sw.pos < len(sw.word) { + ch := sw.peek() + if stopChar != '\000' && ch == stopChar { + sw.next() + break + } + if fn, ok := charFuncMapping[ch]; ok { + // Call special processing func for certain chars + tmp, err := fn() + if err != nil { + return "", err + } + result += tmp + } else { + // Not special, just add it to the result + ch = sw.next() + if ch == '\\' { + // '\' escapes, except end of line + ch = sw.next() + if ch == '\000' { + continue + } + } + result += string(ch) + } + } + + return result, nil +} + +func (sw *shellWord) peek() rune { + if sw.pos == len(sw.word) { + return '\000' + } + return rune(sw.word[sw.pos]) +} + +func (sw *shellWord) next() rune { + if sw.pos == len(sw.word) { + return '\000' + } + ch := rune(sw.word[sw.pos]) + sw.pos++ + return ch +} + +func (sw *shellWord) processSingleQuote() (string, error) { + // All chars between single quotes are taken as-is + // Note, you can't escape ' + var result string + + sw.next() + + for { + ch := sw.next() + if ch == '\000' || ch == '\'' { + break + } + result += string(ch) + } + return result, nil +} + +func (sw *shellWord) processDoubleQuote() (string, error) { + // All chars up to the next " are taken as-is, even ', except any $ chars + // But you can escape " with a \ + var result string + + sw.next() + + for sw.pos < len(sw.word) { + ch := sw.peek() + if ch == '"' { + sw.next() + break + } + if ch == '$' { + tmp, err := sw.processDollar() + if err != nil { + return "", err + } + result += tmp + } else { + ch = sw.next() + if ch == '\\' { + chNext := sw.peek() + + if chNext == '\000' { + // Ignore \ at end of word + continue + } + + if chNext == '"' || chNext == '$' { + // \" and \$ can be escaped, all other \'s are left as-is + ch = sw.next() + } + } + result += string(ch) + } + } + + return result, nil +} + +func (sw *shellWord) processDollar() (string, error) { + sw.next() + ch := sw.peek() + if ch == '{' { + sw.next() + name := sw.processName() + ch = sw.peek() + if ch == '}' { + // Normal ${xx} case + sw.next() + return sw.getEnv(name), nil + } + if ch == ':' { + // Special ${xx:...} format processing + // Yes it allows for recursive $'s in the ... 
spot + + sw.next() // skip over : + modifier := sw.next() + + word, err := sw.processStopOn('}') + if err != nil { + return "", err + } + + // Grab the current value of the variable in question so we + // can use to to determine what to do based on the modifier + newValue := sw.getEnv(name) + + switch modifier { + case '+': + if newValue != "" { + newValue = word + } + return newValue, nil + + case '-': + if newValue == "" { + newValue = word + } + return newValue, nil + + default: + return "", fmt.Errorf("Unsupported modifier (%c) in substitution: %s", modifier, sw.word) + } + } + return "", fmt.Errorf("Missing ':' in substitution: %s", sw.word) + } + // $xxx case + name := sw.processName() + if name == "" { + return "$", nil + } + return sw.getEnv(name), nil +} + +func (sw *shellWord) processName() string { + // Read in a name (alphanumeric or _) + // If it starts with a numeric then just return $# + var name string + + for sw.pos < len(sw.word) { + ch := sw.peek() + if len(name) == 0 && unicode.IsDigit(ch) { + ch = sw.next() + return string(ch) + } + if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' { + break + } + ch = sw.next() + name += string(ch) + } + + return name +} + +func (sw *shellWord) getEnv(name string) string { + for _, env := range sw.envs { + i := strings.Index(env, "=") + if i < 0 { + if name == env { + // Should probably never get here, but just in case treat + // it like "var" and "var=" are the same + return "" + } + continue + } + if name != env[:i] { + continue + } + return env[i+1:] + } + return "" +} diff --git a/builder/shell_parser_test.go b/builder/shell_parser_test.go new file mode 100644 index 00000000..79260492 --- /dev/null +++ b/builder/shell_parser_test.go @@ -0,0 +1,51 @@ +package builder + +import ( + "bufio" + "os" + "strings" + "testing" +) + +func TestShellParser(t *testing.T) { + file, err := os.Open("words") + if err != nil { + t.Fatalf("Can't open 'words': %s", err) + } + defer file.Close() + + scanner := bufio.NewScanner(file) + envs := []string{"PWD=/home", "SHELL=bash"} + for scanner.Scan() { + line := scanner.Text() + + // Trim comments and blank lines + i := strings.Index(line, "#") + if i >= 0 { + line = line[:i] + } + line = strings.TrimSpace(line) + + if line == "" { + continue + } + + words := strings.Split(line, "|") + if len(words) != 2 { + t.Fatalf("Error in 'words' - should be 2 words:%q", words) + } + + words[0] = strings.TrimSpace(words[0]) + words[1] = strings.TrimSpace(words[1]) + + newWord, err := ProcessWord(words[0], envs) + + if err != nil { + newWord = "error" + } + + if newWord != words[1] { + t.Fatalf("Error. 
Src: %s Calc: %s Expected: %s", words[0], newWord, words[1]) + } + } +} diff --git a/builder/support.go b/builder/support.go new file mode 100644 index 00000000..4cc25dcb --- /dev/null +++ b/builder/support.go @@ -0,0 +1,27 @@ +package builder + +import ( + "regexp" + "strings" +) + +const acceptableRemoteMIME = `(?:application/(?:(?:x\-)?tar|octet\-stream|((?:x\-)?(?:gzip|bzip2?|xz)))|(?:text/plain))` + +var mimeRe = regexp.MustCompile(acceptableRemoteMIME) + +func selectAcceptableMIME(ct string) string { + return mimeRe.FindString(ct) +} + +func handleJSONArgs(args []string, attributes map[string]bool) []string { + if len(args) == 0 { + return []string{} + } + + if attributes != nil && attributes["json"] { + return args + } + + // literal string command, not an exec array + return []string{strings.Join(args, " ")} +} diff --git a/builder/support_test.go b/builder/support_test.go new file mode 100644 index 00000000..417e4f44 --- /dev/null +++ b/builder/support_test.go @@ -0,0 +1,41 @@ +package builder + +import ( + "fmt" + "testing" +) + +func TestSelectAcceptableMIME(t *testing.T) { + validMimeStrings := []string{ + "application/x-bzip2", + "application/bzip2", + "application/gzip", + "application/x-gzip", + "application/x-xz", + "application/xz", + "application/tar", + "application/x-tar", + "application/octet-stream", + "text/plain", + } + + invalidMimeStrings := []string{ + "", + "application/octet", + "application/json", + } + + for _, m := range invalidMimeStrings { + if len(selectAcceptableMIME(m)) > 0 { + err := fmt.Errorf("Should not have accepted %q", m) + t.Fatal(err) + } + } + + for _, m := range validMimeStrings { + if str := selectAcceptableMIME(m); str == "" { + err := fmt.Errorf("Should have accepted %q", m) + t.Fatal(err) + } + } +} diff --git a/builder/words b/builder/words new file mode 100644 index 00000000..1114a7e4 --- /dev/null +++ b/builder/words @@ -0,0 +1,58 @@ +hello | hello +he'll'o | hello +he'llo | hello +he\'llo | he'llo +he\\'llo | he\llo +abc\tdef | abctdef +"abc\tdef" | abc\tdef +'abc\tdef' | abc\tdef +hello\ | hello +hello\\ | hello\ +"hello | hello +"hello\" | hello" +"hel'lo" | hel'lo +'hello | hello +'hello\' | hello\ +"''" | '' +$. | $. +$1 | +he$1x | hex +he$.x | he$.x +he$pwd. | he. +he$PWD | he/home +he\$PWD | he$PWD +he\\$PWD | he\/home +he\${} | he${} +he\${}xx | he${}xx +he${} | he +he${}xx | hexx +he${hi} | he +he${hi}xx | hexx +he${PWD} | he/home +he${.} | error +he${XXX:-000}xx | he000xx +he${PWD:-000}xx | he/homexx +he${XXX:-$PWD}xx | he/homexx +he${XXX:-${PWD:-yyy}}xx | he/homexx +he${XXX:-${YYY:-yyy}}xx | heyyyxx +he${XXX:YYY} | error +he${XXX:+${PWD}}xx | hexx +he${PWD:+${XXX}}xx | hexx +he${PWD:+${SHELL}}xx | hebashxx +he${XXX:+000}xx | hexx +he${PWD:+000}xx | he000xx +'he${XX}' | he${XX} +"he${PWD}" | he/home +"he'$PWD'" | he'/home' +"$PWD" | /home +'$PWD' | $PWD +'\$PWD' | \$PWD +'"hello"' | "hello" +he\$PWD | he$PWD +"he\$PWD" | he$PWD +'he\$PWD' | he\$PWD +he${PWD | error +he${PWD:=000}xx | error +he${PWD:+${PWD}:}xx | he/home:xx +he${XXX:-\$PWD:}xx | he$PWD:xx +he${XXX:-\${PWD}z}xx | he${PWDz}xx diff --git a/cli/cli.go b/cli/cli.go new file mode 100644 index 00000000..8e559fc3 --- /dev/null +++ b/cli/cli.go @@ -0,0 +1,200 @@ +package cli + +import ( + "errors" + "fmt" + "io" + "os" + "reflect" + "strings" + + flag "github.com/docker/docker/pkg/mflag" +) + +// Cli represents a command line interface. 
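Looking back at the builder/words fixture above: each line is an input|expected pair for ProcessWord, evaluated with PWD=/home and SHELL=bash exactly as in shell_parser_test.go. A minimal sketch of driving it by hand from inside the builder package (the helper name is hypothetical, and fmt must be imported):

// demoProcessWord spot-checks a few entries from the words fixture.
func demoProcessWord() {
	envs := []string{"PWD=/home", "SHELL=bash"}
	for _, w := range []string{"he${PWD}", "he${XXX:-000}xx", "'$PWD'"} {
		out, err := ProcessWord(w, envs)
		if err != nil {
			// e.g. he${PWD or he${XXX:YYY} report parse errors
			fmt.Printf("%s -> error\n", w)
			continue
		}
		fmt.Printf("%s -> %s\n", w, out) // he/home, he000xx, $PWD
	}
}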
+type Cli struct { + Stderr io.Writer + handlers []Handler + Usage func() +} + +// Handler holds the different commands Cli will call +// It should have methods with names starting with `Cmd` like: +// func (h myHandler) CmdFoo(args ...string) error +type Handler interface{} + +// Initializer can be optionally implemented by a Handler to +// initialize before each call to one of its commands. +type Initializer interface { + Initialize() error +} + +// New instantiates a ready-to-use Cli. +func New(handlers ...Handler) *Cli { + // make the generic Cli object the first cli handler + // in order to handle `docker help` appropriately + cli := new(Cli) + cli.handlers = append([]Handler{cli}, handlers...) + return cli +} + +// initErr is an error returned upon initialization of a handler implementing Initializer. +type initErr struct{ error } + +func (err initErr) Error() string { + // Return the embedded error's message; calling err.Error() here would recurse into this method forever. + return err.error.Error() +} + +func (cli *Cli) command(args ...string) (func(...string) error, error) { + for _, c := range cli.handlers { + if c == nil { + continue + } + camelArgs := make([]string, len(args)) + for i, s := range args { + if len(s) == 0 { + return nil, errors.New("empty command") + } + camelArgs[i] = strings.ToUpper(s[:1]) + strings.ToLower(s[1:]) + } + methodName := "Cmd" + strings.Join(camelArgs, "") + method := reflect.ValueOf(c).MethodByName(methodName) + if method.IsValid() { + if c, ok := c.(Initializer); ok { + if err := c.Initialize(); err != nil { + return nil, initErr{err} + } + } + return method.Interface().(func(...string) error), nil + } + } + return nil, errors.New("command not found") +} + +// Run executes the specified command. +func (cli *Cli) Run(args ...string) error { + if len(args) > 1 { + command, err := cli.command(args[:2]...) + switch err := err.(type) { + case nil: + return command(args[2:]...) + case initErr: + return err.error + } + } + if len(args) > 0 { + command, err := cli.command(args[0]) + switch err := err.(type) { + case nil: + return command(args[1:]...) + case initErr: + return err.error + } + cli.noSuchCommand(args[0]) + } + return cli.CmdHelp() +} + +func (cli *Cli) noSuchCommand(command string) { + if cli.Stderr == nil { + cli.Stderr = os.Stderr + } + fmt.Fprintf(cli.Stderr, "docker: '%s' is not a docker command.\nSee 'docker --help'.\n", command) + os.Exit(1) +} + +// CmdHelp displays information on a Docker command. +// +// If more than one command is specified, information is only shown for the first command. +// +// Usage: docker help COMMAND or docker COMMAND --help +func (cli *Cli) CmdHelp(args ...string) error { + if len(args) > 1 { + command, err := cli.command(args[:2]...) + switch err := err.(type) { + case nil: + command("--help") + return nil + case initErr: + return err.error + } + } + if len(args) > 0 { + command, err := cli.command(args[0]) + switch err := err.(type) { + case nil: + command("--help") + return nil + case initErr: + return err.error + } + cli.noSuchCommand(args[0]) + } + + if cli.Usage == nil { + flag.Usage() + } else { + cli.Usage() + } + + return nil +} + +// Subcmd is a subcommand of the main "docker" command. +// A subcommand represents an action that can be performed +// from the Docker command line client. +// +// To see all available subcommands, run "docker --help". 
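Before Subcmd's definition, a short sketch of the reflection dispatch above: Run maps its leading arguments onto a Cmd-prefixed method, preferring the two-word form. The handler type and command name below are hypothetical; the import path follows this tree's layout:

package main

import (
	"fmt"

	"github.com/docker/docker/cli"
)

type greeter struct{}

// CmdHello is discovered via reflection: "hello" -> "Cmd" + "Hello".
func (g greeter) CmdHello(args ...string) error {
	fmt.Println("hello,", args)
	return nil
}

func main() {
	c := cli.New(greeter{})
	// command() first tries CmdHelloWorld (two args); that method does
	// not exist, so Run falls back to CmdHello with "world" passed on.
	_ = c.Run("hello", "world")
}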
+func Subcmd(name string, synopses []string, description string, exitOnError bool) *flag.FlagSet { + var errorHandling flag.ErrorHandling + if exitOnError { + errorHandling = flag.ExitOnError + } else { + errorHandling = flag.ContinueOnError + } + flags := flag.NewFlagSet(name, errorHandling) + flags.Usage = func() { + flags.ShortUsage() + flags.PrintDefaults() + } + + flags.ShortUsage = func() { + options := "" + if flags.FlagCountUndeprecated() > 0 { + options = " [OPTIONS]" + } + + if len(synopses) == 0 { + synopses = []string{""} + } + + // Allow for multiple command usage synopses. + for i, synopsis := range synopses { + lead := "\t" + if i == 0 { + // First line needs the word 'Usage'. + lead = "Usage:\t" + } + + if synopsis != "" { + synopsis = " " + synopsis + } + + fmt.Fprintf(flags.Out(), "\n%sdocker %s%s%s", lead, name, options, synopsis) + } + + fmt.Fprintf(flags.Out(), "\n\n%s\n", description) + } + + return flags +} + +// An StatusError reports an unsuccessful exit by a command. +type StatusError struct { + Status string + StatusCode int +} + +func (e StatusError) Error() string { + return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode) +} diff --git a/cli/client.go b/cli/client.go new file mode 100644 index 00000000..6a82eb52 --- /dev/null +++ b/cli/client.go @@ -0,0 +1,12 @@ +package cli + +import flag "github.com/docker/docker/pkg/mflag" + +// ClientFlags represents flags for the docker client. +type ClientFlags struct { + FlagSet *flag.FlagSet + Common *CommonFlags + PostParse func() + + ConfigDir string +} diff --git a/cli/common.go b/cli/common.go new file mode 100644 index 00000000..85a02ac4 --- /dev/null +++ b/cli/common.go @@ -0,0 +1,20 @@ +package cli + +import ( + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/tlsconfig" +) + +// CommonFlags represents flags that are common to both the client and the daemon. +type CommonFlags struct { + FlagSet *flag.FlagSet + PostParse func() + + Debug bool + Hosts []string + LogLevel string + TLS bool + TLSVerify bool + TLSOptions *tlsconfig.Options + TrustKey string +} diff --git a/cliconfig/config.go b/cliconfig/config.go new file mode 100644 index 00000000..d00bc716 --- /dev/null +++ b/cliconfig/config.go @@ -0,0 +1,227 @@ +package cliconfig + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/homedir" + "github.com/docker/docker/pkg/system" +) + +const ( + // ConfigFile is the name of config file + ConfigFileName = "config.json" + oldConfigfile = ".dockercfg" + + // This constant is only used for really old config files when the + // URL wasn't saved as part of the config file and it was just + // assumed to be this value. 
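For context, Load (below) accepts two legacy ~/.dockercfg layouts in addition to the current config.json; a sketch with purely illustrative values:

// Old JSON map form, keyed by server address:
//   {"https://index.docker.io/v1/": {"auth": "<base64 user:pass>", "email": "user@example.com"}}
// Even older two-line form, where the index URL is implied:
//   auth = <base64 user:pass>
//   email = user@example.com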
+ defaultIndexserver = "https://index.docker.io/v1/" +) + +var ( + configDir = os.Getenv("DOCKER_CONFIG") +) + +func init() { + if configDir == "" { + configDir = filepath.Join(homedir.Get(), ".docker") + } +} + +// ConfigDir returns the directory the configuration file is stored in +func ConfigDir() string { + return configDir +} + +// SetConfigDir sets the directory the configuration file is stored in +func SetConfigDir(dir string) { + configDir = dir +} + +// AuthConfig contains authorization information for connecting to a Registry +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth"` + Email string `json:"email"` + ServerAddress string `json:"serveraddress,omitempty"` +} + +// ConfigFile holds the information stored in the ~/.docker/config.json file +type ConfigFile struct { + AuthConfigs map[string]AuthConfig `json:"auths"` + HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` + PsFormat string `json:"psFormat,omitempty"` + filename string // Note: not serialized - for internal use only +} + +// NewConfigFile initializes an empty configuration file for the given filename 'fn' +func NewConfigFile(fn string) *ConfigFile { + return &ConfigFile{ + AuthConfigs: make(map[string]AuthConfig), + HTTPHeaders: make(map[string]string), + filename: fn, + } +} + +// Load reads the configuration files in the given directory, sets up +// the auth config information, and returns it. +// FIXME: use the internal golang config parser +func Load(configDir string) (*ConfigFile, error) { + if configDir == "" { + configDir = ConfigDir() + } + + configFile := ConfigFile{ + AuthConfigs: make(map[string]AuthConfig), + filename: filepath.Join(configDir, ConfigFileName), + } + + // Try happy path first - latest config file + if _, err := os.Stat(configFile.filename); err == nil { + file, err := os.Open(configFile.filename) + if err != nil { + return &configFile, err + } + defer file.Close() + + if err := json.NewDecoder(file).Decode(&configFile); err != nil { + return &configFile, err + } + + for addr, ac := range configFile.AuthConfigs { + ac.Username, ac.Password, err = DecodeAuth(ac.Auth) + if err != nil { + return &configFile, err + } + ac.Auth = "" + ac.ServerAddress = addr + configFile.AuthConfigs[addr] = ac + } + + return &configFile, nil + } else if !os.IsNotExist(err) { + // if file is there but we can't stat it for any reason other + // than it doesn't exist then stop + return &configFile, err + } + + // Can't find latest config file so check for the old one + confFile := filepath.Join(homedir.Get(), oldConfigfile) + if _, err := os.Stat(confFile); err != nil { + return &configFile, nil //missing file is not an error + } + + b, err := ioutil.ReadFile(confFile) + if err != nil { + return &configFile, err + } + + if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil { + arr := strings.Split(string(b), "\n") + if len(arr) < 2 { + return &configFile, fmt.Errorf("The Auth config file is empty") + } + authConfig := AuthConfig{} + origAuth := strings.Split(arr[0], " = ") + if len(origAuth) != 2 { + return &configFile, fmt.Errorf("Invalid Auth config file") + } + authConfig.Username, authConfig.Password, err = DecodeAuth(origAuth[1]) + if err != nil { + return &configFile, err + } + origEmail := strings.Split(arr[1], " = ") + if len(origEmail) != 2 { + return &configFile, fmt.Errorf("Invalid Auth config file") + } + authConfig.Email = origEmail[1] + authConfig.ServerAddress = defaultIndexserver + 
configFile.AuthConfigs[defaultIndexserver] = authConfig + } else { + for k, authConfig := range configFile.AuthConfigs { + authConfig.Username, authConfig.Password, err = DecodeAuth(authConfig.Auth) + if err != nil { + return &configFile, err + } + authConfig.Auth = "" + authConfig.ServerAddress = k + configFile.AuthConfigs[k] = authConfig + } + } + return &configFile, nil +} + +// Save encodes and writes out all the authorization information +func (configFile *ConfigFile) Save() error { + // Encode sensitive data into a new/temp struct + tmpAuthConfigs := make(map[string]AuthConfig, len(configFile.AuthConfigs)) + for k, authConfig := range configFile.AuthConfigs { + authCopy := authConfig + // encode and save the authstring, while blanking out the original fields + authCopy.Auth = EncodeAuth(&authCopy) + authCopy.Username = "" + authCopy.Password = "" + authCopy.ServerAddress = "" + tmpAuthConfigs[k] = authCopy + } + + saveAuthConfigs := configFile.AuthConfigs + configFile.AuthConfigs = tmpAuthConfigs + defer func() { configFile.AuthConfigs = saveAuthConfigs }() + + data, err := json.MarshalIndent(configFile, "", "\t") + if err != nil { + return err + } + + if err := system.MkdirAll(filepath.Dir(configFile.filename), 0700); err != nil { + return err + } + + if err := ioutil.WriteFile(configFile.filename, data, 0600); err != nil { + return err + } + + return nil +} + +// Filename returns the name of the configuration file +func (configFile *ConfigFile) Filename() string { + return configFile.filename +} + +// EncodeAuth creates a base64 encoded string to containing authorization information +func EncodeAuth(authConfig *AuthConfig) string { + authStr := authConfig.Username + ":" + authConfig.Password + msg := []byte(authStr) + encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) + base64.StdEncoding.Encode(encoded, msg) + return string(encoded) +} + +// DecodeAuth decodes a base64 encoded string and returns username and password +func DecodeAuth(authStr string) (string, string, error) { + decLen := base64.StdEncoding.DecodedLen(len(authStr)) + decoded := make([]byte, decLen) + authByte := []byte(authStr) + n, err := base64.StdEncoding.Decode(decoded, authByte) + if err != nil { + return "", "", err + } + if n > decLen { + return "", "", fmt.Errorf("Something went wrong decoding auth config") + } + arr := strings.SplitN(string(decoded), ":", 2) + if len(arr) != 2 { + return "", "", fmt.Errorf("Invalid auth configuration file") + } + password := strings.Trim(arr[1], "\x00") + return arr[0], password, nil +} diff --git a/cliconfig/config_test.go b/cliconfig/config_test.go new file mode 100644 index 00000000..25fb58a4 --- /dev/null +++ b/cliconfig/config_test.go @@ -0,0 +1,188 @@ +package cliconfig + +import ( + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/pkg/homedir" +) + +func TestMissingFile(t *testing.T) { + tmpHome, _ := ioutil.TempDir("", "config-test") + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on missing file: %q", err) + } + + // Now save it and make sure it shows up in new form + err = config.Save() + if err != nil { + t.Fatalf("Failed to save: %q", err) + } + + buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) + if !strings.Contains(string(buf), `"auths":`) { + t.Fatalf("Should have save in new form: %s", string(buf)) + } +} + +func TestSaveFileToDirs(t *testing.T) { + tmpHome, _ := ioutil.TempDir("", "config-test") + + tmpHome += "/.docker" + + config, err := 
Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on missing file: %q", err) + } + + // Now save it and make sure it shows up in new form + err = config.Save() + if err != nil { + t.Fatalf("Failed to save: %q", err) + } + + buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) + if !strings.Contains(string(buf), `"auths":`) { + t.Fatalf("Should have save in new form: %s", string(buf)) + } +} + +func TestEmptyFile(t *testing.T) { + tmpHome, _ := ioutil.TempDir("", "config-test") + fn := filepath.Join(tmpHome, ConfigFileName) + ioutil.WriteFile(fn, []byte(""), 0600) + + _, err := Load(tmpHome) + if err == nil { + t.Fatalf("Was supposed to fail") + } +} + +func TestEmptyJson(t *testing.T) { + tmpHome, _ := ioutil.TempDir("", "config-test") + fn := filepath.Join(tmpHome, ConfigFileName) + ioutil.WriteFile(fn, []byte("{}"), 0600) + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + // Now save it and make sure it shows up in new form + err = config.Save() + if err != nil { + t.Fatalf("Failed to save: %q", err) + } + + buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) + if !strings.Contains(string(buf), `"auths":`) { + t.Fatalf("Should have save in new form: %s", string(buf)) + } +} + +func TestOldJson(t *testing.T) { + if runtime.GOOS == "windows" { + return + } + + tmpHome, _ := ioutil.TempDir("", "config-test") + defer os.RemoveAll(tmpHome) + + homeKey := homedir.Key() + homeVal := homedir.Get() + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpHome) + + fn := filepath.Join(tmpHome, oldConfigfile) + js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` + ioutil.WriteFile(fn, []byte(js), 0600) + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + ac := config.AuthConfigs["https://index.docker.io/v1/"] + if ac.Email != "user@example.com" || ac.Username != "joejoe" || ac.Password != "hello" { + t.Fatalf("Missing data from parsing:\n%q", config) + } + + // Now save it and make sure it shows up in new form + err = config.Save() + if err != nil { + t.Fatalf("Failed to save: %q", err) + } + + buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) + if !strings.Contains(string(buf), `"auths":`) || + !strings.Contains(string(buf), "user@example.com") { + t.Fatalf("Should have save in new form: %s", string(buf)) + } +} + +func TestNewJson(t *testing.T) { + tmpHome, _ := ioutil.TempDir("", "config-test") + fn := filepath.Join(tmpHome, ConfigFileName) + js := ` { "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } } }` + ioutil.WriteFile(fn, []byte(js), 0600) + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + ac := config.AuthConfigs["https://index.docker.io/v1/"] + if ac.Email != "user@example.com" || ac.Username != "joejoe" || ac.Password != "hello" { + t.Fatalf("Missing data from parsing:\n%q", config) + } + + // Now save it and make sure it shows up in new form + err = config.Save() + if err != nil { + t.Fatalf("Failed to save: %q", err) + } + + buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) + if !strings.Contains(string(buf), `"auths":`) || + !strings.Contains(string(buf), "user@example.com") { + t.Fatalf("Should have save in new form: %s", string(buf)) + } +} + +func TestJsonWithPsFormat(t *testing.T) { + tmpHome, _ := 
ioutil.TempDir("", "config-test") + fn := filepath.Join(tmpHome, ConfigFileName) + js := `{ + "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, + "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" +}` + ioutil.WriteFile(fn, []byte(js), 0600) + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + if config.PsFormat != `table {{.ID}}\t{{.Label "com.docker.label.cpu"}}` { + t.Fatalf("Unknown ps format: %s\n", config.PsFormat) + } + + // Now save it and make sure it shows up in new form + err = config.Save() + if err != nil { + t.Fatalf("Failed to save: %q", err) + } + + buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) + if !strings.Contains(string(buf), `"psFormat":`) || + !strings.Contains(string(buf), "{{.ID}}") { + t.Fatalf("Should have save in new form: %s", string(buf)) + } +} diff --git a/contrib/README b/contrib/README new file mode 100644 index 00000000..92b1d944 --- /dev/null +++ b/contrib/README @@ -0,0 +1,4 @@ +The `contrib` directory contains scripts, images, and other helpful things +which are not part of the core docker distribution. Please note that they +could be out of date, since they do not receive the same attention as the +rest of the repository. diff --git a/contrib/REVIEWERS b/contrib/REVIEWERS new file mode 100644 index 00000000..18e05a30 --- /dev/null +++ b/contrib/REVIEWERS @@ -0,0 +1 @@ +Tianon Gravi (@tianon) diff --git a/contrib/apparmor/docker-engine b/contrib/apparmor/docker-engine new file mode 100644 index 00000000..bdfc2075 --- /dev/null +++ b/contrib/apparmor/docker-engine @@ -0,0 +1,151 @@ +@{DOCKER_GRAPH_PATH}=/var/lib/docker + +profile /usr/bin/docker (attach_disconnected, complain) { + # Prevent following links to these files during container setup. + deny /etc/** mkl, + deny /dev/** kl, + deny /sys/** mkl, + deny /proc/** mkl, + + mount -> @{DOCKER_GRAPH_PATH}/**, + mount -> /, + mount -> /proc/**, + mount -> /sys/**, + mount -> /run/docker/netns/**, + + umount, + pivot_root, + signal (receive) peer=@{profile_name}, + signal (receive) peer=unconfined, + signal (send), + ipc rw, + network, + capability, + owner /** rw, + /var/lib/docker/** rwl, + + # For non-root client use: + /dev/urandom r, + /run/docker.sock rw, + /proc/** r, + /sys/kernel/mm/hugepages/ r, + /etc/localtime r, + + ptrace peer=@{profile_name}, + ptrace (read) peer=docker-default, + deny ptrace (trace) peer=docker-default, + deny ptrace peer=/usr/bin/docker///bin/ps, + + /usr/bin/docker pix, + /sbin/xtables-multi rCx, + /sbin/iptables rCx, + /sbin/modprobe rCx, + /sbin/auplink rCx, + /bin/kmod rCx, + /usr/bin/xz rCx, + /bin/ps rCx, + /bin/cat rCx, + /sbin/zfs rCx, + + # Transitions + change_profile -> docker-*, + change_profile -> unconfined, + + profile /bin/cat (complain) { + /etc/ld.so.cache r, + /lib/** r, + /dev/null rw, + /proc r, + /bin/cat mr, + + # For reading in 'docker stats': + /proc/[0-9]*/net/dev r, + } + profile /bin/ps (complain) { + /etc/ld.so.cache r, + /etc/localtime r, + /etc/passwd r, + /etc/nsswitch.conf r, + /lib/** r, + /proc/[0-9]*/** r, + /dev/null rw, + /bin/ps mr, + + # We don't need ptrace so we'll deny and ignore the error. 
+ deny ptrace (read, trace), + + # Quiet dac_override denials + deny capability dac_override, + deny capability dac_read_search, + deny capability sys_ptrace, + + /dev/tty r, + /proc/stat r, + /proc/cpuinfo r, + /proc/meminfo r, + /proc/uptime r, + /sys/devices/system/cpu/online r, + /proc/sys/kernel/pid_max r, + /proc/ r, + /proc/tty/drivers r, + } + profile /sbin/iptables (complain) { + signal (receive) peer=/usr/bin/docker, + capability net_admin, + } + profile /sbin/auplink flags=(attach_disconnected, complain) { + signal (receive) peer=/usr/bin/docker, + capability sys_admin, + capability dac_override, + + @{DOCKER_GRAPH_PATH}/aufs/** rw, + @{DOCKER_GRAPH_PATH}/tmp/** rw, + # For user namespaces: + @{DOCKER_GRAPH_PATH}/[0-9]*.[0-9]*/** rw, + + /sys/fs/aufs/** r, + /lib/** r, + /apparmor/.null r, + /dev/null rw, + /etc/ld.so.cache r, + /sbin/auplink rm, + /proc/fs/aufs/** rw, + /proc/[0-9]*/mounts rw, + } + profile /sbin/modprobe /bin/kmod (complain) { + signal (receive) peer=/usr/bin/docker, + capability sys_module, + /etc/ld.so.cache r, + /lib/** r, + /dev/null rw, + /apparmor/.null rw, + /sbin/modprobe rm, + /bin/kmod rm, + /proc/cmdline r, + /sys/module/** r, + /etc/modprobe.d{/,/**} r, + } + # xz works via pipes, so we do not need access to the filesystem. + profile /usr/bin/xz (complain) { + signal (receive) peer=/usr/bin/docker, + /etc/ld.so.cache r, + /lib/** r, + /usr/bin/xz rm, + deny /proc/** rw, + deny /sys/** rw, + } + profile /sbin/xtables-multi (attach_disconnected, complain) { + /etc/ld.so.cache r, + /lib/** r, + /sbin/xtables-multi rm, + /apparmor/.null w, + /dev/null rw, + capability net_raw, + capability net_admin, + network raw, + } + profile /sbin/zfs (attach_disconnected, complain) { + file, + capability, + } +} diff --git a/contrib/builder/deb/README.md b/contrib/builder/deb/README.md new file mode 100644 index 00000000..a6fd70dc --- /dev/null +++ b/contrib/builder/deb/README.md @@ -0,0 +1,5 @@ +# `dockercore/builder-deb` + +This image's tags contain the dependencies for building Docker `.deb`s for each of the Debian-based platforms Docker targets. + +To add new tags, see [`contrib/builder/deb` in https://github.com/docker/docker](https://github.com/docker/docker/tree/master/contrib/builder/deb), specifically the `generate.sh` script, whose usage is described in a comment at the top of the file. diff --git a/contrib/builder/deb/build.sh b/contrib/builder/deb/build.sh new file mode 100755 index 00000000..8271d9dc --- /dev/null +++ b/contrib/builder/deb/build.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" +done diff --git a/contrib/builder/deb/debian-jessie/Dockerfile b/contrib/builder/deb/debian-jessie/Dockerfile new file mode 100644 index 00000000..de888a1a --- /dev/null +++ b/contrib/builder/deb/debian-jessie/Dockerfile @@ -0,0 +1,14 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"! 
+# + +FROM debian:jessie + +RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.4.2 +RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 +ENV DOCKER_BUILDTAGS apparmor selinux diff --git a/contrib/builder/deb/debian-stretch/Dockerfile b/contrib/builder/deb/debian-stretch/Dockerfile new file mode 100644 index 00000000..ee462824 --- /dev/null +++ b/contrib/builder/deb/debian-stretch/Dockerfile @@ -0,0 +1,14 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"! +# + +FROM debian:stretch + +RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.4.2 +RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 +ENV DOCKER_BUILDTAGS apparmor selinux diff --git a/contrib/builder/deb/debian-wheezy/Dockerfile b/contrib/builder/deb/debian-wheezy/Dockerfile new file mode 100644 index 00000000..dc9c3880 --- /dev/null +++ b/contrib/builder/deb/debian-wheezy/Dockerfile @@ -0,0 +1,15 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"! +# + +FROM debian:wheezy +RUN echo deb http://http.debian.net/debian wheezy-backports main > /etc/apt/sources.list.d/wheezy-backports.list + +RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.4.2 +RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 +ENV DOCKER_BUILDTAGS apparmor selinux diff --git a/contrib/builder/deb/generate.sh b/contrib/builder/deb/generate.sh new file mode 100755 index 00000000..4bb7320e --- /dev/null +++ b/contrib/builder/deb/generate.sh @@ -0,0 +1,88 @@ +#!/bin/bash +set -e + +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh debian-jessie +# to only update debian-jessie/Dockerfile +# or: ./generate.sh debian-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + distro="${version%-*}" + suite="${version##*-}" + from="${distro}:${suite}" + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"! 
+ # + + FROM $from + EOF + + case "$from" in + debian:wheezy) + # add -backports, like our users have to + echo "RUN echo deb http://http.debian.net/debian $suite-backports main > /etc/apt/sources.list.d/$suite-backports.list" >> "$version/Dockerfile" + ;; + esac + + echo >> "$version/Dockerfile" + + extraBuildTags= + + # this list is sorted alphabetically; please keep it that way + packages=( + bash-completion # for bash-completion debhelper integration + btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) + build-essential # "essential for building Debian packages" + curl ca-certificates # for downloading Go + debhelper # for easy ".deb" building + dh-systemd # for systemd debhelper integration + git # for "git commit" info in "docker -v" + libapparmor-dev # for "sys/apparmor.h" + libdevmapper-dev # for "libdevmapper.h" + libsqlite3-dev # for "sqlite3.h" + ) + + if [ "$suite" = 'precise' ]; then + # precise has a few package issues + + # - dh-systemd doesn't exist at all + packages=( "${packages[@]/dh-systemd}" ) + + # - libdevmapper-dev is missing critical structs (too old) + packages=( "${packages[@]/libdevmapper-dev}" ) + extraBuildTags+=' exclude_graphdriver_devicemapper' + + # - btrfs-tools is missing "ioctl.h" (too old), so it's useless + # (since kernels on precise are old too, just skip btrfs entirely) + packages=( "${packages[@]/btrfs-tools}" ) + extraBuildTags+=' exclude_graphdriver_btrfs' + fi + + echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../Dockerfile >> "$version/Dockerfile" + echo 'RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + awk '$1 == "ENV" && $2 == "DOCKER_BUILDTAGS" { print $0 "'"$extraBuildTags"'"; exit }' ../../../Dockerfile >> "$version/Dockerfile" +done diff --git a/contrib/builder/deb/ubuntu-precise/Dockerfile b/contrib/builder/deb/ubuntu-precise/Dockerfile new file mode 100644 index 00000000..50b4a12e --- /dev/null +++ b/contrib/builder/deb/ubuntu-precise/Dockerfile @@ -0,0 +1,14 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"! +# + +FROM ubuntu:precise + +RUN apt-get update && apt-get install -y bash-completion build-essential curl ca-certificates debhelper git libapparmor-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.4.2 +RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 +ENV DOCKER_BUILDTAGS apparmor selinux exclude_graphdriver_devicemapper exclude_graphdriver_btrfs diff --git a/contrib/builder/deb/ubuntu-trusty/Dockerfile b/contrib/builder/deb/ubuntu-trusty/Dockerfile new file mode 100644 index 00000000..aca7eecd --- /dev/null +++ b/contrib/builder/deb/ubuntu-trusty/Dockerfile @@ -0,0 +1,14 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"! 
+# + +FROM ubuntu:trusty + +RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.4.2 +RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 +ENV DOCKER_BUILDTAGS apparmor selinux diff --git a/contrib/builder/deb/ubuntu-vivid/Dockerfile b/contrib/builder/deb/ubuntu-vivid/Dockerfile new file mode 100644 index 00000000..ee264027 --- /dev/null +++ b/contrib/builder/deb/ubuntu-vivid/Dockerfile @@ -0,0 +1,14 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"! +# + +FROM ubuntu:vivid + +RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.4.2 +RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 +ENV DOCKER_BUILDTAGS apparmor selinux diff --git a/contrib/builder/deb/ubuntu-wily/Dockerfile b/contrib/builder/deb/ubuntu-wily/Dockerfile new file mode 100644 index 00000000..3f79c56e --- /dev/null +++ b/contrib/builder/deb/ubuntu-wily/Dockerfile @@ -0,0 +1,14 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"! +# + +FROM ubuntu:wily + +RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.4.2 +RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 +ENV DOCKER_BUILDTAGS apparmor selinux diff --git a/contrib/builder/rpm/README.md b/contrib/builder/rpm/README.md new file mode 100644 index 00000000..153fbceb --- /dev/null +++ b/contrib/builder/rpm/README.md @@ -0,0 +1,5 @@ +# `dockercore/builder-rpm` + +This image's tags contain the dependencies for building Docker `.rpm`s for each of the RPM-based platforms Docker targets. + +To add new tags, see [`contrib/builder/rpm` in https://github.com/docker/docker](https://github.com/docker/docker/tree/master/contrib/builder/rpm), specifically the `generate.sh` script, whose usage is described in a comment at the top of the file. diff --git a/contrib/builder/rpm/build.sh b/contrib/builder/rpm/build.sh new file mode 100755 index 00000000..558f7ee0 --- /dev/null +++ b/contrib/builder/rpm/build.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-rpm:$(basename "$d")" "$d" +done diff --git a/contrib/builder/rpm/centos-7/Dockerfile b/contrib/builder/rpm/centos-7/Dockerfile new file mode 100644 index 00000000..60fe4648 --- /dev/null +++ b/contrib/builder/rpm/centos-7/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"! 
+#
+
+FROM centos:7
+
+RUN yum groupinstall -y "Development Tools"
+RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs
+RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel sqlite-devel tar
+
+ENV GO_VERSION 1.4.2
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
+
+ENV AUTO_GOPATH 1
+ENV DOCKER_BUILDTAGS selinux
diff --git a/contrib/builder/rpm/fedora-20/Dockerfile b/contrib/builder/rpm/fedora-20/Dockerfile
new file mode 100644
index 00000000..f0c701bc
--- /dev/null
+++ b/contrib/builder/rpm/fedora-20/Dockerfile
@@ -0,0 +1,15 @@
+#
+# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"!
+#
+
+FROM fedora:20
+
+RUN yum install -y @development-tools fedora-packager
+RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel sqlite-devel tar
+
+ENV GO_VERSION 1.4.2
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
+
+ENV AUTO_GOPATH 1
+ENV DOCKER_BUILDTAGS selinux
diff --git a/contrib/builder/rpm/fedora-21/Dockerfile b/contrib/builder/rpm/fedora-21/Dockerfile
new file mode 100644
index 00000000..3d84706a
--- /dev/null
+++ b/contrib/builder/rpm/fedora-21/Dockerfile
@@ -0,0 +1,15 @@
+#
+# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"!
+#
+
+FROM fedora:21
+
+RUN yum install -y @development-tools fedora-packager
+RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel sqlite-devel tar
+
+ENV GO_VERSION 1.4.2
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
+
+ENV AUTO_GOPATH 1
+ENV DOCKER_BUILDTAGS selinux
diff --git a/contrib/builder/rpm/fedora-22/Dockerfile b/contrib/builder/rpm/fedora-22/Dockerfile
new file mode 100644
index 00000000..1c47b50a
--- /dev/null
+++ b/contrib/builder/rpm/fedora-22/Dockerfile
@@ -0,0 +1,15 @@
+#
+# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"!
+#
+
+FROM fedora:22
+
+RUN yum install -y @development-tools fedora-packager
+RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel sqlite-devel tar
+
+ENV GO_VERSION 1.4.2
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
+
+ENV AUTO_GOPATH 1
+ENV DOCKER_BUILDTAGS selinux
diff --git a/contrib/builder/rpm/generate.sh b/contrib/builder/rpm/generate.sh
new file mode 100755
index 00000000..f673ec02
--- /dev/null
+++ b/contrib/builder/rpm/generate.sh
@@ -0,0 +1,84 @@
+#!/bin/bash
+set -e
+
+# usage: ./generate.sh [versions]
+# ie: ./generate.sh
+# to update all Dockerfiles in this directory
+# or: ./generate.sh fedora-20
+# to only update fedora-20/Dockerfile
+# or: ./generate.sh fedora-newversion
+# to create a new folder and a Dockerfile within it
+
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+versions=( "$@" )
+if [ ${#versions[@]} -eq 0 ]; then
+	versions=( */ )
+fi
+versions=( "${versions[@]%/}" )
+
+for version in "${versions[@]}"; do
+	distro="${version%-*}"
+	suite="${version##*-}"
+	from="${distro}:${suite}"
+
+	mkdir -p "$version"
+	echo "$version -> FROM $from"
+	cat > "$version/Dockerfile" <<-EOF
+		#
+		# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"!
+		#
+
+		FROM $from
+	EOF
+
+	echo >> "$version/Dockerfile"
+
+	case "$from" in
+		centos:*)
+			# get "Development Tools" packages and dependencies
+			echo 'RUN yum groupinstall -y "Development Tools"' >> "$version/Dockerfile"
+
+			if [[ "$version" == "centos-7" ]]; then
+				echo 'RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs' >> "$version/Dockerfile"
+			fi
+			;;
+		oraclelinux:*)
+			# get "Development Tools" packages and dependencies
+			echo 'RUN yum groupinstall -y "Development Tools"' >> "$version/Dockerfile"
+			;;
+		*)
+			echo 'RUN yum install -y @development-tools fedora-packager' >> "$version/Dockerfile"
+			;;
+	esac
+
+	# this list is sorted alphabetically; please keep it that way
+	packages=(
+		btrfs-progs-devel # for "btrfs/ioctl.h" (and "version.h" if possible)
+		device-mapper-devel # for "libdevmapper.h"
+		glibc-static
+		libselinux-devel # for "libselinux.so"
+		sqlite-devel # for "sqlite3.h"
+		tar # older versions of dev-tools do not have tar
+	)
+
+	case "$from" in
+		oraclelinux:7)
+			# Enable the optional repository
+			packages=( --enablerepo=ol7_optional_latest "${packages[@]}" )
+			;;
+	esac
+	echo "RUN yum install -y ${packages[*]}" >> "$version/Dockerfile"
+
+	echo >> "$version/Dockerfile"
+
+	awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../Dockerfile >> "$version/Dockerfile"
+	echo 'RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile"
+	echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile"
+
+	echo >> "$version/Dockerfile"
+
+	echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile"
+
+	echo 'ENV DOCKER_BUILDTAGS selinux' >> "$version/Dockerfile"
+done
diff --git a/contrib/builder/rpm/oraclelinux-6/Dockerfile b/contrib/builder/rpm/oraclelinux-6/Dockerfile
new file mode 100644
index 00000000..4c96b5dd
--- /dev/null
+++ b/contrib/builder/rpm/oraclelinux-6/Dockerfile
@@ -0,0 +1,15 @@
+#
+# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"!
+#
+
+FROM oraclelinux:6
+
+RUN yum groupinstall -y "Development Tools"
+RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel sqlite-devel tar
+
+ENV GO_VERSION 1.4.2
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
+
+ENV AUTO_GOPATH 1
+ENV DOCKER_BUILDTAGS selinux
diff --git a/contrib/builder/rpm/oraclelinux-7/Dockerfile b/contrib/builder/rpm/oraclelinux-7/Dockerfile
new file mode 100644
index 00000000..333a9f1f
--- /dev/null
+++ b/contrib/builder/rpm/oraclelinux-7/Dockerfile
@@ -0,0 +1,15 @@
+#
+# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"!
+# + +FROM oraclelinux:7 + +RUN yum groupinstall -y "Development Tools" +RUN yum install -y --enablerepo=ol7_optional_latest btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel sqlite-devel tar + +ENV GO_VERSION 1.4.2 +RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 +ENV DOCKER_BUILDTAGS selinux diff --git a/contrib/check-config.sh b/contrib/check-config.sh new file mode 100755 index 00000000..9b2ede13 --- /dev/null +++ b/contrib/check-config.sh @@ -0,0 +1,239 @@ +#!/usr/bin/env bash +set -e + +# bits of this were adapted from lxc-checkconfig +# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in + +possibleConfigs=( + '/proc/config.gz' + "/boot/config-$(uname -r)" + "/usr/src/linux-$(uname -r)/.config" + '/usr/src/linux/.config' +) + +if [ $# -gt 0 ]; then + CONFIG="$1" +else + : ${CONFIG:="${possibleConfigs[0]}"} +fi + +if ! command -v zgrep &> /dev/null; then + zgrep() { + zcat "$2" | grep "$1" + } +fi + +kernelVersion="$(uname -r)" +kernelMajor="${kernelVersion%%.*}" +kernelMinor="${kernelVersion#$kernelMajor.}" +kernelMinor="${kernelMinor%%.*}" + +is_set() { + zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null +} +is_set_in_kernel() { + zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null +} +is_set_as_module() { + zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null +} + +# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors +declare -A colors=( + [black]=30 + [red]=31 + [green]=32 + [yellow]=33 + [blue]=34 + [magenta]=35 + [cyan]=36 + [white]=37 +) +color() { + color=() + if [ "$1" = 'bold' ]; then + color+=( '1' ) + shift + fi + if [ $# -gt 0 ] && [ "${colors[$1]}" ]; then + color+=( "${colors[$1]}" ) + fi + local IFS=';' + echo -en '\033['"${color[*]}"m +} +wrap_color() { + text="$1" + shift + color "$@" + echo -n "$text" + color reset + echo +} + +wrap_good() { + echo "$(wrap_color "$1" white): $(wrap_color "$2" green)" +} +wrap_bad() { + echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)" +} +wrap_warning() { + wrap_color >&2 "$*" red +} + +check_flag() { + if is_set_in_kernel "$1"; then + wrap_good "CONFIG_$1" 'enabled' + elif is_set_as_module "$1"; then + wrap_good "CONFIG_$1" 'enabled (as module)' + else + wrap_bad "CONFIG_$1" 'missing' + fi +} + +check_flags() { + for flag in "$@"; do + echo "- $(check_flag "$flag")" + done +} + +check_command() { + if command -v "$1" >/dev/null 2>&1; then + wrap_good "$1 command" 'available' + else + wrap_bad "$1 command" 'missing' + fi +} + +check_device() { + if [ -c "$1" ]; then + wrap_good "$1" 'present' + else + wrap_bad "$1" 'missing' + fi +} + +if [ ! -e "$CONFIG" ]; then + wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..." + for tryConfig in "${possibleConfigs[@]}"; do + if [ -e "$tryConfig" ]; then + CONFIG="$tryConfig" + break + fi + done + if [ ! -e "$CONFIG" ]; then + wrap_warning "error: cannot find kernel config" + wrap_warning " try running this script again, specifying the kernel config:" + wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config" + exit 1 + fi +fi + +wrap_color "info: reading kernel config from $CONFIG ..." 
white +echo + +echo 'Generally Necessary:' + +echo -n '- ' +cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)" +cgroupDir="$(dirname "$cgroupSubsystemDir")" +if [ -d "$cgroupDir/cpu" -o -d "$cgroupDir/cpuacct" -o -d "$cgroupDir/cpuset" -o -d "$cgroupDir/devices" -o -d "$cgroupDir/freezer" -o -d "$cgroupDir/memory" ]; then + echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]" +else + if [ "$cgroupSubsystemDir" ]; then + echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]" + else + echo "$(wrap_bad 'cgroup hierarchy' 'nonexistent??')" + fi + echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)" +fi + +if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then + echo -n '- ' + if command -v apparmor_parser &> /dev/null; then + echo "$(wrap_good 'apparmor' 'enabled and tools installed')" + else + echo "$(wrap_bad 'apparmor' 'enabled, but apparmor_parser missing')" + echo -n ' ' + if command -v apt-get &> /dev/null; then + echo "$(wrap_color '(use "apt-get install apparmor" to fix this)')" + elif command -v yum &> /dev/null; then + echo "$(wrap_color '(your best bet is "yum install apparmor-parser")')" + else + echo "$(wrap_color '(look for an "apparmor" package for your distribution)')" + fi + fi +fi + +flags=( + NAMESPACES {NET,PID,IPC,UTS}_NS + DEVPTS_MULTIPLE_INSTANCES + CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG + MACVLAN VETH BRIDGE BRIDGE_NETFILTER + NF_NAT_IPV4 IP_NF_FILTER IP_NF_TARGET_MASQUERADE + NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK} + NF_NAT NF_NAT_NEEDED + + # required for bind-mounting /dev/mqueue into containers + POSIX_MQUEUE +) +check_flags "${flags[@]}" +echo + +echo 'Optional Features:' +{ + check_flags MEMCG_KMEM MEMCG_SWAP MEMCG_SWAP_ENABLED + if is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then + echo " $(wrap_color '(note that cgroup swap accounting is not enabled in your kernel config, you can enable it by setting boot option "swapaccount=1")' bold black)" + fi +} + +if [ "$kernelMajor" -lt 3 ] || [ "$kernelMajor" -eq 3 -a "$kernelMinor" -le 18 ]; then + check_flags RESOURCE_COUNTERS +fi + +flags=( + BLK_CGROUP IOSCHED_CFQ + CGROUP_PERF + CGROUP_HUGETLB + NET_CLS_CGROUP NETPRIO_CGROUP + CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED +) +check_flags "${flags[@]}" + +check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY +if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then + echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)" +fi + +check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY +if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then + echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)" +fi + +echo '- Storage Drivers:' +{ + echo '- "'$(wrap_color 'aufs' blue)'":' + check_flags AUFS_FS | sed 's/^/ /' + if ! 
is_set AUFS_FS && grep -q aufs /proc/filesystems; then + echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)" + fi + + echo '- "'$(wrap_color 'btrfs' blue)'":' + check_flags BTRFS_FS | sed 's/^/ /' + + echo '- "'$(wrap_color 'devicemapper' blue)'":' + check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /' + + echo '- "'$(wrap_color 'overlay' blue)'":' + check_flags OVERLAY_FS | sed 's/^/ /' + + echo '- "'$(wrap_color 'zfs' blue)'":' + echo " - $(check_device /dev/zfs)" + echo " - $(check_command zfs)" + echo " - $(check_command zpool)" +} | sed 's/^/ /' +echo + +#echo 'Potential Future Features:' +#check_flags USER_NS +#echo diff --git a/contrib/completion/REVIEWERS b/contrib/completion/REVIEWERS new file mode 100644 index 00000000..03ee2dde --- /dev/null +++ b/contrib/completion/REVIEWERS @@ -0,0 +1,2 @@ +Tianon Gravi (@tianon) +Jessie Frazelle (@jfrazelle) diff --git a/contrib/completion/bash/docker b/contrib/completion/bash/docker new file mode 100755 index 00000000..df2d1578 --- /dev/null +++ b/contrib/completion/bash/docker @@ -0,0 +1,1513 @@ +#!/bin/bash +# +# bash completion file for core docker commands +# +# This script provides completion of: +# - commands and their options +# - container ids and names +# - image repos and tags +# - filepaths +# +# To enable the completions either: +# - place this file in /etc/bash_completion.d +# or +# - copy this file to e.g. ~/.docker-completion.sh and add the line +# below to your .bashrc after bash completion features are loaded +# . ~/.docker-completion.sh +# +# Note: +# Currently, the completions will not work if the docker daemon is not +# bound to the default communication port/socket +# If the docker daemon is using a unix socket for communication your user +# must have access to the socket for the completions to function correctly +# +# Note for developers: +# Please arrange options sorted alphabetically by long name with the short +# options immediately following their corresponding long form. +# This order should be applied to lists, alternatives and code blocks. 
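+#
+# As a quick illustration (a hypothetical session; the actual suggestions
+# depend on the local containers and images), once this file is sourced as
+# described above, the functions below produce completions such as:
+#   $ docker at<TAB>          completes the command name to "attach"
+#   $ docker attach --<TAB>   offers: --help --no-stdin --sig-proxy
+#   $ docker attach <TAB>     offers the names and IDs of running containers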
+ +__docker_q() { + docker ${host:+-H "$host"} ${config:+--config "$config"} 2>/dev/null "$@" +} + +__docker_containers_all() { + local IFS=$'\n' + local containers=( $(__docker_q ps -aq --no-trunc) ) + if [ "$1" ]; then + containers=( $(__docker_q inspect --format "{{if $1}}{{.Id}}{{end}}" "${containers[@]}") ) + fi + local names=( $(__docker_q inspect --format '{{.Name}}' "${containers[@]}") ) + names=( "${names[@]#/}" ) # trim off the leading "/" from the container names + unset IFS + COMPREPLY=( $(compgen -W "${names[*]} ${containers[*]}" -- "$cur") ) +} + +__docker_containers_running() { + __docker_containers_all '.State.Running' +} + +__docker_containers_stopped() { + __docker_containers_all 'not .State.Running' +} + +__docker_containers_pauseable() { + __docker_containers_all 'and .State.Running (not .State.Paused)' +} + +__docker_containers_unpauseable() { + __docker_containers_all '.State.Paused' +} + +__docker_container_names() { + local containers=( $(__docker_q ps -aq --no-trunc) ) + local names=( $(__docker_q inspect --format '{{.Name}}' "${containers[@]}") ) + names=( "${names[@]#/}" ) # trim off the leading "/" from the container names + COMPREPLY=( $(compgen -W "${names[*]}" -- "$cur") ) +} + +__docker_container_ids() { + local containers=( $(__docker_q ps -aq) ) + COMPREPLY=( $(compgen -W "${containers[*]}" -- "$cur") ) +} + +__docker_image_repos() { + local repos="$(__docker_q images | awk 'NR>1 && $1 != "" { print $1 }')" + COMPREPLY=( $(compgen -W "$repos" -- "$cur") ) +} + +__docker_image_repos_and_tags() { + local reposAndTags="$(__docker_q images | awk 'NR>1 && $1 != "" { print $1; print $1":"$2 }')" + COMPREPLY=( $(compgen -W "$reposAndTags" -- "$cur") ) + __ltrim_colon_completions "$cur" +} + +__docker_image_repos_and_tags_and_ids() { + local images="$(__docker_q images -a --no-trunc | awk 'NR>1 { print $3; if ($1 != "") { print $1; print $1":"$2 } }')" + COMPREPLY=( $(compgen -W "$images" -- "$cur") ) + __ltrim_colon_completions "$cur" +} + +__docker_containers_and_images() { + __docker_containers_all + local containers=( "${COMPREPLY[@]}" ) + __docker_image_repos_and_tags_and_ids + COMPREPLY+=( "${containers[@]}" ) +} + +# Finds the position of the first word that is neither option nor an option's argument. +# If there are options that require arguments, you should pass a glob describing those +# options, e.g. "--option1|-o|--option2" +# Use this function to restrict completions to exact positions after the argument list. +__docker_pos_first_nonflag() { + local argument_flags=$1 + + local counter=$((command_pos + 1)) + while [ $counter -le $cword ]; do + if [ -n "$argument_flags" ] && eval "case '${words[$counter]}' in $argument_flags) true ;; *) false ;; esac"; then + (( counter++ )) + # eat "=" in case of --option=arg syntax + [ "${words[$counter]}" = "=" ] && (( counter++ )) + else + case "${words[$counter]}" in + -*) + ;; + *) + break + ;; + esac + fi + + # Bash splits words at "=", retaining "=" as a word, examples: + # "--debug=false" => 3 words, "--log-opt syslog-facility=daemon" => 4 words + while [ "${words[$counter + 1]}" = "=" ] ; do + counter=$(( counter + 2)) + done + + (( counter++ )) + done + + echo $counter +} + +# Returns the value of the first option matching option_glob. +# Valid values for option_glob are option names like '--log-level' and +# globs like '--log-level|-l' +# Only positions between the command and the current word are considered. 
+__docker_value_of_option() {
+	local option_glob=$1
+
+	local counter=$((command_pos + 1))
+	while [ $counter -lt $cword ]; do
+		case ${words[$counter]} in
+			@($option_glob) )
+				echo ${words[$counter + 1]}
+				break
+				;;
+		esac
+		(( counter++ ))
+	done
+}
+
+# Transforms a multiline list of strings into a single line string
+# with the words separated by "|".
+# This is used to prepare arguments to __docker_pos_first_nonflag().
+__docker_to_alternatives() {
+	local parts=( $1 )
+	local IFS='|'
+	echo "${parts[*]}"
+}
+
+# Transforms a multiline list of options into an extglob pattern
+# suitable for use in case statements.
+__docker_to_extglob() {
+	local extglob=$( __docker_to_alternatives "$1" )
+	echo "@($extglob)"
+}
+
+__docker_resolve_hostname() {
+	command -v host >/dev/null 2>&1 || return
+	COMPREPLY=( $(host 2>/dev/null "${cur%:}" | awk '/has address/ {print $4}') )
+}
+
+__docker_capabilities() {
+	# The list of capabilities is defined in types.go, ALL was added manually.
+	COMPREPLY=( $( compgen -W "
+		ALL
+		AUDIT_CONTROL
+		AUDIT_WRITE
+		AUDIT_READ
+		BLOCK_SUSPEND
+		CHOWN
+		DAC_OVERRIDE
+		DAC_READ_SEARCH
+		FOWNER
+		FSETID
+		IPC_LOCK
+		IPC_OWNER
+		KILL
+		LEASE
+		LINUX_IMMUTABLE
+		MAC_ADMIN
+		MAC_OVERRIDE
+		MKNOD
+		NET_ADMIN
+		NET_BIND_SERVICE
+		NET_BROADCAST
+		NET_RAW
+		SETFCAP
+		SETGID
+		SETPCAP
+		SETUID
+		SYS_ADMIN
+		SYS_BOOT
+		SYS_CHROOT
+		SYSLOG
+		SYS_MODULE
+		SYS_NICE
+		SYS_PACCT
+		SYS_PTRACE
+		SYS_RAWIO
+		SYS_RESOURCE
+		SYS_TIME
+		SYS_TTY_CONFIG
+		WAKE_ALARM
+	" -- "$cur" ) )
+}
+
+__docker_log_drivers() {
+	COMPREPLY=( $( compgen -W "
+		fluentd
+		gelf
+		journald
+		json-file
+		none
+		syslog
+	" -- "$cur" ) )
+}
+
+__docker_log_driver_options() {
+	# see docs/reference/logging/index.md
+	local fluentd_options="fluentd-address fluentd-tag"
+	local gelf_options="gelf-address gelf-tag"
+	local json_file_options="max-file max-size"
+	local syslog_options="syslog-address syslog-facility syslog-tag"
+
+	case $(__docker_value_of_option --log-driver) in
+		'')
+			COMPREPLY=( $( compgen -W "$fluentd_options $gelf_options $json_file_options $syslog_options" -S = -- "$cur" ) )
+			;;
+		fluentd)
+			COMPREPLY=( $( compgen -W "$fluentd_options" -S = -- "$cur" ) )
+			;;
+		gelf)
+			COMPREPLY=( $( compgen -W "$gelf_options" -S = -- "$cur" ) )
+			;;
+		json-file)
+			COMPREPLY=( $( compgen -W "$json_file_options" -S = -- "$cur" ) )
+			;;
+		syslog)
+			COMPREPLY=( $( compgen -W "$syslog_options" -S = -- "$cur" ) )
+			;;
+		*)
+			return
+			;;
+	esac
+
+	compopt -o nospace
+}
+
+__docker_complete_log_driver_options() {
+	# "=" gets parsed to a word and assigned to either $cur or $prev depending on whether
+	# it is the last character or not. So we search for "xxx=" in the last two words.
+	case "${words[$cword-2]}$prev=" in
+		*gelf-address=*)
+			COMPREPLY=( $( compgen -W "udp" -S "://" -- "${cur#=}" ) )
+			compopt -o nospace
+			return
+			;;
+		*syslog-address=*)
+			COMPREPLY=( $( compgen -W "tcp udp unix" -S "://" -- "${cur#=}" ) )
+			compopt -o nospace
+			return
+			;;
+		*syslog-facility=*)
+			COMPREPLY=( $( compgen -W "
+				auth
+				authpriv
+				cron
+				daemon
+				ftp
+				kern
+				local0
+				local1
+				local2
+				local3
+				local4
+				local5
+				local6
+				local7
+				lpr
+				mail
+				news
+				syslog
+				user
+				uucp
+			" -- "${cur#=}" ) )
+			return
+			;;
+	esac
+	return 1
+}
+
+__docker_log_levels() {
+	COMPREPLY=( $( compgen -W "debug info warn error fatal" -- "$cur" ) )
+}
+
+# a selection of the available signals that is most likely of interest in the
+# context of docker containers.
+__docker_signals() { + local signals=( + SIGCONT + SIGHUP + SIGINT + SIGKILL + SIGQUIT + SIGSTOP + SIGTERM + SIGUSR1 + SIGUSR2 + ) + COMPREPLY=( $( compgen -W "${signals[*]} ${signals[*]#SIG}" -- "$( echo $cur | tr '[:lower:]' '[:upper:]')" ) ) +} + +# global options that may appear after the docker command +_docker_docker() { + local boolean_options=" + $global_boolean_options + --help + --version -v + " + + case "$prev" in + --config) + _filedir -d + return + ;; + --log-level|-l) + __docker_log_levels + return + ;; + $(__docker_to_extglob "$global_options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$boolean_options $global_options_with_args" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag $(__docker_to_extglob "$global_options_with_args") ) + if [ $cword -eq $counter ]; then + COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) ) + fi + ;; + esac +} + +_docker_attach() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --no-stdin --sig-proxy" -- "$cur" ) ) + ;; + *) + local counter="$(__docker_pos_first_nonflag)" + if [ $cword -eq $counter ]; then + __docker_containers_running + fi + ;; + esac +} + +_docker_build() { + case "$prev" in + --cgroup-parent|--cpuset-cpus|--cpuset-mems|--cpu-shares|-c|--cpu-period|--cpu-quota|--memory|-m|--memory-swap) + return + ;; + --file|-f) + _filedir + return + ;; + --tag|-t) + __docker_image_repos_and_tags + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--cgroup-parent --cpuset-cpus --cpuset-mems --cpu-shares -c --cpu-period --cpu-quota --file -f --force-rm --help --memory -m --memory-swap --no-cache --pull --quiet -q --rm --tag -t --ulimit" -- "$cur" ) ) + ;; + *) + local counter="$(__docker_pos_first_nonflag '--cgroup-parent|--cpuset-cpus|--cpuset-mems|--cpu-shares|-c|--cpu-period|--cpu-quota|--file|-f|--memory|-m|--memory-swap|--tag|-t')" + if [ $cword -eq $counter ]; then + _filedir -d + fi + ;; + esac +} + +_docker_commit() { + case "$prev" in + --author|-a|--change|-c|--message|-m) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--author -a --change -c --help --message -m --pause -p" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--author|-a|--change|-c|--message|-m') + + if [ $cword -eq $counter ]; then + __docker_containers_all + return + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then + __docker_image_repos_and_tags + return + fi + ;; + esac +} + +_docker_cp() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + case "$cur" in + *:) + return + ;; + *) + __docker_containers_all + COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) + compopt -o nospace + return + ;; + esac + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then + _filedir -d + return + fi + ;; + esac +} + +_docker_create() { + _docker_run +} + +_docker_daemon() { + local boolean_options=" + $global_boolean_options + --disable-legacy-registry + --help + --icc=false + --ip-forward=false + --ip-masq=false + --iptables=false + --ipv6 + --selinux-enabled + --userland-proxy=false + " + local options_with_args=" + $global_options_with_args + --api-cors-header + --bip + --bridge -b + --default-gateway + --default-gateway-v6 + --default-ulimit + --dns + --dns-search + --exec-driver -e + --exec-opt + --exec-root + --fixed-cidr + --fixed-cidr-v6 + --graph -g + --group -G + --insecure-registry + --ip 
+ --label + --log-driver + --log-opt + --mtu + --pidfile -p + --registry-mirror + --storage-driver -s + --storage-opt + " + + case "$prev" in + --exec-root|--graph|-g) + _filedir -d + return + ;; + --log-driver) + __docker_log_drivers + return + ;; + --pidfile|-p|--tlscacert|--tlscert|--tlskey) + _filedir + return + ;; + --storage-driver|-s) + COMPREPLY=( $( compgen -W "aufs btrfs devicemapper overlay vfs zfs" -- "$(echo $cur | tr '[:upper:]' '[:lower:]')" ) ) + return + ;; + --storage-opt) + local devicemapper_options=" + dm.basesize + dm.blkdiscard + dm.blocksize + dm.fs + dm.loopdatasize + dm.loopmetadatasize + dm.mkfsarg + dm.mountopt + dm.override_udev_sync_check + dm.thinpooldev + " + local zfs_options="zfs.fsname" + + case $(__docker_value_of_option '--storage-driver|-s') in + '') + COMPREPLY=( $( compgen -W "$devicemapper_options $zfs_options" -S = -- "$cur" ) ) + ;; + devicemapper) + COMPREPLY=( $( compgen -W "$devicemapper_options" -S = -- "$cur" ) ) + ;; + zfs) + COMPREPLY=( $( compgen -W "$zfs_options" -S = -- "$cur" ) ) + ;; + *) + return + ;; + esac + compopt -o nospace + return + ;; + --log-level|-l) + __docker_log_levels + return + ;; + --log-opt) + __docker_log_driver_options + return + ;; + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + + __docker_complete_log_driver_options && return + + case "${words[$cword-2]}$prev=" in + *dm.blkdiscard=*) + COMPREPLY=( $( compgen -W "false true" -- "${cur#=}" ) ) + return + ;; + *dm.fs=*) + COMPREPLY=( $( compgen -W "ext4 xfs" -- "${cur#=}" ) ) + return + ;; + *dm.override_udev_sync_check=*) + COMPREPLY=( $( compgen -W "false true" -- "${cur#=}" ) ) + return + ;; + *dm.thinpooldev=*) + _filedir + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) ) + ;; + esac +} + +_docker_diff() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_containers_all + fi + ;; + esac +} + +_docker_events() { + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "container event image" -- "$cur" ) ) + compopt -o nospace + return + ;; + --since|--until) + return + ;; + esac + + case "${words[$cword-2]}$prev=" in + *container=*) + cur="${cur#=}" + __docker_containers_all + return + ;; + *event=*) + COMPREPLY=( $( compgen -W " + attach + commit + copy + create + delete + destroy + die + exec_create + exec_start + export + import + kill + oom + pause + pull + push + rename + resize + restart + start + stop + tag + top + unpause + untag + " -- "${cur#=}" ) ) + return + ;; + *image=*) + cur="${cur#=}" + __docker_image_repos_and_tags_and_ids + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --help --since --until" -- "$cur" ) ) + ;; + esac +} + +_docker_exec() { + case "$prev" in + --user|-u) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--detach -d --help --interactive -i -t --tty -u --user" -- "$cur" ) ) + ;; + *) + __docker_containers_running + ;; + esac +} + +_docker_export() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_containers_all + fi + ;; + esac +} + +_docker_help() { + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + COMPREPLY=( $( compgen -W "${commands[*]}" -- "$cur" ) ) + fi +} + 
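+# As a sketch of how the helpers above fit together (hypothetical session;
+# the candidate words all come from the _docker_daemon lists above):
+#   $ docker daemon --storage-driver <TAB>   offers: aufs btrfs devicemapper overlay vfs zfs
+#   $ docker daemon --storage-driver devicemapper --storage-opt <TAB>
+#     is narrowed to the dm.* keys via __docker_value_of_option, and
+#   $ docker daemon --storage-driver devicemapper --storage-opt dm.fs=<TAB>
+#     completes to: ext4 xfs
+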
+_docker_history() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --no-trunc --quiet -q" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_image_repos_and_tags_and_ids + fi + ;; + esac +} + +_docker_images() { + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -W "dangling=true label=" -- "$cur" ) ) + if [ "$COMPREPLY" = "label=" ]; then + compopt -o nospace + fi + return + ;; + esac + + case "${words[$cword-2]}$prev=" in + *dangling=*) + COMPREPLY=( $( compgen -W "true false" -- "${cur#=}" ) ) + return + ;; + *label=*) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --digests --filter -f --help --no-trunc --quiet -q" -- "$cur" ) ) + ;; + =) + return + ;; + *) + __docker_image_repos + ;; + esac +} + +_docker_import() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + return + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then + __docker_image_repos_and_tags + return + fi + ;; + esac +} + +_docker_info() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + esac +} + +_docker_inspect() { + case "$prev" in + --format|-f) + return + ;; + --type) + COMPREPLY=( $( compgen -W "image container" -- "$cur" ) ) + return + ;; + + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --type --help" -- "$cur" ) ) + ;; + *) + case $(__docker_value_of_option --type) in + '') + __docker_containers_and_images + ;; + container) + __docker_containers_all + ;; + image) + __docker_image_repos_and_tags_and_ids + ;; + esac + esac +} + +_docker_kill() { + case "$prev" in + --signal|-s) + __docker_signals + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --signal -s" -- "$cur" ) ) + ;; + *) + __docker_containers_running + ;; + esac +} + +_docker_load() { + case "$prev" in + --input|-i) + _filedir + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --input -i" -- "$cur" ) ) + ;; + esac +} + +_docker_login() { + case "$prev" in + --email|-e|--password|-p|--username|-u) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--email -e --help --password -p --username -u" -- "$cur" ) ) + ;; + esac +} + +_docker_logout() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + esac +} + +_docker_logs() { + case "$prev" in + --since|--tail) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--follow -f --help --since --tail --timestamps -t" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--tail') + if [ $cword -eq $counter ]; then + __docker_containers_all + fi + ;; + esac +} + +_docker_pause() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_containers_pauseable + fi + ;; + esac +} + +_docker_port() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_containers_all + fi + ;; + esac +} + +_docker_ps() { + case "$prev" in + --before|--since) + __docker_containers_all + ;; + --filter|-f) + COMPREPLY=( $( compgen -S = -W "exited id label name status" -- "$cur" ) ) + compopt -o nospace + return + ;; + --format|-n) + return + ;; + esac 
+ + case "${words[$cword-2]}$prev=" in + *id=*) + cur="${cur#=}" + __docker_container_ids + return + ;; + *name=*) + cur="${cur#=}" + __docker_container_names + return + ;; + *status=*) + COMPREPLY=( $( compgen -W "exited paused restarting running" -- "${cur#=}" ) ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --before --filter -f --format --help --latest -l -n --no-trunc --quiet -q --size -s --since" -- "$cur" ) ) + ;; + esac +} + +_docker_pull() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all-tags -a --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + for arg in "${COMP_WORDS[@]}"; do + case "$arg" in + --all-tags|-a) + __docker_image_repos + return + ;; + esac + done + __docker_image_repos_and_tags + fi + ;; + esac +} + +_docker_push() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_image_repos_and_tags + fi + ;; + esac +} + +_docker_rename() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_containers_all + fi + ;; + esac +} + +_docker_restart() { + case "$prev" in + --time|-t) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --time -t" -- "$cur" ) ) + ;; + *) + __docker_containers_all + ;; + esac +} + +_docker_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help --link -l --volumes -v" -- "$cur" ) ) + ;; + *) + for arg in "${COMP_WORDS[@]}"; do + case "$arg" in + --force|-f) + __docker_containers_all + return + ;; + esac + done + __docker_containers_stopped + ;; + esac +} + +_docker_rmi() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help --no-prune" -- "$cur" ) ) + ;; + *) + __docker_image_repos_and_tags_and_ids + ;; + esac +} + +_docker_run() { + local options_with_args=" + --add-host + --attach -a + --blkio-weight + --cap-add + --cap-drop + --cgroup-parent + --cidfile + --cpu-period + --cpu-quota + --cpuset-cpus + --cpuset-mems + --cpu-shares -c + --device + --dns + --dns-search + --entrypoint + --env -e + --env-file + --expose + --group-add + --hostname -h + --ipc + --label-file + --label -l + --link + --log-driver + --log-opt + --lxc-conf + --mac-address + --memory -m + --memory-swap + --memory-swappiness + --name + --net + --pid + --publish -p + --restart + --security-opt + --ulimit + --user -u + --uts + --volumes-from + --volume -v + --workdir -w + " + + local all_options="$options_with_args + --disable-content-trust=false + --help + --interactive -i + --oom-kill-disable + --privileged + --publish-all -P + --read-only + --tty -t + " + + [ "$command" = "run" ] && all_options="$all_options + --detach -d + --rm + --sig-proxy=false + " + + local options_with_args_glob=$(__docker_to_extglob "$options_with_args") + + case "$prev" in + --add-host) + case "$cur" in + *:) + __docker_resolve_hostname + return + ;; + esac + ;; + --attach|-a) + COMPREPLY=( $( compgen -W 'stdin stdout stderr' -- "$cur" ) ) + return + ;; + --cap-add|--cap-drop) + __docker_capabilities + return + ;; + --cidfile|--env-file|--label-file) + _filedir + return + ;; + --device|--volume|-v) + case "$cur" in + *:*) + # TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine) + ;; + '') + COMPREPLY=( $( 
compgen -W '/' -- "$cur" ) ) + compopt -o nospace + ;; + /*) + _filedir + compopt -o nospace + ;; + esac + return + ;; + --env|-e) + COMPREPLY=( $( compgen -e -- "$cur" ) ) + compopt -o nospace + return + ;; + --ipc) + case "$cur" in + *:*) + cur="${cur#*:}" + __docker_containers_running + ;; + *) + COMPREPLY=( $( compgen -W 'host container:' -- "$cur" ) ) + if [ "$COMPREPLY" = "container:" ]; then + compopt -o nospace + fi + ;; + esac + return + ;; + --link) + case "$cur" in + *:*) + ;; + *) + __docker_containers_running + COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) + compopt -o nospace + ;; + esac + return + ;; + --log-driver) + __docker_log_drivers + return + ;; + --log-opt) + __docker_log_driver_options + return + ;; + --net) + case "$cur" in + container:*) + local cur=${cur#*:} + __docker_containers_all + ;; + *) + COMPREPLY=( $( compgen -W "bridge none container: host" -- "$cur") ) + if [ "${COMPREPLY[*]}" = "container:" ] ; then + compopt -o nospace + fi + ;; + esac + return + ;; + --restart) + case "$cur" in + on-failure:*) + ;; + *) + COMPREPLY=( $( compgen -W "no on-failure on-failure: always" -- "$cur") ) + ;; + esac + return + ;; + --security-opt) + case "$cur" in + label:*:*) + ;; + label:*) + local cur=${cur##*:} + COMPREPLY=( $( compgen -W "user: role: type: level: disable" -- "$cur") ) + if [ "${COMPREPLY[*]}" != "disable" ] ; then + compopt -o nospace + fi + ;; + *) + COMPREPLY=( $( compgen -W "label apparmor" -S ":" -- "$cur") ) + compopt -o nospace + ;; + esac + return + ;; + --volumes-from) + __docker_containers_all + return + ;; + $options_with_args_glob ) + return + ;; + esac + + __docker_complete_log_driver_options && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$all_options" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) + + if [ $cword -eq $counter ]; then + __docker_image_repos_and_tags_and_ids + fi + ;; + esac +} + +_docker_save() { + case "$prev" in + --output|-o) + _filedir + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --output -o" -- "$cur" ) ) + ;; + *) + __docker_image_repos_and_tags_and_ids + ;; + esac +} + +_docker_search() { + case "$prev" in + --stars|-s) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--automated --help --no-trunc --stars -s" -- "$cur" ) ) + ;; + esac +} + +_docker_start() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--attach -a --help --interactive -i" -- "$cur" ) ) + ;; + *) + __docker_containers_stopped + ;; + esac +} + +_docker_stats() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--no-stream --help" -- "$cur" ) ) + ;; + *) + __docker_containers_running + ;; + esac +} + +_docker_stop() { + case "$prev" in + --time|-t) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --time -t" -- "$cur" ) ) + ;; + *) + __docker_containers_running + ;; + esac +} + +_docker_tag() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + + if [ $cword -eq $counter ]; then + __docker_image_repos_and_tags + return + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then + __docker_image_repos_and_tags + return + fi + ;; + esac +} + +_docker_unpause() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_containers_unpauseable + fi + 
;; + esac +} + +_docker_top() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_containers_running + fi + ;; + esac +} + +_docker_version() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + esac +} + +_docker_wait() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_containers_all + ;; + esac +} + +_docker() { + local previous_extglob_setting=$(shopt -p extglob) + shopt -s extglob + + local commands=( + attach + build + commit + cp + create + daemon + diff + events + exec + export + history + images + import + info + inspect + kill + load + login + logout + logs + pause + port + ps + pull + push + rename + restart + rm + rmi + run + save + search + start + stats + stop + tag + top + unpause + version + wait + ) + + # These options are valid as global options for all client commands + # and valid as command options for `docker daemon` + local global_boolean_options=" + --debug -D + --tls + --tlsverify + " + local global_options_with_args=" + --config + --host -H + --log-level -l + --tlscacert + --tlscert + --tlskey + " + + local host config + + COMPREPLY=() + local cur prev words cword + _get_comp_words_by_ref -n : cur prev words cword + + local command='docker' command_pos=0 + local counter=1 + while [ $counter -lt $cword ]; do + case "${words[$counter]}" in + # save host so that completion can use custom daemon + --host|-H) + (( counter++ )) + host="${words[$counter]}" + ;; + # save config so that completion can use custom configuration directories + --config) + (( counter++ )) + config="${words[$counter]}" + ;; + $(__docker_to_extglob "$global_options_with_args") ) + (( counter++ )) + ;; + -*) + ;; + =) + (( counter++ )) + ;; + *) + command="${words[$counter]}" + command_pos=$counter + break + ;; + esac + (( counter++ )) + done + + local completions_func=_docker_${command} + declare -F $completions_func >/dev/null && $completions_func + + eval "$previous_extglob_setting" + return 0 +} + +complete -F _docker docker diff --git a/contrib/completion/fish/docker.fish b/contrib/completion/fish/docker.fish new file mode 100644 index 00000000..e146ae6c --- /dev/null +++ b/contrib/completion/fish/docker.fish @@ -0,0 +1,399 @@ +# docker.fish - docker completions for fish shell +# +# This file is generated by gen_docker_fish_completions.py from: +# https://github.com/barnybug/docker-fish-completion +# +# To install the completions: +# mkdir -p ~/.config/fish/completions +# cp docker.fish ~/.config/fish/completions +# +# Completion supported: +# - parameters +# - commands +# - containers +# - images +# - repositories + +function __fish_docker_no_subcommand --description 'Test if docker has yet to be given the subcommand' + for i in (commandline -opc) + if contains -- $i attach build commit cp create diff events exec export history images import info inspect kill load login logout logs pause port ps pull push rename restart rm rmi run save search start stop tag top unpause version wait stats + return 1 + end + end + return 0 +end + +function __fish_print_docker_containers --description 'Print a list of docker containers' -a select + switch $select + case running + docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; $5 ~ "^Up" {print $1 "\n" $(NF-1)}' | tr ',' '\n' + case stopped + docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; $5 ~ "^Exit" 
{print $1 "\n" $(NF-1)}' | tr ',' '\n'
+        case all
+            docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS="  +"}; {print $1 "\n" $(NF-1)}' | tr ',' '\n'
+    end
+end
+
+function __fish_print_docker_images --description 'Print a list of docker images'
+    docker images | command awk 'NR>1' | command grep -v '<none>' | command awk '{print $1":"$2}'
+end
+
+function __fish_print_docker_repositories --description 'Print a list of docker repositories'
+    docker images | command awk 'NR>1' | command grep -v '<none>' | command awk '{print $1}' | command sort | command uniq
+end
+
+# common options
+complete -c docker -f -n '__fish_docker_no_subcommand' -l api-cors-header -d "Set CORS headers in the remote API. Default is cors disabled"
+complete -c docker -f -n '__fish_docker_no_subcommand' -s b -l bridge -d 'Attach containers to a pre-existing network bridge'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l bip -d "Use this CIDR notation address for the network bridge's IP, not compatible with -b"
+complete -c docker -f -n '__fish_docker_no_subcommand' -s D -l debug -d 'Enable debug mode'
+complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable daemon mode'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force Docker to use specific DNS servers'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l dns-search -d 'Force Docker to use specific DNS search domains'
+complete -c docker -f -n '__fish_docker_no_subcommand' -s e -l exec-driver -d 'Force the Docker runtime to use a specific exec driver'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l exec-opt -d 'Set exec driver options'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr -d 'IPv4 subnet for fixed IPs (e.g. 10.20.0.0/16)'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr-v6 -d 'IPv6 subnet for fixed IPs (e.g.: 2001:a02b/48)'
+complete -c docker -f -n '__fish_docker_no_subcommand' -s G -l group -d 'Group to assign the unix socket specified by -H when running in daemon mode'
+complete -c docker -f -n '__fish_docker_no_subcommand' -s g -l graph -d 'Path to use as the root of the Docker runtime'
+complete -c docker -f -n '__fish_docker_no_subcommand' -s H -l host -d 'The socket(s) to bind to in daemon mode or connect to in client mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.'
+complete -c docker -f -n '__fish_docker_no_subcommand' -s h -l help -d 'Print usage'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Allow unrestricted inter-container and Docker daemon host communication'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l insecure-registry -d 'Enable insecure communication with specified registries (no certificate verification for HTTPS and enable HTTP fallback) (e.g., localhost:5000 or 10.20.0.0/16)'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l ip -d 'Default IP address to use when binding container ports'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-forward -d 'Enable net.ipv4.ip_forward and IPv6 forwarding if --fixed-cidr-v6 is defined. IPv6 forwarding may interfere with your existing IPv6 configuration when using Router Advertisement.'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-masq -d "Enable IP masquerading for bridge's IP range" +complete -c docker -f -n '__fish_docker_no_subcommand' -l iptables -d "Enable Docker's addition of iptables rules" +complete -c docker -f -n '__fish_docker_no_subcommand' -l ipv6 -d 'Enable IPv6 networking' +complete -c docker -f -n '__fish_docker_no_subcommand' -s l -l log-level -d 'Set the logging level (debug, info, warn, error, fatal)' +complete -c docker -f -n '__fish_docker_no_subcommand' -l label -d 'Set key=value labels to the daemon (displayed in `docker info`)' +complete -c docker -f -n '__fish_docker_no_subcommand' -l mtu -d 'Set the containers network MTU' +complete -c docker -f -n '__fish_docker_no_subcommand' -s p -l pidfile -d 'Path to use for daemon PID file' +complete -c docker -f -n '__fish_docker_no_subcommand' -l registry-mirror -d 'Specify a preferred Docker registry mirror' +complete -c docker -f -n '__fish_docker_no_subcommand' -s s -l storage-driver -d 'Force the Docker runtime to use a specific storage driver' +complete -c docker -f -n '__fish_docker_no_subcommand' -l selinux-enabled -d 'Enable selinux support. SELinux does not presently support the BTRFS storage driver' +complete -c docker -f -n '__fish_docker_no_subcommand' -l storage-opt -d 'Set storage driver options' +complete -c docker -f -n '__fish_docker_no_subcommand' -l tls -d 'Use TLS; implied by --tlsverify' +complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscacert -d 'Trust only remotes providing a certificate signed by the CA given here' +complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscert -d 'Path to TLS certificate file' +complete -c docker -f -n '__fish_docker_no_subcommand' -l tlskey -d 'Path to TLS key file' +complete -c docker -f -n '__fish_docker_no_subcommand' -l tlsverify -d 'Use TLS and verify the remote (daemon: verify client, client: verify daemon)' +complete -c docker -f -n '__fish_docker_no_subcommand' -s v -l version -d 'Print version information and quit' + +# subcommands +# attach +complete -c docker -f -n '__fish_docker_no_subcommand' -a attach -d 'Attach to a running container' +complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l no-stdin -d 'Do not attach STDIN' +complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l sig-proxy -d 'Proxy all received signals to the process (non-TTY mode only). SIGCHLD, SIGKILL, and SIGSTOP are not proxied.' 
+complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -a '(__fish_print_docker_containers running)' -d "Container" + +# build +complete -c docker -f -n '__fish_docker_no_subcommand' -a build -d 'Build an image from a Dockerfile' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s f -l file -d "Name of the Dockerfile (Default is 'Dockerfile' at context root)" +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l force-rm -d 'Always remove intermediate containers, even after unsuccessful builds' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l no-cache -d 'Do not use cache when building the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l pull -d 'Always attempt to pull a newer version of the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s q -l quiet -d 'Suppress the verbose output generated by the containers' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l rm -d 'Remove intermediate containers after a successful build' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s t -l tag -d 'Repository name (and optionally a tag) to be applied to the resulting image in case of success' + +# commit +complete -c docker -f -n '__fish_docker_no_subcommand' -a commit -d "Create a new image from a container's changes" +complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s a -l author -d 'Author (e.g., "John Hannibal Smith <hannibal@a-team.com>")' +complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s m -l message -d 'Commit message' +complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s p -l pause -d 'Pause container during commit' +complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -a '(__fish_print_docker_containers all)' -d "Container" + +# cp +complete -c docker -f -n '__fish_docker_no_subcommand' -a cp -d "Copy files/folders from a container's filesystem to the host path" +complete -c docker -A -f -n '__fish_seen_subcommand_from cp' -l help -d 'Print usage' + +# create +complete -c docker -f -n '__fish_docker_no_subcommand' -a create -d 'Create a new container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s a -l attach -d 'Attach to STDIN, STDOUT or STDERR.' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l add-host -d 'Add a custom host-to-IP mapping (host:ip)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s c -l cpu-shares -d 'CPU shares (relative weight)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cap-add -d 'Add Linux capabilities' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cap-drop -d 'Drop Linux capabilities' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cidfile -d 'Write the container ID to the file' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cpuset -d 'CPUs in which to allow execution (0-3, 0,1)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l device -d 'Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns -d 'Set custom DNS servers' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns-search -d "Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain)" +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s e -l env -d 'Set environment variables' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l entrypoint -d 'Overwrite the default ENTRYPOINT of the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l env-file -d 'Read in a line delimited file of environment variables' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l expose -d 'Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l group-add -d 'Add additional groups to run as' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s h -l hostname -d 'Container host name' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s i -l interactive -d 'Keep STDIN open even if not attached' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l ipc -d 'Default is to create a private IPC namespace (POSIX SysV IPC) for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l link -d 'Add link to another container in the form of <name|id>:alias' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l lxc-conf -d '(lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s m -l memory -d 'Memory limit (format: <number><optional unit>, where unit = b, k, m or g)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l mac-address -d 'Container MAC address (e.g. 92:d0:c6:0a:29:33)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l memory-swap -d "Total memory usage (memory + swap), set '-1' to disable swap (format: <number><optional unit>, where unit = b, k, m or g)" +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l name -d 'Assign a name to the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l net -d 'Set the Network mode for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s P -l publish-all -d 'Publish all exposed ports to random ports on the host interfaces' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s p -l publish -d "Publish a container's port to the host" +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l pid -d 'Default is to create a private PID namespace for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l privileged -d 'Give extended privileges to this container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l read-only -d "Mount the container's root filesystem as read only" +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l restart -d 'Restart policy to apply when a container exits (no, on-failure[:max-retry], always)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l security-opt -d 'Security Options' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s t -l tty -d 'Allocate a pseudo-TTY' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s u -l user -d 'Username or UID' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s v -l volume -d 'Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l volumes-from -d 'Mount volumes from the specified container(s)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s w -l workdir -d 'Working directory inside the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -a '(__fish_print_docker_images)' -d "Image" + +# diff +complete -c docker -f -n '__fish_docker_no_subcommand' -a diff -d "Inspect changes on a container's filesystem" +complete -c docker -A -f -n '__fish_seen_subcommand_from diff' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from diff' -a '(__fish_print_docker_containers all)' -d "Container" + +# events +complete -c docker -f -n '__fish_docker_no_subcommand' -a events -d 'Get real time events from the server' +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -s f -l filter -d "Provide filter values (i.e., 'event=stop')" +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l since -d 'Show all events created since timestamp' +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l until -d 'Stream events until this timestamp' + +# exec +complete -c docker -f -n '__fish_docker_no_subcommand' -a exec -d 'Run a command in a running container' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s d -l detach -d 'Detached mode: run command in the background' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s i -l interactive -d 'Keep STDIN open even if not attached' +complete -c docker -A -f -n 
'__fish_seen_subcommand_from exec' -s t -l tty -d 'Allocate a pseudo-TTY' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -a '(__fish_print_docker_containers running)' -d "Container" + +# export +complete -c docker -f -n '__fish_docker_no_subcommand' -a export -d 'Stream the contents of a container as a tar archive' +complete -c docker -A -f -n '__fish_seen_subcommand_from export' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from export' -a '(__fish_print_docker_containers all)' -d "Container" + +# history +complete -c docker -f -n '__fish_docker_no_subcommand' -a history -d 'Show the history of an image' +complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l no-trunc -d "Don't truncate output" +complete -c docker -A -f -n '__fish_seen_subcommand_from history' -s q -l quiet -d 'Only show numeric IDs' +complete -c docker -A -f -n '__fish_seen_subcommand_from history' -a '(__fish_print_docker_images)' -d "Image" + +# images +complete -c docker -f -n '__fish_docker_no_subcommand' -a images -d 'List images' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s a -l all -d 'Show all images (by default filter out the intermediate image layers)' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s f -l filter -d "Provide filter values (i.e., 'dangling=true')" +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l no-trunc -d "Don't truncate output" +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s q -l quiet -d 'Only show numeric IDs' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -a '(__fish_print_docker_repositories)' -d "Repository" + +# import +complete -c docker -f -n '__fish_docker_no_subcommand' -a import -d 'Create a new filesystem image from the contents of a tarball' +complete -c docker -A -f -n '__fish_seen_subcommand_from import' -l help -d 'Print usage' + +# info +complete -c docker -f -n '__fish_docker_no_subcommand' -a info -d 'Display system-wide information' + +# inspect +complete -c docker -f -n '__fish_docker_no_subcommand' -a inspect -d 'Return low-level information on a container or image' +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s f -l format -d 'Format the output using the given go template.' 
+complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_images)' -d "Image" +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_containers all)' -d "Container" + +# kill +complete -c docker -f -n '__fish_docker_no_subcommand' -a kill -d 'Kill a running container' +complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -s s -l signal -d 'Signal to send to the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -a '(__fish_print_docker_containers running)' -d "Container" + +# load +complete -c docker -f -n '__fish_docker_no_subcommand' -a load -d 'Load an image from a tar archive' +complete -c docker -A -f -n '__fish_seen_subcommand_from load' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from load' -s i -l input -d 'Read from a tar archive file, instead of STDIN' + +# login +complete -c docker -f -n '__fish_docker_no_subcommand' -a login -d 'Register or log in to a Docker registry server' +complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s e -l email -d 'Email' +complete -c docker -A -f -n '__fish_seen_subcommand_from login' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s p -l password -d 'Password' +complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s u -l username -d 'Username' + +# logout +complete -c docker -f -n '__fish_docker_no_subcommand' -a logout -d 'Log out from a Docker registry server' + +# logs +complete -c docker -f -n '__fish_docker_no_subcommand' -a logs -d 'Fetch the logs of a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s f -l follow -d 'Follow log output' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s t -l timestamps -d 'Show timestamps' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l since -d 'Show logs since timestamp' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l tail -d 'Output the specified number of lines at the end of logs (defaults to all logs)' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -a '(__fish_print_docker_containers running)' -d "Container" + +# port +complete -c docker -f -n '__fish_docker_no_subcommand' -a port -d 'Lookup the public-facing port that is NAT-ed to PRIVATE_PORT' +complete -c docker -A -f -n '__fish_seen_subcommand_from port' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from port' -a '(__fish_print_docker_containers running)' -d "Container" + +# pause +complete -c docker -f -n '__fish_docker_no_subcommand' -a pause -d 'Pause all processes within a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from pause' -a '(__fish_print_docker_containers running)' -d "Container" + +# ps +complete -c docker -f -n '__fish_docker_no_subcommand' -a ps -d 'List containers' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s a -l all -d 'Show all containers. Only running containers are shown by default.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l before -d 'Show only container created before Id or Name, include non-running ones.' 
+complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s f -l filter -d 'Provide filter values. Valid filters:' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s l -l latest -d 'Show only the latest created container, include non-running ones.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s n -d 'Show n last created containers, include non-running ones.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l no-trunc -d "Don't truncate output" +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s q -l quiet -d 'Only display numeric IDs' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s s -l size -d 'Display total file sizes' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l since -d 'Show only containers created since Id or Name, include non-running ones.' + +# pull +complete -c docker -f -n '__fish_docker_no_subcommand' -a pull -d 'Pull an image or a repository from a Docker registry server' +complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -s a -l all-tags -d 'Download all tagged images in the repository' +complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_images)' -d "Image" +complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_repositories)' -d "Repository" + +# push +complete -c docker -f -n '__fish_docker_no_subcommand' -a push -d 'Push an image or a repository to a Docker registry server' +complete -c docker -A -f -n '__fish_seen_subcommand_from push' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_images)' -d "Image" +complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_repositories)' -d "Repository" + +# rename +complete -c docker -f -n '__fish_docker_no_subcommand' -a rename -d 'Rename an existing container' + +# restart +complete -c docker -f -n '__fish_docker_no_subcommand' -a restart -d 'Restart a running container' +complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -s t -l time -d 'Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default is 10 seconds.' 
+complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -a '(__fish_print_docker_containers running)' -d "Container" + +# rm +complete -c docker -f -n '__fish_docker_no_subcommand' -a rm -d 'Remove one or more containers' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s f -l force -d 'Force the removal of a running container (uses SIGKILL)' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s l -l link -d 'Remove the specified link and not the underlying container' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s v -l volumes -d 'Remove the volumes associated with the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -a '(__fish_print_docker_containers stopped)' -d "Container" + +# rmi +complete -c docker -f -n '__fish_docker_no_subcommand' -a rmi -d 'Remove one or more images' +complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -s f -l force -d 'Force removal of the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -l no-prune -d 'Do not delete untagged parents' +complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -a '(__fish_print_docker_images)' -d "Image" + +# run +complete -c docker -f -n '__fish_docker_no_subcommand' -a run -d 'Run a command in a new container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s a -l attach -d 'Attach to STDIN, STDOUT or STDERR.' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l add-host -d 'Add a custom host-to-IP mapping (host:ip)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s c -l cpu-shares -d 'CPU shares (relative weight)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cap-add -d 'Add Linux capabilities' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cap-drop -d 'Drop Linux capabilities' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cidfile -d 'Write the container ID to the file' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cpuset -d 'CPUs in which to allow execution (0-3, 0,1)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s d -l detach -d 'Detached mode: run the container in the background and print the new container ID' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l device -d 'Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns -d 'Set custom DNS servers' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns-search -d "Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain)" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s e -l env -d 'Set environment variables' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l entrypoint -d 'Overwrite the default ENTRYPOINT of the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l env-file -d 'Read in a line delimited file of environment variables' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l expose -d 'Expose a port or a range of ports (e.g. 
--expose=3300-3310) from the container without publishing it to your host' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l group-add -d 'Add additional groups to run as' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s h -l hostname -d 'Container host name' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s i -l interactive -d 'Keep STDIN open even if not attached' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l ipc -d 'Default is to create a private IPC namespace (POSIX SysV IPC) for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l link -d 'Add link to another container in the form of <name|id>:alias' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l lxc-conf -d '(lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s m -l memory -d 'Memory limit (format: <number><optional unit>, where unit = b, k, m or g)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l mac-address -d 'Container MAC address (e.g. 92:d0:c6:0a:29:33)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l memory-swap -d "Total memory usage (memory + swap), set '-1' to disable swap (format: <number><optional unit>, where unit = b, k, m or g)" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l name -d 'Assign a name to the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l net -d 'Set the Network mode for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s P -l publish-all -d 'Publish all exposed ports to random ports on the host interfaces' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s p -l publish -d "Publish a container's port to the host" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l pid -d 'Default is to create a private PID namespace for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l privileged -d 'Give extended privileges to this container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l read-only -d "Mount the container's root filesystem as read only" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l restart -d 'Restart policy to apply when a container exits (no, on-failure[:max-retry], always)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l rm -d 'Automatically remove the container when it exits (incompatible with -d)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l security-opt -d 'Security Options' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l sig-proxy -d 'Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied.'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s t -l tty -d 'Allocate a pseudo-TTY' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s u -l user -d 'Username or UID' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s v -l volume -d 'Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l volumes-from -d 'Mount volumes from the specified container(s)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s w -l workdir -d 'Working directory inside the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -a '(__fish_print_docker_images)' -d "Image" + +# save +complete -c docker -f -n '__fish_docker_no_subcommand' -a save -d 'Save an image to a tar archive' +complete -c docker -A -f -n '__fish_seen_subcommand_from save' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from save' -s o -l output -d 'Write to a file, instead of STDOUT' +complete -c docker -A -f -n '__fish_seen_subcommand_from save' -a '(__fish_print_docker_images)' -d "Image" + +# search +complete -c docker -f -n '__fish_docker_no_subcommand' -a search -d 'Search for an image on the registry (defaults to the Docker Hub)' +complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l automated -d 'Only show automated builds' +complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l no-trunc -d "Don't truncate output" +complete -c docker -A -f -n '__fish_seen_subcommand_from search' -s s -l stars -d 'Only display images with at least x stars' + +# start +complete -c docker -f -n '__fish_docker_no_subcommand' -a start -d 'Start a stopped container' +complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s a -l attach -d "Attach container's STDOUT and STDERR and forward all signals to the process" +complete -c docker -A -f -n '__fish_seen_subcommand_from start' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s i -l interactive -d "Attach container's STDIN" +complete -c docker -A -f -n '__fish_seen_subcommand_from start' -a '(__fish_print_docker_containers stopped)' -d "Container" + +# stats +complete -c docker -f -n '__fish_docker_no_subcommand' -a stats -d "Display a live stream of one or more containers' resource usage statistics" +complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l no-stream -d 'Disable streaming stats and only pull the first result' +complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -a '(__fish_print_docker_containers running)' -d "Container" + +# stop +complete -c docker -f -n '__fish_docker_no_subcommand' -a stop -d 'Stop a running container' +complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -s t -l time -d 'Number of seconds to wait for the container to stop before killing it. Default is 10 seconds.'
+complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -a '(__fish_print_docker_containers running)' -d "Container" + +# tag +complete -c docker -f -n '__fish_docker_no_subcommand' -a tag -d 'Tag an image into a repository' +complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -s f -l force -d 'Force' +complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -l help -d 'Print usage' + +# top +complete -c docker -f -n '__fish_docker_no_subcommand' -a top -d 'Lookup the running processes of a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from top' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from top' -a '(__fish_print_docker_containers running)' -d "Container" + +# unpause +complete -c docker -f -n '__fish_docker_no_subcommand' -a unpause -d 'Unpause a paused container' +complete -c docker -A -f -n '__fish_seen_subcommand_from unpause' -a '(__fish_print_docker_containers running)' -d "Container" + +# version +complete -c docker -f -n '__fish_docker_no_subcommand' -a version -d 'Show the Docker version information' + +# wait +complete -c docker -f -n '__fish_docker_no_subcommand' -a wait -d 'Block until a container stops, then print its exit code' +complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -a '(__fish_print_docker_containers running)' -d "Container" + + diff --git a/contrib/completion/zsh/REVIEWERS b/contrib/completion/zsh/REVIEWERS new file mode 100644 index 00000000..03ee2dde --- /dev/null +++ b/contrib/completion/zsh/REVIEWERS @@ -0,0 +1,2 @@ +Tianon Gravi (@tianon) +Jessie Frazelle (@jfrazelle) diff --git a/contrib/completion/zsh/_docker b/contrib/completion/zsh/_docker new file mode 100644 index 00000000..37c4c8c2 --- /dev/null +++ b/contrib/completion/zsh/_docker @@ -0,0 +1,663 @@ +#compdef docker +# +# zsh completion for docker (http://docker.com) +# +# version: 0.3.0 +# github: https://github.com/felixr/docker-zsh-completion +# +# contributors: +# - Felix Riedel +# - Steve Durrheimer +# - Vincent Bernat +# +# license: +# +# Copyright (c) 2013, Felix Riedel +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the <organization> nor the +# names of its contributors may be used to endorse or promote products +# derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +__docker_get_containers() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local kind + declare -a running stopped lines args + + kind=$1 + shift + [[ $kind = (stopped|all) ]] && args=($args -a) + + lines=(${(f)"$(_call_program commands docker $docker_options ps $args)"}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + lines=(${lines[2,-1]}) + + # Container ID + local line + local s + for line in $lines; do + s="${line[${begin[CONTAINER ID]},${end[CONTAINER ID]}]%% ##}" + s="$s:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}" + s="$s, ${${${line[${begin[IMAGE]},${end[IMAGE]}]}/:/\\:}%% ##}" + if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then + stopped=($stopped $s) + else + running=($running $s) + fi + done + + # Names + local name + local -a names + for line in $lines; do + names=(${(ps:,:)${${line[${begin[NAMES]},-1]}%% *}}) + for name in $names; do + s="${name}:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}" + s="$s, ${${${line[${begin[IMAGE]},${end[IMAGE]}]}/:/\\:}%% ##}" + if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then + stopped=($stopped ${s#*/}) + else + running=($running ${s#*/}) + fi + done + done + + [[ $kind = (running|all) ]] && _describe -t containers-running "running containers" running "$@" && ret=0 + [[ $kind = (stopped|all) ]] && _describe -t containers-stopped "stopped containers" stopped "$@" && ret=0 + return ret +} + +__docker_stoppedcontainers() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_containers stopped "$@" +} + +__docker_runningcontainers() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_containers running "$@" +} + +__docker_containers() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_containers all "$@" +} + +__docker_images() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a images + images=(${${${(f)"$(_call_program commands docker $docker_options images)"}[2,-1]}/(#b)([^ ]##) ##([^ ]##) ##([^ ]##)*/${match[3]}:${(r:15:: :::)match[2]} in ${match[1]}}) + _describe -t docker-images "images" images && ret=0 + __docker_repositories_with_tags && ret=0 + return ret +} + +__docker_repositories() { + [[ $PREFIX = -* ]] && return 1 + declare -a repos + repos=(${${${(f)"$(_call_program commands docker $docker_options images)"}%% *}[2,-1]}) + repos=(${repos#<none>}) + _describe -t docker-repos "repositories" repos +} + +__docker_repositories_with_tags() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a repos onlyrepos matched + declare m + repos=(${${${${(f)"$(_call_program commands docker $docker_options images)"}[2,-1]}/ ##/:::}%% *}) + repos=(${${repos%:::}#<none>}) + # Check if we have a prefix-match for the current prefix.
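+ # e.g. (illustrative image names): with "ubuntu:14.04" and "busybox:latest" + # present, a word that already begins with a full repository name such as + # "ubuntu" or "ubuntu:14" is completed against repository:tag pairs below; + # otherwise we fall through and offer bare repository names with a ":" suffix.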
+ onlyrepos=(${repos%::*}) + for m in $onlyrepos; do + [[ ${PREFIX##${~~m}} != ${PREFIX} ]] && { + # Yes, complete with tags + repos=(${${repos/:::/:}/:/\\:}) + _describe -t docker-repos-with-tags "repositories with tags" repos && ret=0 + return ret + } + done + # No, only complete repositories + onlyrepos=(${${repos%:::*}/:/\\:}) + _describe -t docker-repos "repositories" onlyrepos -qS : && ret=0 + + return ret +} + +__docker_search() { + [[ $PREFIX = -* ]] && return 1 + local cache_policy + zstyle -s ":completion:${curcontext}:" cache-policy cache_policy + if [[ -z "$cache_policy" ]]; then + zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy + fi + + local searchterm cachename + searchterm="${words[$CURRENT]%/}" + cachename=_docker-search-$searchterm + + local expl + local -a result + if ( [[ ${(P)+cachename} -eq 0 ]] || _cache_invalid ${cachename#_} ) \ + && ! _retrieve_cache ${cachename#_}; then + _message "Searching for ${searchterm}..." + result=(${${${(f)"$(_call_program commands docker $docker_options search $searchterm)"}%% *}[2,-1]}) + _store_cache ${cachename#_} result + fi + _wanted dockersearch expl 'available images' compadd -a result +} + +__docker_caching_policy() { + oldp=( "$1"(Nmh+1) ) # 1 hour + (( $#oldp )) +} + +__docker_commands() { + local cache_policy + + zstyle -s ":completion:${curcontext}:" cache-policy cache_policy + if [[ -z "$cache_policy" ]]; then + zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy + fi + + if ( [[ ${+_docker_subcommands} -eq 0 ]] || _cache_invalid docker_subcommands) \ + && ! _retrieve_cache docker_subcommands; + then + local -a lines + lines=(${(f)"$(_call_program commands docker 2>&1)"}) + _docker_subcommands=(${${${lines[$((${lines[(i)Commands:]} + 1)),${lines[(I) *]}]}## #}/ ##/:}) + _docker_subcommands=($_docker_subcommands 'help:Show help for a command') + _store_cache docker_subcommands _docker_subcommands + fi + _describe -t docker-commands "docker command" _docker_subcommands +} + +__docker_subcommand() { + local -a _command_args opts_help opts_cpumem opts_create + local expl help="-h --help" + integer ret=1 + + opts_help=("(: -)"{-h,--help}"[Print usage]") + opts_cpumem=( + "($help -c --cpu-shares)"{-c,--cpu-shares=-}"[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)" + "($help)--cgroup-parent=-[Parent cgroup for the container]:cgroup: " + "($help)--cpu-period=-[Limit the CPU CFS (Completely Fair Scheduler) period]:CPU period: " + "($help)--cpu-quota=-[Limit the CPU CFS (Completely Fair Scheduler) quota]:CPU quota: " + "($help)--cpuset-cpus=-[CPUs in which to allow execution]:CPUs: " + "($help)--cpuset-mems=-[MEMs in which to allow execution]:MEMs: " + "($help -m --memory)"{-m,--memory=-}"[Memory limit]:Memory limit: " + "($help)--memory-swap=-[Total memory limit with swap]:Memory limit: " + ) + opts_create=( + "($help -a --attach)"{-a,--attach=-}"[Attach to stdin, stdout or stderr]:device:(STDIN STDOUT STDERR)" + "($help)*--add-host=-[Add a custom host-to-IP mapping]:host\:ip mapping: " + "($help)--blkio-weight=-[Block IO (relative weight), between 10 and 1000]:Block IO weight:(10 100 500 1000)" + "($help)*--cap-add=-[Add Linux capabilities]:capability: " + "($help)*--cap-drop=-[Drop Linux capabilities]:capability: " + "($help)--cidfile=-[Write the container ID to the file]:CID file:_files" + "($help)*--device=-[Add a host device to the container]:device:_files" + "($help)*--dns=-[Set custom dns servers]:dns server: " + "($help)*--dns-search=-[Set custom DNS search 
domains]:dns domains: " + "($help)*"{-e,--env=-}"[Set environment variables]:environment variable: " + "($help)--entrypoint=-[Overwrite the default entrypoint of the image]:entry point: " + "($help)*--env-file=-[Read environment variables from a file]:environment file:_files" + "($help)*--expose=-[Expose a port from the container without publishing it]: " + "($help)*--group-add=-[Add additional groups to run as]:group:_groups" + "($help -h --hostname)"{-h,--hostname=-}"[Container host name]:hostname:_hosts" + "($help -i --interactive)"{-i,--interactive}"[Keep stdin open even if not attached]" + "($help)--ipc=-[IPC namespace to use]:IPC namespace: " + "($help)*--link=-[Add link to another container]:link:->link" + "($help)*"{-l,--label=-}"[Set meta data on a container]:label: " + "($help)--log-driver=-[Default driver for container logs]:Logging driver:(json-file syslog journald gelf fluentd none)" + "($help)*--log-opt=-[Log driver specific options]:log driver options: " + "($help)*--lxc-conf=-[Add custom lxc options]:lxc options: " + "($help)--mac-address=-[Container MAC address]:MAC address: " + "($help)--name=-[Container name]:name: " + "($help)--net=-[Network mode]:network mode:(bridge none container host)" + "($help)--oom-kill-disable[Disable OOM Killer]" + "($help -P --publish-all)"{-P,--publish-all}"[Publish all exposed ports]" + "($help)*"{-p,--publish=-}"[Expose a container's port to the host]:port:_ports" + "($help)--pid=-[PID namespace to use]:PID: " + "($help)--privileged[Give extended privileges to this container]" + "($help)--read-only[Mount the container's root filesystem as read only]" + "($help)--restart=-[Restart policy]:restart policy:(no on-failure always)" + "($help)*--security-opt=-[Security options]:security option: " + "($help -t --tty)"{-t,--tty}"[Allocate a pseudo-tty]" + "($help -u --user)"{-u,--user=-}"[Username or UID]:user:_users" + "($help)*--ulimit=-[ulimit options]:ulimit: " + "($help)*-v[Bind mount a volume]:volume: " + "($help)*--volumes-from=-[Mount volumes from the specified container]:volume: " + "($help -w --workdir)"{-w,--workdir=-}"[Working directory inside the container]:directory:_directories" + ) + + case "$words[1]" in + (attach) + _arguments \ + $opts_help \ + "($help)--no-stdin[Do not attach stdin]" \ + "($help)--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]" \ + "($help -):containers:__docker_runningcontainers" && ret=0 + ;; + (build) + _arguments \ + $opts_help \ + $opts_cpumem \ + "($help -f --file)"{-f,--file=-}"[Name of the Dockerfile]:Dockerfile:_files" \ + "($help)--force-rm[Always remove intermediate containers]" \ + "($help)--no-cache[Do not use cache when building the image]" \ + "($help)--pull[Attempt to pull a newer version of the image]" \ + "($help -q --quiet)"{-q,--quiet}"[Suppress verbose build output]" \ + "($help)--rm[Remove intermediate containers after a successful build]" \ + "($help -t --tag)"{-t,--tag=-}"[Repository, name and tag for the image]: :__docker_repositories_with_tags" \ + "($help -):path or URL:_directories" && ret=0 + ;; + (commit) + _arguments \ + $opts_help \ + "($help -a --author)"{-a,--author=-}"[Author]:author: " \ + "($help -c --change)*"{-c,--change=-}"[Apply Dockerfile instruction to the created image]:Dockerfile:_files" \ + "($help -m --message)"{-m,--message=-}"[Commit message]:message: " \ + "($help -p --pause)"{-p,--pause}"[Pause container during commit]" \ + "($help -):container:__docker_containers" \ + "($help -): :__docker_repositories_with_tags" && ret=0 + ;; + (cp) + 
_arguments \ + $opts_help \ + "($help -)1:container:->container" \ + "($help -)2:hostpath:_files" && ret=0 + case $state in + (container) + if compset -P "*:"; then + _files && ret=0 + else + __docker_containers -qS ":" && ret=0 + fi + ;; + esac + ;; + (create) + _arguments \ + $opts_help \ + $opts_cpumem \ + $opts_create \ + "($help -): :__docker_images" \ + "($help -):command: _command_names -e" \ + "($help -)*::arguments: _normal" && ret=0 + + case $state in + (link) + if compset -P "*:"; then + _wanted alias expl "Alias" compadd -E "" && ret=0 + else + __docker_runningcontainers -qS ":" && ret=0 + fi + ;; + esac + + ;; + (diff) + _arguments \ + $opts_help \ + "($help -)*:containers:__docker_containers" && ret=0 + ;; + (events) + _arguments \ + $opts_help \ + "($help)*"{-f,--filter=-}"[Filter values]:filter: " \ + "($help)--since=-[Events created since this timestamp]:timestamp: " \ + "($help)--until=-[Events created until this timestamp]:timestamp: " && ret=0 + ;; + (exec) + local state + _arguments \ + $opts_help \ + "($help -d --detach)"{-d,--detach}"[Detached mode: leave the container running in the background]" \ + "($help -i --interactive)"{-i,--interactive}"[Keep stdin open even if not attached]" \ + "($help -t --tty)"{-t,--tty}"[Allocate a pseudo-tty]" \ + "($help -u --user)"{-u,--user=-}"[Username or UID]:user:_users" \ + "($help -):containers:__docker_runningcontainers" \ + "($help -)*::command:->anycommand" && ret=0 + + case $state in + (anycommand) + shift 1 words + (( CURRENT-- )) + _normal && ret=0 + ;; + esac + ;; + (export) + _arguments \ + $opts_help \ + "($help -o --output)"{-o,--output=-}"[Write to a file, instead of stdout]:output file:_files" \ + "($help -)*:containers:__docker_containers" && ret=0 + ;; + (history) + _arguments \ + $opts_help \ + "($help -H --human)"{-H,--human}"[Print sizes and dates in human readable format]" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \ + "($help -)*: :__docker_images" && ret=0 + ;; + (images) + _arguments \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Show all images]" \ + "($help)--digest[Show digests]" \ + "($help)*"{-f,--filter=-}"[Filter values]:filter: " \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \ + "($help -): :__docker_repositories" && ret=0 + ;; + (import) + _arguments \ + $opts_help \ + "($help -c --change)*"{-c,--change=-}"[Apply Dockerfile instruction to the created image]:Dockerfile:_files" \ + "($help -):URL:(- http:// file://)" \ + "($help -): :__docker_repositories_with_tags" && ret=0 + ;; + (info|version) + _arguments \ + $opts_help && ret=0 + ;; + (inspect) + _arguments \ + $opts_help \ + "($help -f --format=-)"{-f,--format=-}"[Format the output using the given go template]:template: " \ + "($help)--type=-[Return JSON for specified type]:type:(image container)" \ + "($help -)*:containers:__docker_containers" && ret=0 + ;; + (kill) + _arguments \ + $opts_help \ + "($help -s --signal)"{-s,--signal=-}"[Signal to send]:signal:_signals" \ + "($help -)*:containers:__docker_runningcontainers" && ret=0 + ;; + (load) + _arguments \ + $opts_help \ + "($help -i --input)"{-i,--input=-}"[Read from tar archive file]:archive file:_files -g "*.((tar|TAR)(.gz|.GZ|.Z|.bz2|.lzma|.xz|)|(tbz|tgz|txz))(-.)"" && ret=0 + ;; + (login) + _arguments \ + $opts_help \ + "($help -e --email)"{-e,--email=-}"[Email]:email: " \ + "($help -p --password)"{-p,--password=-}"[Password]:password: " \ + "($help -u 
--user)"{-u,--user=-}"[Username]:username: " \ + "($help -)1:server: " && ret=0 + ;; + (logout) + _arguments \ + $opts_help \ + "($help -)1:server: " && ret=0 + ;; + (logs) + _arguments \ + $opts_help \ + "($help -f --follow)"{-f,--follow}"[Follow log output]" \ + "($help -s --since)"{-s,--since=-}"[Show logs since this timestamp]:timestamp: " \ + "($help -t --timestamps)"{-t,--timestamps}"[Show timestamps]" \ + "($help)--tail=-[Output the last K lines]:lines:(1 10 20 50 all)" \ + "($help -)*:containers:__docker_containers" && ret=0 + ;; + (pause|unpause) + _arguments \ + $opts_help \ + "($help -)*:containers:__docker_runningcontainers" && ret=0 + ;; + (port) + _arguments \ + $opts_help \ + "($help -)1:containers:__docker_runningcontainers" \ + "($help -)2:port:_ports" && ret=0 + ;; + (ps) + _arguments \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Show all containers]" \ + "($help)--before=-[Show only container created before...]:containers:__docker_containers" \ + "($help)*"{-f,--filter=-}"[Filter values]:filter: " \ + "($help -l --latest)"{-l,--latest}"[Show only the latest created container]" \ + "($help)-n[Show n last created containers, include non-running one]:n:(1 5 10 25 50)" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \ + "($help -s --size)"{-s,--size}"[Display total file sizes]" \ + "($help)--since=-[Show only containers created since...]:containers:__docker_containers" && ret=0 + ;; + (pull) + _arguments \ + $opts_help \ + "($help -a --all-tags)"{-a,--all-tags}"[Download all tagged images]" \ + "($help -):name:__docker_search" && ret=0 + ;; + (push) + _arguments \ + $opts_help \ + "($help -): :__docker_images" && ret=0 + ;; + (rename) + _arguments \ + $opts_help \ + "($help -):old name:__docker_containers" \ + "($help -):new name: " && ret=0 + ;; + (restart|stop) + _arguments \ + $opts_help \ + "($help -t --time=-)"{-t,--time=-}"[Number of seconds to try to stop for before killing the container]:seconds to before killing:(1 5 10 30 60)" \ + "($help -)*:containers:__docker_runningcontainers" && ret=0 + ;; + (rm) + _arguments \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force removal]" \ + "($help -l --link)"{-l,--link}"[Remove the specified link and not the underlying container]" \ + "($help -v --volumes)"{-v,--volumes}"[Remove the volumes associated to the container]" \ + "($help -)*:containers:__docker_stoppedcontainers" && ret=0 + ;; + (rmi) + _arguments \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force removal]" \ + "($help)--no-prune[Do not delete untagged parents]" \ + "($help -)*: :__docker_images" && ret=0 + ;; + (run) + _arguments \ + $opts_help \ + $opts_cpumem \ + $opts_create \ + "($help -d --detach)"{-d,--detach}"[Detached mode: leave the container running in the background]" \ + "($help)--rm[Remove intermediate containers when it exits]" \ + "($help)--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]" \ + "($help -): :__docker_images" \ + "($help -):command: _command_names -e" \ + "($help -)*::arguments: _normal" && ret=0 + + case $state in + (link) + if compset -P "*:"; then + _wanted alias expl "Alias" compadd -E "" && ret=0 + else + __docker_runningcontainers -qS ":" && ret=0 + fi + ;; + esac + + ;; + (save) + _arguments \ + $opts_help \ + "($help -o --output)"{-o,--output=-}"[Write to file]:file:_files" \ + "($help -)*: :__docker_images" && ret=0 + ;; + (search) + _arguments \ + $opts_help \ + "($help)--automated[Only show automated builds]" \ + 
"($help)--no-trunc[Do not truncate output]" \ + "($help -s --stars)"{-s,--stars=-}"[Only display with at least X stars]:stars:(0 10 100 1000)" \ + "($help -):term: " && ret=0 + ;; + (start) + _arguments \ + $opts_help \ + "($help -a --attach)"{-a,--attach}"[Attach container's stdout/stderr and forward all signals]" \ + "($help -i --interactive)"{-i,--interactive}"[Attach container's stding]" \ + "($help -)*:containers:__docker_stoppedcontainers" && ret=0 + ;; + (stats) + _arguments \ + $opts_help \ + "($help)--no-stream[Disable streaming stats and only pull the first result]" \ + "($help -)*:containers:__docker_runningcontainers" && ret=0 + ;; + (tag) + _arguments \ + $opts_help \ + "($help -f --force)"{-f,--force}"[force]"\ + "($help -):source:__docker_images"\ + "($help -):destination:__docker_repositories_with_tags" && ret=0 + ;; + (top) + _arguments \ + $opts_help \ + "($help -)1:containers:__docker_runningcontainers" \ + "($help -)*:: :->ps-arguments" && ret=0 + case $state in + (ps-arguments) + _ps && ret=0 + ;; + esac + + ;; + (wait) + _arguments \ + $opts_help \ + "($help -)*:containers:__docker_runningcontainers" && ret=0 + ;; + (help) + _arguments ":subcommand:__docker_commands" && ret=0 + ;; + esac + + return ret +} + +_docker() { + # Support for subservices, which allows for `compdef _docker docker-shell=_docker_containers`. + # Based on /usr/share/zsh/functions/Completion/Unix/_git without support for `ret`. + if [[ $service != docker ]]; then + _call_function - _$service + return + fi + + local curcontext="$curcontext" state line help="-h --help" + integer ret=1 + typeset -A opt_args + + _arguments -C \ + "(: -)"{-h,--help}"[Print usage]" \ + "($help)--api-cors-header=-[Set CORS headers in the remote API]:CORS headers: " \ + "($help -b --bridge)"{-b,--bridge=-}"[Attach containers to a network bridge]:bridge:_net_interfaces" \ + "($help)--bip=-[Specify network bridge IP]" \ + "($help -D --debug)"{-D,--debug}"[Enable debug mode]" \ + "($help -d --daeamon)"{-d,--daemon}"[Enable daemon mode]" \ + "($help)--default-gateway[Container default gateway IPv4 address]:IPv4 address: " \ + "($help)--default-gateway-v6[Container default gateway IPv6 address]:IPv6 address: " \ + "($help)--disable-legacy-registry[Do not contact legacy registries]" \ + "($help)*--dns=-[DNS server to use]:DNS: " \ + "($help)*--dns-search=-[DNS search domains to use]" \ + "($help)*--default-ulimit=-[Set default ulimit settings for containers]:ulimit: " \ + "($help -e --exec-driver)"{-e,--exec-driver=-}"[Exec driver to use]:driver:(native lxc windows)" \ + "($help)*--exec-opt=-[Set exec driver options]:exec driver options: " \ + "($help)--exec-root=-[Root of the Docker execdriver]:path:_directories" \ + "($help)--fixed-cidr=-[IPv4 subnet for fixed IPs]:IPv4 subnet: " \ + "($help)--fixed-cidr-v6=-[IPv6 subnet for fixed IPs]:IPv6 subnet: " \ + "($help -G --group)"{-G,--group=-}"[Group for the unix socket]:group:_groups" \ + "($help -g --graph)"{-g,--graph=-}"[Root of the Docker runtime]:path:_directories" \ + "($help -H --host)"{-H,--host=-}"[tcp://host:port to bind/connect to]:host: " \ + "($help)--icc[Enable inter-container communication]" \ + "($help)*--insecure-registry=-[Enable insecure registry communication]:registry: " \ + "($help)--ip=-[Default IP when binding container ports]" \ + "($help)--ip-forward[Enable net.ipv4.ip_forward]" \ + "($help)--ip-masq[Enable IP masquerading]" \ + "($help)--iptables[Enable addition of iptables rules]" \ + "($help)--ipv6[Enable IPv6 networking]" \ + "($help -l 
--log-level)"{-l,--log-level=-}"[Set the logging level]:level:(debug info warn error fatal)" \ + "($help)*--label=-[Set key=value labels to the daemon]:label: " \ + "($help)--log-driver=-[Default driver for container logs]:Logging driver:(json-file syslog journald gelf fluentd none)" \ + "($help)*--log-opt=-[Log driver specific options]:log driver options: " \ + "($help)--mtu=-[Set the containers network MTU]:mtu:(0 576 1420 1500 9000)" \ + "($help -p --pidfile)"{-p,--pidfile=-}"[Path to use for daemon PID file]:PID file:_files" \ + "($help)*--registry-mirror=-[Preferred Docker registry mirror]:registry mirror: " \ + "($help -s --storage-driver)"{-s,--storage-driver=-}"[Storage driver to use]:driver:(aufs devicemapper btrfs zfs overlay)" \ + "($help)--selinux-enabled[Enable selinux support]" \ + "($help)*--storage-opt=-[Set storage driver options]:storage driver options: " \ + "($help)--tls[Use TLS]" \ + "($help)--tlscacert=-[Trust certs signed only by this CA]:PEM file:_files -g "*.(pem|crt)"" \ + "($help)--tlscert=-[Path to TLS certificate file]:PEM file:_files -g "*.(pem|crt)"" \ + "($help)--tlskey=-[Path to TLS key file]:Key file:_files -g "*.(pem|key)"" \ + "($help)--tlsverify[Use TLS and verify the remote]" \ + "($help)--userland-proxy[Use userland proxy for loopback traffic]" \ + "($help -v --version)"{-v,--version}"[Print version information and quit]" \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + local host=${opt_args[-H]}${opt_args[--host]} + local docker_options=${host:+--host $host} + + case $state in + (command) + __docker_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-$words[1]: + __docker_subcommand && ret=0 + ;; + esac + + return ret +} + +_docker "$@" + +# Local Variables: +# mode: Shell-Script +# sh-indentation: 4 +# indent-tabs-mode: nil +# sh-basic-offset: 4 +# End: +# vim: ft=zsh sw=4 ts=4 et diff --git a/contrib/desktop-integration/README.md b/contrib/desktop-integration/README.md new file mode 100644 index 00000000..85a01b9e --- /dev/null +++ b/contrib/desktop-integration/README.md @@ -0,0 +1,11 @@ +Desktop Integration +=================== + +The ./contrib/desktop-integration contains examples of typical dockerized +desktop applications. + +Examples +======== + +* Chromium: ./chromium/Dockerfile shows a way to dockerize a common application +* Gparted: ./gparted/Dockerfile shows a way to dockerize a common application w devices diff --git a/contrib/desktop-integration/chromium/Dockerfile b/contrib/desktop-integration/chromium/Dockerfile new file mode 100644 index 00000000..5cacd1f9 --- /dev/null +++ b/contrib/desktop-integration/chromium/Dockerfile @@ -0,0 +1,36 @@ +# VERSION: 0.1 +# DESCRIPTION: Create chromium container with its dependencies +# AUTHOR: Jessica Frazelle +# COMMENTS: +# This file describes how to build a Chromium container with all +# dependencies installed. It uses native X11 unix socket. +# Tested on Debian Jessie +# USAGE: +# # Download Chromium Dockerfile +# wget http://raw.githubusercontent.com/docker/docker/master/contrib/desktop-integration/chromium/Dockerfile +# +# # Build chromium image +# docker build -t chromium . +# +# # Run stateful data-on-host chromium. 
For ephemeral, remove -v /data/chromium:/data +# docker run -v /data/chromium:/data -v /tmp/.X11-unix:/tmp/.X11-unix \ +# -e DISPLAY=unix$DISPLAY chromium + +# # To run stateful dockerized data containers +# docker run --volumes-from chromium-data -v /tmp/.X11-unix:/tmp/.X11-unix \ +# -e DISPLAY=unix$DISPLAY chromium + +# Base docker image +FROM debian:jessie +MAINTAINER Jessica Frazelle + +# Install Chromium +RUN apt-get update && apt-get install -y \ + chromium \ + chromium-l10n \ + libcanberra-gtk-module \ + libexif-dev \ + --no-install-recommends + +# Autorun chromium +CMD ["/usr/bin/chromium", "--no-sandbox", "--user-data-dir=/data"] diff --git a/contrib/desktop-integration/gparted/Dockerfile b/contrib/desktop-integration/gparted/Dockerfile new file mode 100644 index 00000000..3ddb2320 --- /dev/null +++ b/contrib/desktop-integration/gparted/Dockerfile @@ -0,0 +1,31 @@ +# VERSION: 0.1 +# DESCRIPTION: Create gparted container with its dependencies +# AUTHOR: Jessica Frazelle +# COMMENTS: +# This file describes how to build a gparted container with all +# dependencies installed. It uses native X11 unix socket. +# Tested on Debian Jessie +# USAGE: +# # Download gparted Dockerfile +# wget http://raw.githubusercontent.com/docker/docker/master/contrib/desktop-integration/gparted/Dockerfile +# +# # Build gparted image +# docker build -t gparted . +# +# docker run -v /tmp/.X11-unix:/tmp/.X11-unix \ +# --device=/dev/sda:/dev/sda \ +# -e DISPLAY=unix$DISPLAY gparted +# + +# Base docker image +FROM debian:jessie +MAINTAINER Jessica Frazelle + +# Install Gparted and its dependencies +RUN apt-get update && apt-get install -y \ + gparted \ + libcanberra-gtk-module \ + --no-install-recommends + +# Autorun gparted +CMD ["/usr/sbin/gparted"] diff --git a/contrib/docker-device-tool/device_tool.go b/contrib/docker-device-tool/device_tool.go new file mode 100644 index 00000000..0a0b0803 --- /dev/null +++ b/contrib/docker-device-tool/device_tool.go @@ -0,0 +1,174 @@ +package main + +import ( + "flag" + "fmt" + "os" + "path" + "sort" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver/devmapper" + "github.com/docker/docker/pkg/devicemapper" +) + +func usage() { + fmt.Fprintf(os.Stderr, "Usage: %s [status] | [list] | [device id] | [resize new-pool-size] | [snap new-id base-id] | [remove id] | [mount id mountpoint]\n", os.Args[0]) + flag.PrintDefaults() + os.Exit(1) +} + +func byteSizeFromString(arg string) (int64, error) { + digits := "" + rest := "" + last := strings.LastIndexAny(arg, "0123456789") + if last >= 0 { + digits = arg[:last+1] + rest = arg[last+1:] + } + + val, err := strconv.ParseInt(digits, 10, 64) + if err != nil { + return val, err + } + + rest = strings.ToLower(strings.TrimSpace(rest)) + + var multiplier int64 = 1 + switch rest { + case "": + multiplier = 1 + case "k", "kb": + multiplier = 1024 + case "m", "mb": + multiplier = 1024 * 1024 + case "g", "gb": + multiplier = 1024 * 1024 * 1024 + case "t", "tb": + multiplier = 1024 * 1024 * 1024 * 1024 + default: + return 0, fmt.Errorf("Unknown size unit: %s", rest) + } + + return val * multiplier, nil +} + +func main() { + root := flag.String("r", "/var/lib/docker", "Docker root dir") + flDebug := flag.Bool("D", false, "Debug mode") + + flag.Parse() + + if *flDebug { + os.Setenv("DEBUG", "1") + logrus.SetLevel(logrus.DebugLevel) + } + + if flag.NArg() < 1 { + usage() + } + + args := flag.Args() + + home := path.Join(*root, "devicemapper") + devices, err := devmapper.NewDeviceSet(home, false, 
nil)
	if err != nil {
		fmt.Println("Can't initialize device mapper: ", err)
		os.Exit(1)
	}

	switch args[0] {
	case "status":
		status := devices.Status()
		fmt.Printf("Pool name: %s\n", status.PoolName)
		fmt.Printf("Data Loopback file: %s\n", status.DataLoopback)
		fmt.Printf("Metadata Loopback file: %s\n", status.MetadataLoopback)
		fmt.Printf("Sector size: %d\n", status.SectorSize)
		fmt.Printf("Data use: %d of %d (%.1f %%)\n", status.Data.Used, status.Data.Total, 100.0*float64(status.Data.Used)/float64(status.Data.Total))
		fmt.Printf("Metadata use: %d of %d (%.1f %%)\n", status.Metadata.Used, status.Metadata.Total, 100.0*float64(status.Metadata.Used)/float64(status.Metadata.Total))
		break
	case "list":
		ids := devices.List()
		sort.Strings(ids)
		for _, id := range ids {
			fmt.Println(id)
		}
		break
	case "device":
		if flag.NArg() < 2 {
			usage()
		}
		status, err := devices.GetDeviceStatus(args[1])
		if err != nil {
			fmt.Println("Can't get device info: ", err)
			os.Exit(1)
		}
		fmt.Printf("Id: %d\n", status.DeviceId)
		fmt.Printf("Size: %d\n", status.Size)
		fmt.Printf("Transaction Id: %d\n", status.TransactionId)
		fmt.Printf("Size in Sectors: %d\n", status.SizeInSectors)
		fmt.Printf("Mapped Sectors: %d\n", status.MappedSectors)
		fmt.Printf("Highest Mapped Sector: %d\n", status.HighestMappedSector)
		break
	case "resize":
		if flag.NArg() < 2 {
			usage()
		}

		size, err := byteSizeFromString(args[1])
		if err != nil {
			fmt.Println("Invalid size: ", err)
			os.Exit(1)
		}

		err = devices.ResizePool(size)
		if err != nil {
			fmt.Println("Error resizing pool: ", err)
			os.Exit(1)
		}

		break
	case "snap":
		if flag.NArg() < 3 {
			usage()
		}

		err := devices.AddDevice(args[1], args[2])
		if err != nil {
			fmt.Println("Can't create snap device: ", err)
			os.Exit(1)
		}
		break
	case "remove":
		if flag.NArg() < 2 {
			usage()
		}

		err := devicemapper.RemoveDevice(args[1])
		if err != nil {
			fmt.Println("Can't remove device: ", err)
			os.Exit(1)
		}
		break
	case "mount":
		if flag.NArg() < 3 {
			usage()
		}

		err := devices.MountDevice(args[1], args[2], "")
		if err != nil {
			fmt.Println("Can't mount device: ", err)
			os.Exit(1)
		}
		break
	default:
		fmt.Printf("Unknown command %s\n", args[0])
		usage()

		os.Exit(1)
	}

	return
}
diff --git a/contrib/dockerize-disk.sh b/contrib/dockerize-disk.sh
new file mode 100755
index 00000000..444e243a
--- /dev/null
+++ b/contrib/dockerize-disk.sh
@@ -0,0 +1,118 @@
+#!/usr/bin/env bash
+set -e
+
+if ! command -v qemu-nbd &> /dev/null; then
+	echo >&2 'error: "qemu-nbd" not found!' 
+ exit 1 +fi + +usage() { + echo "Convert disk image to docker image" + echo "" + echo "usage: $0 image-name disk-image-file [ base-image ]" + echo " ie: $0 cirros:0.3.3 cirros-0.3.3-x86_64-disk.img" + echo " $0 ubuntu:cloud ubuntu-14.04-server-cloudimg-amd64-disk1.img ubuntu:14.04" +} + +if [ "$#" -lt 2 ]; then + usage + exit 1 +fi + +CURDIR=$(pwd) + +image_name="${1%:*}" +image_tag="${1#*:}" +if [ "$image_tag" == "$1" ]; then + image_tag="latest" +fi + +disk_image_file="$2" +docker_base_image="$3" + +block_device=/dev/nbd0 + +builddir=$(mktemp -d) + +cleanup() { + umount "$builddir/disk_image" || true + umount "$builddir/workdir" || true + qemu-nbd -d $block_device &> /dev/null || true + rm -rf $builddir +} +trap cleanup EXIT + +# Mount disk image +modprobe nbd max_part=63 +qemu-nbd -rc ${block_device} -P 1 "$disk_image_file" +mkdir "$builddir/disk_image" +mount -o ro ${block_device} "$builddir/disk_image" + +mkdir "$builddir/workdir" +mkdir "$builddir/diff" + +base_image_mounts="" + +# Unpack base image +if [ -n "$docker_base_image" ]; then + mkdir -p "$builddir/base" + docker pull "$docker_base_image" + docker save "$docker_base_image" | tar -xC "$builddir/base" + + image_id=$(docker inspect -f "{{.Id}}" "$docker_base_image") + while [ -n "$image_id" ]; do + mkdir -p "$builddir/base/$image_id/layer" + tar -xf "$builddir/base/$image_id/layer.tar" -C "$builddir/base/$image_id/layer" + + base_image_mounts="${base_image_mounts}:$builddir/base/$image_id/layer=ro+wh" + image_id=$(docker inspect -f "{{.Parent}}" "$image_id") + done +fi + +# Mount work directory +mount -t aufs -o "br=$builddir/diff=rw${base_image_mounts},dio,xino=/dev/shm/aufs.xino" none "$builddir/workdir" + +# Update files +cd $builddir +LC_ALL=C diff -rq disk_image workdir \ + | sed -re "s|Only in workdir(.*?): |DEL \1/|g;s|Only in disk_image(.*?): |ADD \1/|g;s|Files disk_image/(.+) and workdir/(.+) differ|UPDATE /\1|g" \ + | while read action entry; do + case "$action" in + ADD|UPDATE) + cp -a "disk_image$entry" "workdir$entry" + ;; + DEL) + rm -rf "workdir$entry" + ;; + *) + echo "Error: unknown diff line: $action $entry" >&2 + ;; + esac + done + +# Pack new image +new_image_id="$(for i in $(seq 1 32); do printf "%02x" $(($RANDOM % 256)); done)" +mkdir -p $builddir/result/$new_image_id +cd diff +tar -cf $builddir/result/$new_image_id/layer.tar * +echo "1.0" > $builddir/result/$new_image_id/VERSION +cat > $builddir/result/$new_image_id/json <<-EOS +{ "docker_version": "1.4.1" +, "id": "$new_image_id" +, "created": "$(date -u +%Y-%m-%dT%H:%M:%S.%NZ)" +EOS + +if [ -n "$docker_base_image" ]; then + image_id=$(docker inspect -f "{{.Id}}" "$docker_base_image") + echo ", \"parent\": \"$image_id\"" >> $builddir/result/$new_image_id/json +fi + +echo "}" >> $builddir/result/$new_image_id/json + +echo "{\"$image_name\":{\"$image_tag\":\"$new_image_id\"}}" > $builddir/result/repositories + +cd $builddir/result + +# mkdir -p $CURDIR/$image_name +# cp -r * $CURDIR/$image_name +tar -c * | docker load diff --git a/contrib/download-frozen-image.sh b/contrib/download-frozen-image.sh new file mode 100755 index 00000000..29d7ff59 --- /dev/null +++ b/contrib/download-frozen-image.sh @@ -0,0 +1,108 @@ +#!/bin/bash +set -e + +# hello-world latest ef872312fe1b 3 months ago 910 B +# hello-world latest ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9 3 months ago 910 B + +# debian latest f6fab3b798be 10 weeks ago 85.1 MB +# debian latest f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd 10 weeks ago 85.1 MB + +if ! 
command -v curl &> /dev/null; then + echo >&2 'error: "curl" not found!' + exit 1 +fi + +usage() { + echo "usage: $0 dir image[:tag][@image-id] ..." + echo " ie: $0 /tmp/hello-world hello-world" + echo " $0 /tmp/debian-jessie debian:jessie" + echo " $0 /tmp/old-hello-world hello-world@ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9" + echo " $0 /tmp/old-debian debian:latest@f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd" + [ -z "$1" ] || exit "$1" +} + +dir="$1" # dir for building tar in +shift || usage 1 >&2 + +[ $# -gt 0 -a "$dir" ] || usage 2 >&2 +mkdir -p "$dir" + +# hacky workarounds for Bash 3 support (no associative arrays) +images=() +rm -f "$dir"/tags-*.tmp +# repositories[busybox]='"latest": "...", "ubuntu-14.04": "..."' + +while [ $# -gt 0 ]; do + imageTag="$1" + shift + image="${imageTag%%[:@]*}" + tag="${imageTag#*:}" + imageId="${tag##*@}" + [ "$imageId" != "$tag" ] || imageId= + [ "$tag" != "$imageTag" ] || tag='latest' + tag="${tag%@*}" + + imageFile="${image//\//_}" # "/" can't be in filenames :) + + token="$(curl -sSL -o /dev/null -D- -H 'X-Docker-Token: true' "https://index.docker.io/v1/repositories/$image/images" | tr -d '\r' | awk -F ': *' '$1 == "X-Docker-Token" { print $2 }')" + + if [ -z "$imageId" ]; then + imageId="$(curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/repositories/$image/tags/$tag")" + imageId="${imageId//\"/}" + fi + + ancestryJson="$(curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/ancestry")" + if [ "${ancestryJson:0:1}" != '[' ]; then + echo >&2 "error: /v1/images/$imageId/ancestry returned something unexpected:" + echo >&2 " $ancestryJson" + exit 1 + fi + + IFS=',' + ancestry=( ${ancestryJson//[\[\] \"]/} ) + unset IFS + + if [ -s "$dir/tags-$imageFile.tmp" ]; then + echo -n ', ' >> "$dir/tags-$imageFile.tmp" + else + images=( "${images[@]}" "$image" ) + fi + echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$imageFile.tmp" + + echo "Downloading '$imageTag' (${#ancestry[@]} layers)..." + for imageId in "${ancestry[@]}"; do + mkdir -p "$dir/$imageId" + echo '1.0' > "$dir/$imageId/VERSION" + + curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/json" -o "$dir/$imageId/json" + + # TODO figure out why "-C -" doesn't work here + # "curl: (33) HTTP server doesn't seem to support byte ranges. Cannot resume." + # "HTTP/1.1 416 Requested Range Not Satisfiable" + if [ -f "$dir/$imageId/layer.tar" ]; then + # TODO hackpatch for no -C support :'( + echo "skipping existing ${imageId:0:12}" + continue + fi + curl -SL --progress -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/layer" -o "$dir/$imageId/layer.tar" # -C - + done + echo +done + +echo -n '{' > "$dir/repositories" +firstImage=1 +for image in "${images[@]}"; do + imageFile="${image//\//_}" # "/" can't be in filenames :) + + [ "$firstImage" ] || echo -n ',' >> "$dir/repositories" + firstImage= + echo -n $'\n\t' >> "$dir/repositories" + echo -n '"'"$image"'": { '"$(cat "$dir/tags-$imageFile.tmp")"' }' >> "$dir/repositories" +done +echo -n $'\n}\n' >> "$dir/repositories" + +rm -f "$dir"/tags-*.tmp + +echo "Download of images into '$dir' complete." +echo "Use something like the following to load the result into a Docker daemon:" +echo " tar -cC '$dir' . 
| docker load"
diff --git a/contrib/host-integration/Dockerfile.dev b/contrib/host-integration/Dockerfile.dev
new file mode 100644
index 00000000..c8df8528
--- /dev/null
+++ b/contrib/host-integration/Dockerfile.dev
@@ -0,0 +1,25 @@
+#
+# This Dockerfile will create an image that allows you to generate upstart and
+# systemd scripts (more to come)
+#
+
+FROM ubuntu:12.10
+MAINTAINER Guillaume J. Charmes
+
+RUN apt-get update && apt-get install -y wget git mercurial
+
+# Install Go
+RUN wget --no-check-certificate https://go.googlecode.com/files/go1.1.2.linux-amd64.tar.gz -O go-1.1.2.tar.gz
+RUN tar -xzvf go-1.1.2.tar.gz && mv /go /goroot
+RUN mkdir /go
+
+ENV GOROOT /goroot
+ENV GOPATH /go
+ENV PATH $GOROOT/bin:$PATH
+
+RUN go get github.com/docker/docker && cd /go/src/github.com/docker/docker && git checkout v0.6.3
+ADD manager.go /manager/
+RUN cd /manager && go build -o /usr/bin/manager
+
+ENTRYPOINT ["/usr/bin/manager"]
+
diff --git a/contrib/host-integration/Dockerfile.min b/contrib/host-integration/Dockerfile.min
new file mode 100644
index 00000000..60bb89b9
--- /dev/null
+++ b/contrib/host-integration/Dockerfile.min
@@ -0,0 +1,4 @@
+FROM busybox
+MAINTAINER Guillaume J. Charmes
+ADD manager /usr/bin/
+ENTRYPOINT ["/usr/bin/manager"]
diff --git a/contrib/host-integration/manager.go b/contrib/host-integration/manager.go
new file mode 100644
index 00000000..c0b488b2
--- /dev/null
+++ b/contrib/host-integration/manager.go
@@ -0,0 +1,130 @@
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"github.com/docker/docker"
+	"os"
+	"strings"
+	"text/template"
+)
+
+var templates = map[string]string{
+
+	"upstart": `description "{{.description}}"
+author "{{.author}}"
+start on filesystem and started lxc-net and started docker
+stop on runlevel [!2345]
+respawn
+exec /home/vagrant/goroot/bin/docker start -a {{.container_id}}
+`,
+
+	"systemd": `[Unit]
+	Description={{.description}}
+	Author={{.author}}
+	After=docker.service
+
+[Service]
+	Restart=always
+	ExecStart=/usr/bin/docker start -a {{.container_id}}
+	ExecStop=/usr/bin/docker stop -t 2 {{.container_id}}
+
+[Install]
+	WantedBy=local.target
+`,
}

+func main() {
+	// Parse command line for custom options
+	kind := flag.String("t", "upstart", "Type of manager requested")
+	author := flag.String("a", "", "Author of the image")
+	description := flag.String("d", "", "Description of the image")
+	flag.Usage = func() {
+		fmt.Fprintf(os.Stderr, "\nUsage: manager \n\n")
+		flag.PrintDefaults()
+	}
+	flag.Parse()
+
+	// We require at least the container ID
+	if flag.NArg() != 1 {
+		println(flag.NArg())
+		flag.Usage()
+		return
+	}
+
+	// Check that the requested process manager is supported
+	if _, exists := templates[*kind]; !exists {
+		panic("Unknown script template")
+	}
+
+	// Load the requested template
+	tpl, err := template.New("processManager").Parse(templates[*kind])
+	if err != nil {
+		panic(err)
+	}
+
+	// Create stdout/stderr buffers
+	bufOut := bytes.NewBuffer(nil)
+	bufErr := bytes.NewBuffer(nil)
+
+	// Instantiate the Docker CLI
+	cli := docker.NewDockerCli(nil, bufOut, bufErr, "unix", "/var/run/docker.sock", false, nil)
+	// Retrieve the container info
+	if err := cli.CmdInspect(flag.Arg(0)); err != nil {
+		// As of docker v0.6.3, CmdInspect always returns nil
+		panic(err)
+	}
+
+	// If there is nothing in the error buffer, then the Docker daemon is there and the container has been found
+	if bufErr.Len() == 0 {
+		// Unmarshal the resulting container data
+		c := []*docker.Container{{}}
+		if err := json.Unmarshal(bufOut.Bytes(), &c); err != nil {
+			panic(err)
+		}
+		// Reset the buffers
+		bufOut.Reset()
+		bufErr.Reset()
+		// Retrieve the info of the linked image
+		if err := cli.CmdInspect(c[0].Image); err != nil {
+			panic(err)
+		}
+		// If there is nothing in the error buffer, then the image has been found.
+		if bufErr.Len() == 0 {
+			// Unmarshal the resulting image data
+			img := []*docker.Image{{}}
+			if err := json.Unmarshal(bufOut.Bytes(), &img); err != nil {
+				panic(err)
+			}
+			// If no author has been set, use the one from the image
+			if *author == "" && img[0].Author != "" {
+				*author = strings.Replace(img[0].Author, "\"", "", -1)
+			}
+			// If no description has been set, use the comment from the image
+			if *description == "" && img[0].Comment != "" {
+				*description = strings.Replace(img[0].Comment, "\"", "", -1)
+			}
+		}
+	}
+
+	// Old version: Write the resulting script to file
+	// f, err := os.OpenFile(kind, os.O_CREATE|os.O_WRONLY, 0755)
+	// if err != nil {
+	// 	panic(err)
+	// }
+	// defer f.Close()
+
+	// Create a map with needed data
+	data := map[string]string{
+		"author":       *author,
+		"description":  *description,
+		"container_id": flag.Arg(0),
+	}
+
+	// Process the template and output it on Stdout
+	if err := tpl.Execute(os.Stdout, data); err != nil {
+		panic(err)
+	}
+}
diff --git a/contrib/host-integration/manager.sh b/contrib/host-integration/manager.sh
new file mode 100755
index 00000000..8ea296f5
--- /dev/null
+++ b/contrib/host-integration/manager.sh
@@ -0,0 +1,53 @@
+#!/bin/sh
+set -e
+
+usage() {
+	echo >&2 "usage: $0 [-a author] [-d description] container [manager]"
+	echo >&2 "   ie: $0 -a 'John Smith' 4ec9612a37cd systemd"
+	echo >&2 "       $0 -d 'Super Cool System' 4ec9612a37cd # defaults to upstart"
+	exit 1
+}
+
+auth=''
+desc=''
+have_auth=
+have_desc=
+while getopts a:d: opt; do
+	case "$opt" in
+		a)
+			auth="$OPTARG"
+			have_auth=1
+			;;
+		d)
+			desc="$OPTARG"
+			have_desc=1
+			;;
+	esac
+done
+shift $(($OPTIND - 1))
+
+[ $# -ge 1 -a $# -le 2 ] || usage
+
+cid="$1"
+script="${2:-upstart}"
+if [ ! -e "manager/$script" ]; then
+	echo >&2 "Error: manager type '$script' is unknown (PRs always welcome!)." 
+ echo >&2 'The currently supported types are:' + echo >&2 " $(cd manager && echo *)" + exit 1 +fi + +# TODO https://github.com/docker/docker/issues/734 (docker inspect formatting) +#if command -v docker > /dev/null 2>&1; then +# image="$(docker inspect -f '{{.Image}}' "$cid")" +# if [ "$image" ]; then +# if [ -z "$have_auth" ]; then +# auth="$(docker inspect -f '{{.Author}}' "$image")" +# fi +# if [ -z "$have_desc" ]; then +# desc="$(docker inspect -f '{{.Comment}}' "$image")" +# fi +# fi +#fi + +exec "manager/$script" "$cid" "$auth" "$desc" diff --git a/contrib/host-integration/manager/systemd b/contrib/host-integration/manager/systemd new file mode 100755 index 00000000..c1ab34ef --- /dev/null +++ b/contrib/host-integration/manager/systemd @@ -0,0 +1,20 @@ +#!/bin/sh +set -e + +cid="$1" +auth="$2" +desc="$3" + +cat <<-EOF + [Unit] + Description=$desc + Author=$auth + After=docker.service + + [Service] + ExecStart=/usr/bin/docker start -a $cid + ExecStop=/usr/bin/docker stop -t 2 $cid + + [Install] + WantedBy=local.target +EOF diff --git a/contrib/host-integration/manager/upstart b/contrib/host-integration/manager/upstart new file mode 100755 index 00000000..af90f1fd --- /dev/null +++ b/contrib/host-integration/manager/upstart @@ -0,0 +1,15 @@ +#!/bin/sh +set -e + +cid="$1" +auth="$2" +desc="$3" + +cat <<-EOF + description "$(echo "$desc" | sed 's/"/\\"/g')" + author "$(echo "$auth" | sed 's/"/\\"/g')" + start on filesystem and started lxc-net and started docker + stop on runlevel [!2345] + respawn + exec /usr/bin/docker start -a "$cid" +EOF diff --git a/contrib/httpserver/Dockerfile b/contrib/httpserver/Dockerfile new file mode 100644 index 00000000..747dc91b --- /dev/null +++ b/contrib/httpserver/Dockerfile @@ -0,0 +1,4 @@ +FROM busybox +EXPOSE 80/tcp +COPY httpserver . 
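+# httpserver is expected to be a statically linked build of server.go from
+# this directory; a minimal build-and-run sketch (the exact go build flags
+# are an assumption, not prescribed by this repo):
+#   CGO_ENABLED=0 go build -o httpserver server.go
+#   docker build -t httpserver .
+#   docker run -d -p 80:80 -v /path/to/static/files:/static httpserver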
+CMD ["./httpserver"] diff --git a/contrib/httpserver/server.go b/contrib/httpserver/server.go new file mode 100644 index 00000000..a75d5abb --- /dev/null +++ b/contrib/httpserver/server.go @@ -0,0 +1,12 @@ +package main + +import ( + "log" + "net/http" +) + +func main() { + fs := http.FileServer(http.Dir("/static")) + http.Handle("/", fs) + log.Panic(http.ListenAndServe(":80", nil)) +} diff --git a/contrib/init/openrc/docker.confd b/contrib/init/openrc/docker.confd new file mode 100644 index 00000000..ae247c00 --- /dev/null +++ b/contrib/init/openrc/docker.confd @@ -0,0 +1,13 @@ +# /etc/conf.d/docker: config file for /etc/init.d/docker + +# where the docker daemon output gets piped +#DOCKER_LOGFILE="/var/log/docker.log" + +# where docker's pid get stored +#DOCKER_PIDFILE="/run/docker.pid" + +# where the docker daemon itself is run from +#DOCKER_BINARY="/usr/bin/docker" + +# any other random options you want to pass to docker +DOCKER_OPTS="" diff --git a/contrib/init/openrc/docker.initd b/contrib/init/openrc/docker.initd new file mode 100755 index 00000000..221776c2 --- /dev/null +++ b/contrib/init/openrc/docker.initd @@ -0,0 +1,34 @@ +#!/sbin/runscript +# Copyright 1999-2013 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 +# $Header: $ + +DOCKER_LOGFILE=${DOCKER_LOGFILE:-/var/log/${SVCNAME}.log} +DOCKER_PIDFILE=${DOCKER_PIDFILE:-/run/${SVCNAME}.pid} +DOCKER_BINARY=${DOCKER_BINARY:-/usr/bin/docker} +DOCKER_OPTS=${DOCKER_OPTS:-} + +start() { + checkpath -f -m 0644 -o root:docker "$DOCKER_LOGFILE" + + ulimit -n 1048576 + ulimit -u 1048576 + + ebegin "Starting docker daemon" + start-stop-daemon --start --background \ + --exec "$DOCKER_BINARY" \ + --pidfile "$DOCKER_PIDFILE" \ + --stdout "$DOCKER_LOGFILE" \ + --stderr "$DOCKER_LOGFILE" \ + -- daemon -p "$DOCKER_PIDFILE" \ + $DOCKER_OPTS + eend $? +} + +stop() { + ebegin "Stopping docker daemon" + start-stop-daemon --stop \ + --exec "$DOCKER_BINARY" \ + --pidfile "$DOCKER_PIDFILE" + eend $? 
+} diff --git a/contrib/init/systemd/REVIEWERS b/contrib/init/systemd/REVIEWERS new file mode 100644 index 00000000..b9ba55b3 --- /dev/null +++ b/contrib/init/systemd/REVIEWERS @@ -0,0 +1,3 @@ +Lokesh Mandvekar (@lsm5) +Brandon Philips (@philips) +Jessie Frazelle (@jfrazelle) diff --git a/contrib/init/systemd/docker.service b/contrib/init/systemd/docker.service new file mode 100644 index 00000000..f09c2d39 --- /dev/null +++ b/contrib/init/systemd/docker.service @@ -0,0 +1,16 @@ +[Unit] +Description=Docker Application Container Engine +Documentation=https://docs.docker.com +After=network.target docker.socket +Requires=docker.socket + +[Service] +Type=notify +ExecStart=/usr/bin/docker daemon -H fd:// +MountFlags=slave +LimitNOFILE=1048576 +LimitNPROC=1048576 +LimitCORE=infinity + +[Install] +WantedBy=multi-user.target diff --git a/contrib/init/systemd/docker.socket b/contrib/init/systemd/docker.socket new file mode 100644 index 00000000..7dd95098 --- /dev/null +++ b/contrib/init/systemd/docker.socket @@ -0,0 +1,12 @@ +[Unit] +Description=Docker Socket for the API +PartOf=docker.service + +[Socket] +ListenStream=/var/run/docker.sock +SocketMode=0660 +SocketUser=root +SocketGroup=docker + +[Install] +WantedBy=sockets.target diff --git a/contrib/init/sysvinit-debian/docker b/contrib/init/sysvinit-debian/docker new file mode 100755 index 00000000..11500a0a --- /dev/null +++ b/contrib/init/sysvinit-debian/docker @@ -0,0 +1,149 @@ +#!/bin/sh +set -e + +### BEGIN INIT INFO +# Provides: docker +# Required-Start: $syslog $remote_fs +# Required-Stop: $syslog $remote_fs +# Should-Start: cgroupfs-mount cgroup-lite +# Should-Stop: cgroupfs-mount cgroup-lite +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Create lightweight, portable, self-sufficient containers. +# Description: +# Docker is an open-source project to easily create lightweight, portable, +# self-sufficient containers from any application. The same container that a +# developer builds and tests on a laptop can run at scale, in production, on +# VMs, bare metal, OpenStack clusters, public clouds and more. +### END INIT INFO + +export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin + +BASE=$(basename $0) + +# modify these in /etc/default/$BASE (/etc/default/docker) +DOCKER=/usr/bin/$BASE +# This is the pid file managed by docker itself +DOCKER_PIDFILE=/var/run/$BASE.pid +# This is the pid file created/managed by start-stop-daemon +DOCKER_SSD_PIDFILE=/var/run/$BASE-ssd.pid +DOCKER_LOGFILE=/var/log/$BASE.log +DOCKER_OPTS= +DOCKER_DESC="Docker" + +# Get lsb functions +. /lib/lsb/init-functions + +if [ -f /etc/default/$BASE ]; then + . /etc/default/$BASE +fi + +# Check docker is present +if [ ! -x $DOCKER ]; then + log_failure_msg "$DOCKER not present or not executable" + exit 1 +fi + +check_init() { + # see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it directly) + if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then + log_failure_msg "$DOCKER_DESC is managed via upstart, try using service $BASE $1" + exit 1 + fi +} + +fail_unless_root() { + if [ "$(id -u)" != '0' ]; then + log_failure_msg "$DOCKER_DESC must be run as root" + exit 1 + fi +} + +cgroupfs_mount() { + # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount + if grep -v '^#' /etc/fstab | grep -q cgroup \ + || [ ! -e /proc/cgroups ] \ + || [ ! -d /sys/fs/cgroup ]; then + return + fi + if ! 
mountpoint -q /sys/fs/cgroup; then + mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup + fi + ( + cd /sys/fs/cgroup + for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do + mkdir -p $sys + if ! mountpoint -q $sys; then + if ! mount -n -t cgroup -o $sys cgroup $sys; then + rmdir $sys || true + fi + fi + done + ) +} + +case "$1" in + start) + check_init + + fail_unless_root + + cgroupfs_mount + + touch "$DOCKER_LOGFILE" + chgrp docker "$DOCKER_LOGFILE" + + ulimit -n 1048576 + if [ "$BASH" ]; then + ulimit -u 1048576 + else + ulimit -p 1048576 + fi + + log_begin_msg "Starting $DOCKER_DESC: $BASE" + start-stop-daemon --start --background \ + --no-close \ + --exec "$DOCKER" \ + --pidfile "$DOCKER_SSD_PIDFILE" \ + --make-pidfile \ + -- \ + daemon -p "$DOCKER_PIDFILE" \ + $DOCKER_OPTS \ + >> "$DOCKER_LOGFILE" 2>&1 + log_end_msg $? + ;; + + stop) + check_init + fail_unless_root + log_begin_msg "Stopping $DOCKER_DESC: $BASE" + start-stop-daemon --stop --pidfile "$DOCKER_SSD_PIDFILE" + log_end_msg $? + ;; + + restart) + check_init + fail_unless_root + docker_pid=`cat "$DOCKER_SSD_PIDFILE" 2>/dev/null` + [ -n "$docker_pid" ] \ + && ps -p $docker_pid > /dev/null 2>&1 \ + && $0 stop + $0 start + ;; + + force-reload) + check_init + fail_unless_root + $0 restart + ;; + + status) + check_init + status_of_proc -p "$DOCKER_SSD_PIDFILE" "$DOCKER" "$DOCKER_DESC" + ;; + + *) + echo "Usage: service docker {start|stop|restart|status}" + exit 1 + ;; +esac diff --git a/contrib/init/sysvinit-debian/docker.default b/contrib/init/sysvinit-debian/docker.default new file mode 100644 index 00000000..14e66017 --- /dev/null +++ b/contrib/init/sysvinit-debian/docker.default @@ -0,0 +1,13 @@ +# Docker Upstart and SysVinit configuration file + +# Customize location of Docker binary (especially for development testing). +#DOCKER="/usr/local/bin/docker" + +# Use DOCKER_OPTS to modify the daemon startup options. +#DOCKER_OPTS="--dns 8.8.8.8 --dns 8.8.4.4" + +# If you need Docker to use an HTTP proxy, it can also be specified here. +#export http_proxy="http://127.0.0.1:3128/" + +# This is also a handy place to tweak where Docker's temporary files go. +#export TMPDIR="/mnt/bigdrive/docker-tmp" diff --git a/contrib/init/sysvinit-redhat/docker b/contrib/init/sysvinit-redhat/docker new file mode 100755 index 00000000..6d00e3ca --- /dev/null +++ b/contrib/init/sysvinit-redhat/docker @@ -0,0 +1,140 @@ +#!/bin/sh +# +# /etc/rc.d/init.d/docker +# +# Daemon for docker.com +# +# chkconfig: 2345 95 95 +# description: Daemon for docker.com + +### BEGIN INIT INFO +# Provides: docker +# Required-Start: $network cgconfig +# Required-Stop: +# Should-Start: +# Should-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: start and stop docker +# Description: Daemon for docker.com +### END INIT INFO + +# Source function library. +. /etc/rc.d/init.d/functions + +prog="docker" +unshare=/usr/bin/unshare +exec="/usr/bin/$prog" +pidfile="/var/run/$prog.pid" +lockfile="/var/lock/subsys/$prog" +logfile="/var/log/$prog" + +[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog + +prestart() { + service cgconfig status > /dev/null + + if [[ $? != 0 ]]; then + service cgconfig start + fi + +} + +start() { + [ -x $exec ] || exit 5 + + check_for_cleanup + + if ! [ -f $pidfile ]; then + prestart + printf "Starting $prog:\t" + echo "\n$(date)\n" >> $logfile + "$unshare" -m -- $exec daemon $other_args &>> $logfile & + pid=$! + touch $lockfile + # wait up to 10 seconds for the pidfile to exist. 
see + # https://github.com/docker/docker/issues/5359 + tries=0 + while [ ! -f $pidfile -a $tries -lt 10 ]; do + sleep 1 + tries=$((tries + 1)) + done + success + echo + else + failure + echo + printf "$pidfile still exists...\n" + exit 7 + fi +} + +stop() { + echo -n $"Stopping $prog: " + killproc -p $pidfile -d 300 $prog + retval=$? + echo + [ $retval -eq 0 ] && rm -f $lockfile + return $retval +} + +restart() { + stop + start +} + +reload() { + restart +} + +force_reload() { + restart +} + +rh_status() { + status -p $pidfile $prog +} + +rh_status_q() { + rh_status >/dev/null 2>&1 +} + + +check_for_cleanup() { + if [ -f ${pidfile} ]; then + /bin/ps -fp $(cat ${pidfile}) > /dev/null || rm ${pidfile} + fi +} + +case "$1" in + start) + rh_status_q && exit 0 + $1 + ;; + stop) + rh_status_q || exit 0 + $1 + ;; + restart) + $1 + ;; + reload) + rh_status_q || exit 7 + $1 + ;; + force-reload) + force_reload + ;; + status) + rh_status + ;; + condrestart|try-restart) + rh_status_q || exit 0 + restart + ;; + *) + echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}" + exit 2 +esac + +exit $? diff --git a/contrib/init/sysvinit-redhat/docker.sysconfig b/contrib/init/sysvinit-redhat/docker.sysconfig new file mode 100644 index 00000000..0864b3d7 --- /dev/null +++ b/contrib/init/sysvinit-redhat/docker.sysconfig @@ -0,0 +1,7 @@ +# /etc/sysconfig/docker +# +# Other arguments to pass to the docker daemon process +# These will be parsed by the sysv initscript and appended +# to the arguments list passed to docker daemon + +other_args="" diff --git a/contrib/init/upstart/REVIEWERS b/contrib/init/upstart/REVIEWERS new file mode 100644 index 00000000..03ee2dde --- /dev/null +++ b/contrib/init/upstart/REVIEWERS @@ -0,0 +1,2 @@ +Tianon Gravi (@tianon) +Jessie Frazelle (@jfrazelle) diff --git a/contrib/init/upstart/docker.conf b/contrib/init/upstart/docker.conf new file mode 100644 index 00000000..ec50b35a --- /dev/null +++ b/contrib/init/upstart/docker.conf @@ -0,0 +1,60 @@ +description "Docker daemon" + +start on (local-filesystems and net-device-up IFACE!=lo) +stop on runlevel [!2345] +limit nofile 524288 1048576 +limit nproc 524288 1048576 + +respawn + +kill timeout 20 + +pre-start script + # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount + if grep -v '^#' /etc/fstab | grep -q cgroup \ + || [ ! -e /proc/cgroups ] \ + || [ ! -d /sys/fs/cgroup ]; then + exit 0 + fi + if ! mountpoint -q /sys/fs/cgroup; then + mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup + fi + ( + cd /sys/fs/cgroup + for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do + mkdir -p $sys + if ! mountpoint -q $sys; then + if ! mount -n -t cgroup -o $sys cgroup $sys; then + rmdir $sys || true + fi + fi + done + ) +end script + +script + # modify these in /etc/default/$UPSTART_JOB (/etc/default/docker) + DOCKER=/usr/bin/$UPSTART_JOB + DOCKER_OPTS= + if [ -f /etc/default/$UPSTART_JOB ]; then + . /etc/default/$UPSTART_JOB + fi + exec "$DOCKER" daemon $DOCKER_OPTS +end script + +# Don't emit "started" event until docker.sock is ready. +# See https://github.com/docker/docker/issues/6647 +post-start script + DOCKER_OPTS= + if [ -f /etc/default/$UPSTART_JOB ]; then + . /etc/default/$UPSTART_JOB + fi + if ! printf "%s" "$DOCKER_OPTS" | grep -qE -e '-H|--host'; then + while ! 
[ -e /var/run/docker.sock ]; do + initctl status $UPSTART_JOB | grep -qE "(stop|respawn)/" && exit 1 + echo "Waiting for /var/run/docker.sock" + sleep 0.1 + done + echo "/var/run/docker.sock is up" + fi +end script diff --git a/contrib/mkimage-alpine.sh b/contrib/mkimage-alpine.sh new file mode 100755 index 00000000..71fb9143 --- /dev/null +++ b/contrib/mkimage-alpine.sh @@ -0,0 +1,82 @@ +#!/bin/sh + +set -e + +[ $(id -u) -eq 0 ] || { + printf >&2 '%s requires root\n' "$0" + exit 1 +} + +usage() { + printf >&2 '%s: [-r release] [-m mirror] [-s]\n' "$0" + exit 1 +} + +tmp() { + TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-XXXXXXXXXX) + ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-rootfs-XXXXXXXXXX) + trap "rm -rf $TMP $ROOTFS" EXIT TERM INT +} + +apkv() { + curl -sSL $REPO/$ARCH/APKINDEX.tar.gz | tar -Oxz | + grep '^P:apk-tools-static$' -A1 | tail -n1 | cut -d: -f2 +} + +getapk() { + curl -sSL $REPO/$ARCH/apk-tools-static-$(apkv).apk | + tar -xz -C $TMP sbin/apk.static +} + +mkbase() { + $TMP/sbin/apk.static --repository $REPO --update-cache --allow-untrusted \ + --root $ROOTFS --initdb add alpine-base +} + +conf() { + printf '%s\n' $REPO > $ROOTFS/etc/apk/repositories +} + +pack() { + local id + id=$(tar --numeric-owner -C $ROOTFS -c . | docker import - alpine:$REL) + + docker tag $id alpine:latest + docker run -i -t alpine printf 'alpine:%s with id=%s created!\n' $REL $id +} + +save() { + [ $SAVE -eq 1 ] || return + + tar --numeric-owner -C $ROOTFS -c . | xz > rootfs.tar.xz +} + +while getopts "hr:m:s" opt; do + case $opt in + r) + REL=$OPTARG + ;; + m) + MIRROR=$OPTARG + ;; + s) + SAVE=1 + ;; + *) + usage + ;; + esac +done + +REL=${REL:-edge} +MIRROR=${MIRROR:-http://nl.alpinelinux.org/alpine} +SAVE=${SAVE:-0} +REPO=$MIRROR/$REL/main +ARCH=${ARCH:-$(uname -m)} + +tmp +getapk +mkbase +conf +pack +save diff --git a/contrib/mkimage-arch-pacman.conf b/contrib/mkimage-arch-pacman.conf new file mode 100644 index 00000000..45fe03dc --- /dev/null +++ b/contrib/mkimage-arch-pacman.conf @@ -0,0 +1,92 @@ +# +# /etc/pacman.conf +# +# See the pacman.conf(5) manpage for option and repository directives + +# +# GENERAL OPTIONS +# +[options] +# The following paths are commented out with their default values listed. +# If you wish to use different paths, uncomment and update the paths. +#RootDir = / +#DBPath = /var/lib/pacman/ +#CacheDir = /var/cache/pacman/pkg/ +#LogFile = /var/log/pacman.log +#GPGDir = /etc/pacman.d/gnupg/ +HoldPkg = pacman glibc +#XferCommand = /usr/bin/curl -C - -f %u > %o +#XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u +#CleanMethod = KeepInstalled +#UseDelta = 0.7 +Architecture = auto + +# Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup +#IgnorePkg = +#IgnoreGroup = + +#NoUpgrade = +#NoExtract = + +# Misc options +#UseSyslog +#Color +#TotalDownload +# We cannot check disk space from within a chroot environment +#CheckSpace +#VerbosePkgLists + +# By default, pacman accepts packages signed by keys that its local keyring +# trusts (see pacman-key and its man page), as well as unsigned packages. +SigLevel = Required DatabaseOptional +LocalFileSigLevel = Optional +#RemoteFileSigLevel = Required + +# NOTE: You must run `pacman-key --init` before first using pacman; the local +# keyring can then be populated with the keys of all official Arch Linux +# packagers with `pacman-key --populate archlinux`. 
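+#
+# For example, a typical first run on a fresh system might look like this
+# (a sketch based on the note above, not part of this configuration):
+#   pacman-key --init
+#   pacman-key --populate archlinux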
+ +# +# REPOSITORIES +# - can be defined here or included from another file +# - pacman will search repositories in the order defined here +# - local/custom mirrors can be added here or in separate files +# - repositories listed first will take precedence when packages +# have identical names, regardless of version number +# - URLs will have $repo replaced by the name of the current repo +# - URLs will have $arch replaced by the name of the architecture +# +# Repository entries are of the format: +# [repo-name] +# Server = ServerName +# Include = IncludePath +# +# The header [repo-name] is crucial - it must be present and +# uncommented to enable the repo. +# + +# The testing repositories are disabled by default. To enable, uncomment the +# repo name header and Include lines. You can add preferred servers immediately +# after the header, and they will be used before the default mirrors. + +#[testing] +#Include = /etc/pacman.d/mirrorlist + +[core] +Include = /etc/pacman.d/mirrorlist + +[extra] +Include = /etc/pacman.d/mirrorlist + +#[community-testing] +#Include = /etc/pacman.d/mirrorlist + +[community] +Include = /etc/pacman.d/mirrorlist + +# An example of a custom package repository. See the pacman manpage for +# tips on creating your own repositories. +#[custom] +#SigLevel = Optional TrustAll +#Server = file:///home/custompkgs + diff --git a/contrib/mkimage-arch.sh b/contrib/mkimage-arch.sh new file mode 100755 index 00000000..06406fec --- /dev/null +++ b/contrib/mkimage-arch.sh @@ -0,0 +1,93 @@ +#!/usr/bin/env bash +# Generate a minimal filesystem for archlinux and load it into the local +# docker as "archlinux" +# requires root +set -e + +hash pacstrap &>/dev/null || { + echo "Could not find pacstrap. Run pacman -S arch-install-scripts" + exit 1 +} + +hash expect &>/dev/null || { + echo "Could not find expect. Run pacman -S expect" + exit 1 +} + +export LANG="C.UTF-8" + +ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-archlinux-XXXXXXXXXX) +chmod 755 $ROOTFS + +# packages to ignore for space savings +PKGIGNORE=( + cryptsetup + device-mapper + dhcpcd + iproute2 + jfsutils + linux + lvm2 + man-db + man-pages + mdadm + nano + netctl + openresolv + pciutils + pcmciautils + reiserfsprogs + s-nail + systemd-sysvcompat + usbutils + vi + xfsprogs +) +IFS=',' +PKGIGNORE="${PKGIGNORE[*]}" +unset IFS + +expect < $ROOTFS/etc/locale.gen +arch-chroot $ROOTFS locale-gen +arch-chroot $ROOTFS /bin/sh -c 'echo "Server = https://mirrors.kernel.org/archlinux/\$repo/os/\$arch" > /etc/pacman.d/mirrorlist' + +# udev doesn't work in containers, rebuild /dev +DEV=$ROOTFS/dev +rm -rf $DEV +mkdir -p $DEV +mknod -m 666 $DEV/null c 1 3 +mknod -m 666 $DEV/zero c 1 5 +mknod -m 666 $DEV/random c 1 8 +mknod -m 666 $DEV/urandom c 1 9 +mkdir -m 755 $DEV/pts +mkdir -m 1777 $DEV/shm +mknod -m 666 $DEV/tty c 5 0 +mknod -m 600 $DEV/console c 5 1 +mknod -m 666 $DEV/tty0 c 4 0 +mknod -m 666 $DEV/full c 1 7 +mknod -m 600 $DEV/initctl p +mknod -m 666 $DEV/ptmx c 5 2 +ln -sf /proc/self/fd $DEV/fd + +tar --numeric-owner --xattrs --acls -C $ROOTFS -c . | docker import - archlinux +docker run -t archlinux echo Success. +rm -rf $ROOTFS diff --git a/contrib/mkimage-busybox.sh b/contrib/mkimage-busybox.sh new file mode 100755 index 00000000..b11a6bb2 --- /dev/null +++ b/contrib/mkimage-busybox.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +# Generate a very minimal filesystem based on busybox-static, +# and load it into the local docker under the name "busybox". 
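+#
+# Usage sketch (assumptions: a Debian-like host, since the script copies
+# libraries from /lib/x86_64-linux-gnu, and root for the device nodes):
+#   sudo apt-get install busybox-static
+#   sudo ./contrib/mkimage-busybox.sh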
+ +echo >&2 +echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/busybox-static' +echo >&2 + +BUSYBOX=$(which busybox) +[ "$BUSYBOX" ] || { + echo "Sorry, I could not locate busybox." + echo "Try 'apt-get install busybox-static'?" + exit 1 +} + +set -e +ROOTFS=${TMPDIR:-/var/tmp}/rootfs-busybox-$$-$RANDOM +mkdir $ROOTFS +cd $ROOTFS + +mkdir bin etc dev dev/pts lib proc sys tmp +touch etc/resolv.conf +cp /etc/nsswitch.conf etc/nsswitch.conf +echo root:x:0:0:root:/:/bin/sh > etc/passwd +echo root:x:0: > etc/group +ln -s lib lib64 +ln -s bin sbin +cp $BUSYBOX bin +for X in $(busybox --list) +do + ln -s busybox bin/$X +done +rm bin/init +ln bin/busybox bin/init +cp /lib/x86_64-linux-gnu/lib{pthread,c,dl,nsl,nss_*}.so.* lib +cp /lib/x86_64-linux-gnu/ld-linux-x86-64.so.2 lib +for X in console null ptmx random stdin stdout stderr tty urandom zero +do + cp -a /dev/$X dev +done + +tar --numeric-owner -cf- . | docker import - busybox +docker run -i -u root busybox /bin/echo Success. diff --git a/contrib/mkimage-crux.sh b/contrib/mkimage-crux.sh new file mode 100755 index 00000000..3f0bdcae --- /dev/null +++ b/contrib/mkimage-crux.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash +# Generate a minimal filesystem for CRUX/Linux and load it into the local +# docker as "cruxlinux" +# requires root and the crux iso (http://crux.nu) + +set -e + +die () { + echo >&2 "$@" + exit 1 +} + +[ "$#" -eq 1 ] || die "1 argument(s) required, $# provided. Usage: ./mkimage-crux.sh /path/to/iso" + +ISO=${1} + +ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-crux-XXXXXXXXXX) +CRUX=$(mktemp -d ${TMPDIR:-/var/tmp}/crux-XXXXXXXXXX) +TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/XXXXXXXXXX) + +VERSION=$(basename --suffix=.iso $ISO | sed 's/[^0-9.]*\([0-9.]*\).*/\1/') + +# Mount the ISO +mount -o ro,loop $ISO $CRUX + +# Extract pkgutils +tar -C $TMP -xf $CRUX/tools/pkgutils#*.pkg.tar.gz + +# Put pkgadd in the $PATH +export PATH="$TMP/usr/bin:$PATH" + +# Install core packages +mkdir -p $ROOTFS/var/lib/pkg +touch $ROOTFS/var/lib/pkg/db +for pkg in $CRUX/crux/core/*; do + pkgadd -r $ROOTFS $pkg +done + +# Remove agetty and inittab config +if (grep agetty ${ROOTFS}/etc/inittab 2>&1 > /dev/null); then + echo "Removing agetty from /etc/inittab ..." + chroot ${ROOTFS} sed -i -e "/agetty/d" /etc/inittab + chroot ${ROOTFS} sed -i -e "/shutdown/d" /etc/inittab + chroot ${ROOTFS} sed -i -e "/^$/N;/^\n$/d" /etc/inittab +fi + +# Remove kernel source +rm -rf $ROOTFS/usr/src/* + +# udev doesn't work in containers, rebuild /dev +DEV=$ROOTFS/dev +rm -rf $DEV +mkdir -p $DEV +mknod -m 666 $DEV/null c 1 3 +mknod -m 666 $DEV/zero c 1 5 +mknod -m 666 $DEV/random c 1 8 +mknod -m 666 $DEV/urandom c 1 9 +mkdir -m 755 $DEV/pts +mkdir -m 1777 $DEV/shm +mknod -m 666 $DEV/tty c 5 0 +mknod -m 600 $DEV/console c 5 1 +mknod -m 666 $DEV/tty0 c 4 0 +mknod -m 666 $DEV/full c 1 7 +mknod -m 600 $DEV/initctl p +mknod -m 666 $DEV/ptmx c 5 2 + +IMAGE_ID=$(tar --numeric-owner -C $ROOTFS -c . | docker import - crux:$VERSION) +docker tag $IMAGE_ID crux:latest +docker run -i -t crux echo Success. 
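+# At this point the rootfs has been imported as crux:$VERSION and tagged
+# crux:latest, so e.g. `docker run -i -t crux /bin/sh` (a sketch, not part
+# of the original script) would drop into the new image.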
+ +# Cleanup +umount $CRUX +rm -rf $ROOTFS +rm -rf $CRUX +rm -rf $TMP diff --git a/contrib/mkimage-debootstrap.sh b/contrib/mkimage-debootstrap.sh new file mode 100755 index 00000000..412a5ce0 --- /dev/null +++ b/contrib/mkimage-debootstrap.sh @@ -0,0 +1,297 @@ +#!/usr/bin/env bash +set -e + +echo >&2 +echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/debootstrap' +echo >&2 + +variant='minbase' +include='iproute,iputils-ping' +arch='amd64' # intentionally undocumented for now +skipDetection= +strictDebootstrap= +justTar= + +usage() { + echo >&2 + + echo >&2 "usage: $0 [options] repo suite [mirror]" + + echo >&2 + echo >&2 'options: (not recommended)' + echo >&2 " -p set an http_proxy for debootstrap" + echo >&2 " -v $variant # change default debootstrap variant" + echo >&2 " -i $include # change default package includes" + echo >&2 " -d # strict debootstrap (do not apply any docker-specific tweaks)" + echo >&2 " -s # skip version detection and tagging (ie, precise also tagged as 12.04)" + echo >&2 " # note that this will also skip adding universe and/or security/updates to sources.list" + echo >&2 " -t # just create a tarball, especially for dockerbrew (uses repo as tarball name)" + + echo >&2 + echo >&2 " ie: $0 username/debian squeeze" + echo >&2 " $0 username/debian squeeze http://ftp.uk.debian.org/debian/" + + echo >&2 + echo >&2 " ie: $0 username/ubuntu precise" + echo >&2 " $0 username/ubuntu precise http://mirrors.melbourne.co.uk/ubuntu/" + + echo >&2 + echo >&2 " ie: $0 -t precise.tar.bz2 precise" + echo >&2 " $0 -t wheezy.tgz wheezy" + echo >&2 " $0 -t wheezy-uk.tar.xz wheezy http://ftp.uk.debian.org/debian/" + + echo >&2 +} + +# these should match the names found at http://www.debian.org/releases/ +debianStable=wheezy +debianUnstable=sid +# this should match the name found at http://releases.ubuntu.com/ +ubuntuLatestLTS=trusty +# this should match the name found at http://releases.tanglu.org/ +tangluLatest=aequorea + +while getopts v:i:a:p:dst name; do + case "$name" in + p) + http_proxy="$OPTARG" + ;; + v) + variant="$OPTARG" + ;; + i) + include="$OPTARG" + ;; + a) + arch="$OPTARG" + ;; + d) + strictDebootstrap=1 + ;; + s) + skipDetection=1 + ;; + t) + justTar=1 + ;; + ?) + usage + exit 0 + ;; + esac +done +shift $(($OPTIND - 1)) + +repo="$1" +suite="$2" +mirror="${3:-}" # stick to the default debootstrap mirror if one is not provided + +if [ ! "$repo" ] || [ ! "$suite" ]; then + usage + exit 1 +fi + +# some rudimentary detection for whether we need to "sudo" our docker calls +docker='' +if docker version > /dev/null 2>&1; then + docker='docker' +elif sudo docker version > /dev/null 2>&1; then + docker='sudo docker' +elif command -v docker > /dev/null 2>&1; then + docker='docker' +else + echo >&2 "warning: either docker isn't installed, or your current user cannot run it;" + echo >&2 " this script is not likely to work as expected" + sleep 3 + docker='docker' # give us a command-not-found later +fi + +# make sure we have an absolute path to our final tarball so we can still reference it properly after we change directory +if [ "$justTar" ]; then + if [ ! 
-d "$(dirname "$repo")" ]; then + echo >&2 "error: $(dirname "$repo") does not exist" + exit 1 + fi + repo="$(cd "$(dirname "$repo")" && pwd -P)/$(basename "$repo")" +fi + +# will be filled in later, if [ -z "$skipDetection" ] +lsbDist='' + +target="${TMPDIR:-/var/tmp}/docker-rootfs-debootstrap-$suite-$$-$RANDOM" + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" +returnTo="$(pwd -P)" + +if [ "$suite" = 'lucid' ]; then + # lucid fails and doesn't include gpgv in minbase; "apt-get update" fails + include+=',gpgv' +fi + +set -x + +# bootstrap +mkdir -p "$target" +sudo http_proxy=$http_proxy debootstrap --verbose --variant="$variant" --include="$include" --arch="$arch" "$suite" "$target" "$mirror" + +cd "$target" + +if [ -z "$strictDebootstrap" ]; then + # prevent init scripts from running during install/update + # policy-rc.d (for most scripts) + echo $'#!/bin/sh\nexit 101' | sudo tee usr/sbin/policy-rc.d > /dev/null + sudo chmod +x usr/sbin/policy-rc.d + # initctl (for some pesky upstart scripts) + sudo chroot . dpkg-divert --local --rename --add /sbin/initctl + sudo ln -sf /bin/true sbin/initctl + # see https://github.com/docker/docker/issues/446#issuecomment-16953173 + + # shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB) + sudo chroot . apt-get clean + + if strings usr/bin/dpkg | grep -q unsafe-io; then + # while we're at it, apt is unnecessarily slow inside containers + # this forces dpkg not to call sync() after package extraction and speeds up install + # the benefit is huge on spinning disks, and the penalty is nonexistent on SSD or decent server virtualization + echo 'force-unsafe-io' | sudo tee etc/dpkg/dpkg.cfg.d/02apt-speedup > /dev/null + # we have this wrapped up in an "if" because the "force-unsafe-io" + # option was added in dpkg 1.15.8.6 + # (see http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=584254#82), + # and ubuntu lucid/10.04 only has 1.15.5.6 + fi + + # we want to effectively run "apt-get clean" after every install to keep images small (see output of "apt-get clean -s" for context) + { + aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";' + echo "DPkg::Post-Invoke { ${aptGetClean} };" + echo "APT::Update::Post-Invoke { ${aptGetClean} };" + echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";' + } | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null + + # and remove the translations, too + echo 'Acquire::Languages "none";' | sudo tee etc/apt/apt.conf.d/no-languages > /dev/null + + # helpful undo lines for each the above tweaks (for lack of a better home to keep track of them): + # rm /usr/sbin/policy-rc.d + # rm /sbin/initctl; dpkg-divert --rename --remove /sbin/initctl + # rm /etc/dpkg/dpkg.cfg.d/02apt-speedup + # rm /etc/apt/apt.conf.d/no-cache + # rm /etc/apt/apt.conf.d/no-languages + + if [ -z "$skipDetection" ]; then + # see also rudimentary platform detection in hack/install.sh + lsbDist='' + if [ -r etc/lsb-release ]; then + lsbDist="$(. 
etc/lsb-release && echo "$DISTRIB_ID")" + fi + if [ -z "$lsbDist" ] && [ -r etc/debian_version ]; then + lsbDist='Debian' + fi + + case "$lsbDist" in + Debian) + # add the updates and security repositories + if [ "$suite" != "$debianUnstable" -a "$suite" != 'unstable' ]; then + # ${suite}-updates only applies to non-unstable + sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list + + # same for security updates + echo "deb http://security.debian.org/ $suite/updates main" | sudo tee -a etc/apt/sources.list > /dev/null + fi + ;; + Ubuntu) + # add the universe, updates, and security repositories + sudo sed -i " + s/ $suite main$/ $suite main universe/; p; + s/ $suite main/ ${suite}-updates main/; p; + s/ $suite-updates main/ ${suite}-security main/ + " etc/apt/sources.list + ;; + Tanglu) + # add the updates repository + if [ "$suite" = "$tangluLatest" ]; then + # ${suite}-updates only applies to stable Tanglu versions + sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list + fi + ;; + SteamOS) + # add contrib and non-free + sudo sed -i "s/ $suite main$/ $suite main contrib non-free/" etc/apt/sources.list + ;; + esac + fi + + # make sure our packages lists are as up to date as we can get them + sudo chroot . apt-get update + sudo chroot . apt-get dist-upgrade -y +fi + +if [ "$justTar" ]; then + # create the tarball file so it has the right permissions (ie, not root) + touch "$repo" + + # fill the tarball + sudo tar --numeric-owner -caf "$repo" . +else + # create the image (and tag $repo:$suite) + sudo tar --numeric-owner -c . | $docker import - $repo:$suite + + # test the image + $docker run -i -t $repo:$suite echo success + + if [ -z "$skipDetection" ]; then + case "$lsbDist" in + Debian) + if [ "$suite" = "$debianStable" -o "$suite" = 'stable' ] && [ -r etc/debian_version ]; then + # tag latest + $docker tag $repo:$suite $repo:latest + + if [ -r etc/debian_version ]; then + # tag the specific debian release version (which is only reasonable to tag on debian stable) + ver=$(cat etc/debian_version) + $docker tag $repo:$suite $repo:$ver + fi + fi + ;; + Ubuntu) + if [ "$suite" = "$ubuntuLatestLTS" ]; then + # tag latest + $docker tag $repo:$suite $repo:latest + fi + if [ -r etc/lsb-release ]; then + lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" + if [ "$lsbRelease" ]; then + # tag specific Ubuntu version number, if available (12.04, etc.) + $docker tag $repo:$suite $repo:$lsbRelease + fi + fi + ;; + Tanglu) + if [ "$suite" = "$tangluLatest" ]; then + # tag latest + $docker tag $repo:$suite $repo:latest + fi + if [ -r etc/lsb-release ]; then + lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" + if [ "$lsbRelease" ]; then + # tag specific Tanglu version number, if available (1.0, 2.0, etc.) + $docker tag $repo:$suite $repo:$lsbRelease + fi + fi + ;; + SteamOS) + if [ -r etc/lsb-release ]; then + lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" + if [ "$lsbRelease" ]; then + # tag specific SteamOS version number, if available (1.0, 2.0, etc.) + $docker tag $repo:$suite $repo:$lsbRelease + fi + fi + ;; + esac + fi +fi + +# cleanup +cd "$returnTo" +sudo rm -rf "$target" diff --git a/contrib/mkimage-rinse.sh b/contrib/mkimage-rinse.sh new file mode 100755 index 00000000..7e093506 --- /dev/null +++ b/contrib/mkimage-rinse.sh @@ -0,0 +1,123 @@ +#!/usr/bin/env bash +# +# Create a base CentOS Docker image. + +# This script is useful on systems with rinse available (e.g., +# building a CentOS image on Debian). 
See contrib/mkimage-yum.sh for +# a way to build CentOS images on systems with yum installed. + +set -e + +echo >&2 +echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/rinse' +echo >&2 + +repo="$1" +distro="$2" +mirror="$3" + +if [ ! "$repo" ] || [ ! "$distro" ]; then + self="$(basename $0)" + echo >&2 "usage: $self repo distro [mirror]" + echo >&2 + echo >&2 " ie: $self username/centos centos-5" + echo >&2 " $self username/centos centos-6" + echo >&2 + echo >&2 " ie: $self username/slc slc-5" + echo >&2 " $self username/slc slc-6" + echo >&2 + echo >&2 " ie: $self username/centos centos-5 http://vault.centos.org/5.8/os/x86_64/CentOS/" + echo >&2 " $self username/centos centos-6 http://vault.centos.org/6.3/os/x86_64/Packages/" + echo >&2 + echo >&2 'See /etc/rinse for supported values of "distro" and for examples of' + echo >&2 ' expected values of "mirror".' + echo >&2 + echo >&2 'This script is tested to work with the original upstream version of rinse,' + echo >&2 ' found at http://www.steve.org.uk/Software/rinse/ and also in Debian at' + echo >&2 ' http://packages.debian.org/wheezy/rinse -- as always, YMMV.' + echo >&2 + exit 1 +fi + +target="${TMPDIR:-/var/tmp}/docker-rootfs-rinse-$distro-$$-$RANDOM" + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" +returnTo="$(pwd -P)" + +rinseArgs=( --arch amd64 --distribution "$distro" --directory "$target" ) +if [ "$mirror" ]; then + rinseArgs+=( --mirror "$mirror" ) +fi + +set -x + +mkdir -p "$target" + +sudo rinse "${rinseArgs[@]}" + +cd "$target" + +# rinse fails a little at setting up /dev, so we'll just wipe it out and create our own +sudo rm -rf dev +sudo mkdir -m 755 dev +( + cd dev + sudo ln -sf /proc/self/fd ./ + sudo mkdir -m 755 pts + sudo mkdir -m 1777 shm + sudo mknod -m 600 console c 5 1 + sudo mknod -m 600 initctl p + sudo mknod -m 666 full c 1 7 + sudo mknod -m 666 null c 1 3 + sudo mknod -m 666 ptmx c 5 2 + sudo mknod -m 666 random c 1 8 + sudo mknod -m 666 tty c 5 0 + sudo mknod -m 666 tty0 c 4 0 + sudo mknod -m 666 urandom c 1 9 + sudo mknod -m 666 zero c 1 5 +) + +# effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target" +# locales +sudo rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive} +# docs and man pages +sudo rm -rf usr/share/{man,doc,info,gnome/help} +# cracklib +sudo rm -rf usr/share/cracklib +# i18n +sudo rm -rf usr/share/i18n +# yum cache +sudo rm -rf var/cache/yum +sudo mkdir -p --mode=0755 var/cache/yum +# sln +sudo rm -rf sbin/sln +# ldconfig +#sudo rm -rf sbin/ldconfig +sudo rm -rf etc/ld.so.cache var/cache/ldconfig +sudo mkdir -p --mode=0755 var/cache/ldconfig + +# allow networking init scripts inside the container to work without extra steps +echo 'NETWORKING=yes' | sudo tee etc/sysconfig/network > /dev/null + +# to restore locales later: +# yum reinstall glibc-common + +version= +if [ -r etc/redhat-release ]; then + version="$(sed -E 's/^[^0-9.]*([0-9.]+).*$/\1/' etc/redhat-release)" +elif [ -r etc/SuSE-release ]; then + version="$(awk '/^VERSION/ { print $3 }' etc/SuSE-release)" +fi + +if [ -z "$version" ]; then + echo >&2 "warning: cannot autodetect OS version, using $distro as tag" + sleep 20 + version="$distro" +fi + +sudo tar --numeric-owner -c . 
| docker import - $repo:$version + +docker run -i -t $repo:$version echo success + +cd "$returnTo" +sudo rm -rf "$target" diff --git a/contrib/mkimage-yum.sh b/contrib/mkimage-yum.sh new file mode 100755 index 00000000..f48cb434 --- /dev/null +++ b/contrib/mkimage-yum.sh @@ -0,0 +1,108 @@ +#!/usr/bin/env bash +# +# Create a base CentOS Docker image. +# +# This script is useful on systems with yum installed (e.g., building +# a CentOS image on CentOS). See contrib/mkimage-rinse.sh for a way +# to build CentOS images on other systems. + +usage() { + cat < +OPTIONS: + -y The path to the yum config to install packages from. The + default is /etc/yum.conf. +EOOPTS + exit 1 +} + +# option defaults +yum_config=/etc/yum.conf +while getopts ":y:h" opt; do + case $opt in + y) + yum_config=$OPTARG + ;; + h) + usage + ;; + \?) + echo "Invalid option: -$OPTARG" + usage + ;; + esac +done +shift $((OPTIND - 1)) +name=$1 + +if [[ -z $name ]]; then + usage +fi + +#-------------------- + +target=$(mktemp -d --tmpdir $(basename $0).XXXXXX) + +set -x + +mkdir -m 755 "$target"/dev +mknod -m 600 "$target"/dev/console c 5 1 +mknod -m 600 "$target"/dev/initctl p +mknod -m 666 "$target"/dev/full c 1 7 +mknod -m 666 "$target"/dev/null c 1 3 +mknod -m 666 "$target"/dev/ptmx c 5 2 +mknod -m 666 "$target"/dev/random c 1 8 +mknod -m 666 "$target"/dev/tty c 5 0 +mknod -m 666 "$target"/dev/tty0 c 4 0 +mknod -m 666 "$target"/dev/urandom c 1 9 +mknod -m 666 "$target"/dev/zero c 1 5 + +# amazon linux yum will fail without vars set +if [ -d /etc/yum/vars ]; then + mkdir -p -m 755 "$target"/etc/yum + cp -a /etc/yum/vars "$target"/etc/yum/ +fi + +yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \ + --setopt=group_package_types=mandatory -y groupinstall Core +yum -c "$yum_config" --installroot="$target" -y clean all + +cat > "$target"/etc/sysconfig/network <&2 "warning: cannot autodetect OS version, using '$name' as tag" + version=$name +fi + +tar --numeric-owner -c -C "$target" . 
| docker import - $name:$version +docker run -i -t $name:$version echo success + +rm -rf "$target" diff --git a/contrib/mkimage.sh b/contrib/mkimage.sh new file mode 100755 index 00000000..3976d72d --- /dev/null +++ b/contrib/mkimage.sh @@ -0,0 +1,117 @@ +#!/usr/bin/env bash +set -e + +mkimg="$(basename "$0")" + +usage() { + echo >&2 "usage: $mkimg [-d dir] [-t tag] [--compression algo| --no-compression] script [script-args]" + echo >&2 " ie: $mkimg -t someuser/debian debootstrap --variant=minbase jessie" + echo >&2 " $mkimg -t someuser/ubuntu debootstrap --include=ubuntu-minimal --components=main,universe trusty" + echo >&2 " $mkimg -t someuser/busybox busybox-static" + echo >&2 " $mkimg -t someuser/centos:5 rinse --distribution centos-5" + echo >&2 " $mkimg -t someuser/mageia:4 mageia-urpmi --version=4" + echo >&2 " $mkimg -t someuser/mageia:4 mageia-urpmi --version=4 --mirror=http://somemirror/" + exit 1 +} + +scriptDir="$(dirname "$(readlink -f "$BASH_SOURCE")")/mkimage" + +optTemp=$(getopt --options '+d:t:c:hC' --longoptions 'dir:,tag:,compression:,no-compression,help' --name "$mkimg" -- "$@") +eval set -- "$optTemp" +unset optTemp + +dir= +tag= +compression="auto" +while true; do + case "$1" in + -d|--dir) dir="$2" ; shift 2 ;; + -t|--tag) tag="$2" ; shift 2 ;; + --compression) compression="$2" ; shift 2 ;; + --no-compression) compression="none" ; shift 1 ;; + -h|--help) usage ;; + --) shift ; break ;; + esac +done + +script="$1" +[ "$script" ] || usage +shift + +if [ "$compression" == 'auto' ] || [ -z "$compression" ] +then + compression='xz' +fi + +[ "$compression" == 'none' ] && compression='' + +if [ ! -x "$scriptDir/$script" ]; then + echo >&2 "error: $script does not exist or is not executable" + echo >&2 " see $scriptDir for possible scripts" + exit 1 +fi + +# don't mistake common scripts like .febootstrap-minimize as image-creators +if [[ "$script" == .* ]]; then + echo >&2 "error: $script is a script helper, not a script" + echo >&2 " see $scriptDir for possible scripts" + exit 1 +fi + +delDir= +if [ -z "$dir" ]; then + dir="$(mktemp -d ${TMPDIR:-/var/tmp}/docker-mkimage.XXXXXXXXXX)" + delDir=1 +fi + +rootfsDir="$dir/rootfs" +( set -x; mkdir -p "$rootfsDir" ) + +# pass all remaining arguments to $script +"$scriptDir/$script" "$rootfsDir" "$@" + +# Docker mounts tmpfs at /dev and procfs at /proc so we can remove them +rm -rf "$rootfsDir/dev" "$rootfsDir/proc" +mkdir -p "$rootfsDir/dev" "$rootfsDir/proc" + +# make sure /etc/resolv.conf has something useful in it +mkdir -p "$rootfsDir/etc" +cat > "$rootfsDir/etc/resolv.conf" <<'EOF' +nameserver 8.8.8.8 +nameserver 8.8.4.4 +EOF + +tarFile="$dir/rootfs.tar${compression:+.$compression}" +touch "$tarFile" + +( + set -x + tar --numeric-owner --create --auto-compress --file "$tarFile" --directory "$rootfsDir" --transform='s,^./,,' . 
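+	# (a note on the flags above: GNU tar's --auto-compress picks the
+	# compression program from the --file suffix, so a "rootfs.tar.xz"
+	# target comes out xz-compressed automatically, and
+	# --transform='s,^./,,' strips the leading "./" from member names)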
+) + +echo >&2 "+ cat > '$dir/Dockerfile'" +cat > "$dir/Dockerfile" <> "$dir/Dockerfile" ) + break + fi +done + +( set -x; rm -rf "$rootfsDir" ) + +if [ "$tag" ]; then + ( set -x; docker build -t "$tag" "$dir" ) +elif [ "$delDir" ]; then + # if we didn't specify a tag and we're going to delete our dir, let's just build an untagged image so that we did _something_ + ( set -x; docker build "$dir" ) +fi + +if [ "$delDir" ]; then + ( set -x; rm -rf "$dir" ) +fi diff --git a/contrib/mkimage/.febootstrap-minimize b/contrib/mkimage/.febootstrap-minimize new file mode 100755 index 00000000..7749e63f --- /dev/null +++ b/contrib/mkimage/.febootstrap-minimize @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +( + cd "$rootfsDir" + + # effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target" + # locales + rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive} + # docs and man pages + rm -rf usr/share/{man,doc,info,gnome/help} + # cracklib + rm -rf usr/share/cracklib + # i18n + rm -rf usr/share/i18n + # yum cache + rm -rf var/cache/yum + mkdir -p --mode=0755 var/cache/yum + # sln + rm -rf sbin/sln + # ldconfig + #rm -rf sbin/ldconfig + rm -rf etc/ld.so.cache var/cache/ldconfig + mkdir -p --mode=0755 var/cache/ldconfig +) diff --git a/contrib/mkimage/busybox-static b/contrib/mkimage/busybox-static new file mode 100755 index 00000000..e15322b4 --- /dev/null +++ b/contrib/mkimage/busybox-static @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +busybox="$(which busybox 2>/dev/null || true)" +if [ -z "$busybox" ]; then + echo >&2 'error: busybox: not found' + echo >&2 ' install it with your distribution "busybox-static" package' + exit 1 +fi +if ! ldd "$busybox" 2>&1 | grep -q 'not a dynamic executable'; then + echo >&2 "error: '$busybox' appears to be a dynamic executable" + echo >&2 ' you should install your distribution "busybox-static" package instead' + exit 1 +fi + +mkdir -p "$rootfsDir/bin" +rm -f "$rootfsDir/bin/busybox" # just in case +cp "$busybox" "$rootfsDir/bin/busybox" + +( + cd "$rootfsDir" + + IFS=$'\n' + modules=( $(bin/busybox --list-modules) ) + unset IFS + + for module in "${modules[@]}"; do + mkdir -p "$(dirname "$module")" + ln -sf /bin/busybox "$module" + done +) diff --git a/contrib/mkimage/debootstrap b/contrib/mkimage/debootstrap new file mode 100755 index 00000000..c613d537 --- /dev/null +++ b/contrib/mkimage/debootstrap @@ -0,0 +1,240 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +# we have to do a little fancy footwork to make sure "rootfsDir" becomes the second non-option argument to debootstrap + +before=() +while [ $# -gt 0 ] && [[ "$1" == -* ]]; do + before+=( "$1" ) + shift +done + +suite="$1" +shift + +# get path to "chroot" in our current PATH +chrootPath="$(type -P chroot)" +rootfs_chroot() { + # "chroot" doesn't set PATH, so we need to set it explicitly to something our new debootstrap chroot can use appropriately! + + # set PATH and chroot away! + PATH='/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' \ + "$chrootPath" "$rootfsDir" "$@" +} + +# allow for DEBOOTSTRAP=qemu-debootstrap ./mkimage.sh ... 
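+# for example, a foreign-architecture rootfs might be built with something
+# like the following (a hypothetical invocation; it assumes qemu-user-static
+# and binfmt_misc support are set up on the build host):
+#
+#   DEBOOTSTRAP=qemu-debootstrap ./mkimage.sh -t someuser/debian:armhf debootstrap --arch=armhf jessie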
+: ${DEBOOTSTRAP:=debootstrap} + +( + set -x + $DEBOOTSTRAP "${before[@]}" "$suite" "$rootfsDir" "$@" +) + +# now for some Docker-specific tweaks + +# prevent init scripts from running during install/update +echo >&2 "+ echo exit 101 > '$rootfsDir/usr/sbin/policy-rc.d'" +cat > "$rootfsDir/usr/sbin/policy-rc.d" <<-'EOF' + #!/bin/sh + + # For most Docker users, "apt-get install" only happens during "docker build", + # where starting services doesn't work and often fails in humorous ways. This + # prevents those failures by stopping the services from attempting to start. + + exit 101 +EOF +chmod +x "$rootfsDir/usr/sbin/policy-rc.d" + +# prevent upstart scripts from running during install/update +( + set -x + rootfs_chroot dpkg-divert --local --rename --add /sbin/initctl + cp -a "$rootfsDir/usr/sbin/policy-rc.d" "$rootfsDir/sbin/initctl" + sed -i 's/^exit.*/exit 0/' "$rootfsDir/sbin/initctl" +) + +# shrink a little, since apt makes us cache-fat (wheezy: ~157.5MB vs ~120MB) +( set -x; rootfs_chroot apt-get clean ) + +# this file is one APT creates to make sure we don't "autoremove" our currently +# in-use kernel, which doesn't really apply to debootstraps/Docker images that +# don't even have kernels installed +rm -f "$rootfsDir/etc/apt/apt.conf.d/01autoremove-kernels" + +# Ubuntu 10.04 sucks... :) +if strings "$rootfsDir/usr/bin/dpkg" | grep -q unsafe-io; then + # force dpkg not to call sync() after package extraction (speeding up installs) + echo >&2 "+ echo force-unsafe-io > '$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup'" + cat > "$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup" <<-'EOF' + # For most Docker users, package installs happen during "docker build", which + # doesn't survive power loss and gets restarted clean afterwards anyhow, so + # this minor tweak gives us a nice speedup (much nicer on spinning disks, + # obviously). + + force-unsafe-io + EOF +fi + +if [ -d "$rootfsDir/etc/apt/apt.conf.d" ]; then + # _keep_ us lean by effectively running "apt-get clean" after every install + aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";' + echo >&2 "+ cat > '$rootfsDir/etc/apt/apt.conf.d/docker-clean'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-clean" <<-EOF + # Since for most Docker users, package installs happen in "docker build" steps, + # they essentially become individual layers due to the way Docker handles + # layering, especially using CoW filesystems. What this means for us is that + # the caches that APT keeps end up just wasting space in those layers, making + # our layers unnecessarily large (especially since we'll normally never use + # these caches again and will instead just "docker build" again and make a brand + # new image). + + # Ideally, these would just be invoking "apt-get clean", but in our testing, + # that ended up being cyclic and we got stuck on APT's lock, so we get this fun + # creation that's essentially just "apt-get clean". + DPkg::Post-Invoke { ${aptGetClean} }; + APT::Update::Post-Invoke { ${aptGetClean} }; + + Dir::Cache::pkgcache ""; + Dir::Cache::srcpkgcache ""; + + # Note that we do realize this isn't the ideal way to do this, and are always + # open to better suggestions (https://github.com/docker/docker/issues). 
+ EOF + + # remove apt-cache translations for fast "apt-get update" + echo >&2 "+ echo Acquire::Languages 'none' > '$rootfsDir/etc/apt/apt.conf.d/docker-no-languages'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-no-languages" <<-'EOF' + # In Docker, we don't often need the "Translations" files, so we're just wasting + # time and space by downloading them, and this inhibits that. For users that do + # need them, it's a simple matter to delete this file and "apt-get update". :) + + Acquire::Languages "none"; + EOF + + echo >&2 "+ echo Acquire::GzipIndexes 'true' > '$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes" <<-'EOF' + # Since Docker users using "RUN apt-get update && apt-get install -y ..." in + # their Dockerfiles don't go delete the lists files afterwards, we want them to + # be as small as possible on-disk, so we explicitly request "gz" versions and + # tell Apt to keep them gzipped on-disk. + + # For comparison, an "apt-get update" layer without this on a pristine + # "debian:wheezy" base image was "29.88 MB", where with this it was only + # "8.273 MB". + + Acquire::GzipIndexes "true"; + Acquire::CompressionTypes::Order:: "gz"; + EOF + + # update "autoremove" configuration to be aggressive about removing suggests deps that weren't manually installed + echo >&2 "+ echo Apt::AutoRemove::SuggestsImportant 'false' > '$rootfsDir/etc/apt/apt.conf.d/docker-autoremove-suggests'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-autoremove-suggests" <<-'EOF' + # Since Docker users are looking for the smallest possible final images, the + # following emerges as a very common pattern: + + # RUN apt-get update \ + # && apt-get install -y \ + # && \ + # && apt-get purge -y --auto-remove + + # By default, APT will actually _keep_ packages installed via Recommends or + # Depends if another package Suggests them, even and including if the package + # that originally caused them to be installed is removed. Setting this to + # "false" ensures that APT is appropriately aggressive about removing the + # packages it added. + + # https://aptitude.alioth.debian.org/doc/en/ch02s05s05.html#configApt-AutoRemove-SuggestsImportant + Apt::AutoRemove::SuggestsImportant "false"; + EOF +fi + +if [ -z "$DONT_TOUCH_SOURCES_LIST" ]; then + # tweak sources.list, where appropriate + lsbDist= + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/os-release" ]; then + lsbDist="$(. "$rootfsDir/etc/os-release" && echo "$ID")" + fi + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/lsb-release" ]; then + lsbDist="$(. "$rootfsDir/etc/lsb-release" && echo "$DISTRIB_ID")" + fi + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/debian_version" ]; then + lsbDist='Debian' + fi + # normalize to lowercase for easier matching + lsbDist="$(echo "$lsbDist" | tr '[:upper:]' '[:lower:]')" + case "$lsbDist" in + debian) + # updates and security! 
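+			# (the "p;" in the sed scripts below prints each original line
+			# before the substitution is applied, so the stock entry is kept
+			# and a modified copy is appended)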
+ if [ "$suite" != 'sid' -a "$suite" != 'unstable' ]; then + ( + set -x + sed -i " + p; + s/ $suite / ${suite}-updates / + " "$rootfsDir/etc/apt/sources.list" + echo "deb http://security.debian.org $suite/updates main" >> "$rootfsDir/etc/apt/sources.list" + # squeeze-lts + if [ -f "$rootfsDir/etc/debian_version" ]; then + ltsSuite= + case "$(cat "$rootfsDir/etc/debian_version")" in + 6.*) ltsSuite='squeeze-lts' ;; + #7.*) ltsSuite='wheezy-lts' ;; + #8.*) ltsSuite='jessie-lts' ;; + esac + if [ "$ltsSuite" ]; then + head -1 "$rootfsDir/etc/apt/sources.list" \ + | sed "s/ $suite / $ltsSuite /" \ + >> "$rootfsDir/etc/apt/sources.list" + fi + fi + ) + fi + ;; + ubuntu) + # add the updates and security repositories + ( + set -x + sed -i " + p; + s/ $suite / ${suite}-updates /; p; + s/ $suite-updates / ${suite}-security / + " "$rootfsDir/etc/apt/sources.list" + ) + ;; + tanglu) + # add the updates repository + if [ "$suite" != 'devel' ]; then + ( + set -x + sed -i " + p; + s/ $suite / ${suite}-updates / + " "$rootfsDir/etc/apt/sources.list" + ) + fi + ;; + steamos) + # add contrib and non-free if "main" is the only component + ( + set -x + sed -i "s/ $suite main$/ $suite main contrib non-free/" "$rootfsDir/etc/apt/sources.list" + ) + ;; + esac +fi + +( + set -x + + # make sure we're fully up-to-date + rootfs_chroot sh -xc 'apt-get update && apt-get dist-upgrade -y' + + # delete all the apt list files since they're big and get stale quickly + rm -rf "$rootfsDir/var/lib/apt/lists"/* + # this forces "apt-get update" in dependent images, which is also good + + mkdir "$rootfsDir/var/lib/apt/lists/partial" # Lucid... "E: Lists directory /var/lib/apt/lists/partial is missing." +) diff --git a/contrib/mkimage/mageia-urpmi b/contrib/mkimage/mageia-urpmi new file mode 100755 index 00000000..93fb289c --- /dev/null +++ b/contrib/mkimage/mageia-urpmi @@ -0,0 +1,61 @@ +#!/usr/bin/env bash +# +# Needs to be run from Mageia 4 or greater for kernel support for docker. +# +# Mageia 4 does not have docker available in official repos, so please +# install and run the docker binary manually. +# +# Tested working versions are for Mageia 2 onwards (inc. cauldron). 
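+#
+# Example usage via mkimage.sh (assuming urpmi and urpmi.addmedia are
+# available on the build host):
+#
+#   ./mkimage.sh -t someuser/mageia:4 mageia-urpmi --version=4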
+#
+set -e

+rootfsDir="$1"
+shift

+optTemp=$(getopt --options '+v:,m:' --longoptions 'version:,mirror:' --name mageia-urpmi -- "$@")
+eval set -- "$optTemp"
+unset optTemp

+installversion=
+mirror=
+while true; do
+	case "$1" in
+		-v|--version) installversion="$2" ; shift 2 ;;
+		-m|--mirror) mirror="$2" ; shift 2 ;;
+		--) shift ; break ;;
+	esac
+done

+if [ -z "$installversion" ]; then
+	# Attempt to match host version
+	if [ -r /etc/mageia-release ]; then
+		installversion="$(sed 's/^[^0-9\]*\([0-9.]\+\).*$/\1/' /etc/mageia-release)"
+	else
+		echo "Error: no version supplied and unable to detect host Mageia version"
+		exit 1
+	fi
+fi

+if [ -z "$mirror" ]; then
+	# No mirror provided, default to mirrorlist
+	mirror="--mirrorlist https://mirrors.mageia.org/api/mageia.$installversion.x86_64.list"
+fi

+(
+	set -x
+	urpmi.addmedia --distrib \
+		$mirror \
+		--urpmi-root "$rootfsDir"
+	urpmi basesystem-minimal urpmi \
+		--auto \
+		--no-suggests \
+		--urpmi-root "$rootfsDir" \
+		--root "$rootfsDir"
+)

+"$(dirname "$BASH_SOURCE")/.febootstrap-minimize" "$rootfsDir"

+if [ -d "$rootfsDir/etc/sysconfig" ]; then
+	# allow networking init scripts inside the container to work without extra steps
+	echo 'NETWORKING=yes' > "$rootfsDir/etc/sysconfig/network"
+fi
diff --git a/contrib/mkimage/rinse b/contrib/mkimage/rinse
new file mode 100755
index 00000000..75eb4f0d
--- /dev/null
+++ b/contrib/mkimage/rinse
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+set -e

+rootfsDir="$1"
+shift

+# specifying --arch below is safe because "$@" can override it and the "latest" one wins :)

+(
+	set -x
+	rinse --directory "$rootfsDir" --arch amd64 "$@"
+)

+"$(dirname "$BASH_SOURCE")/.febootstrap-minimize" "$rootfsDir"

+if [ -d "$rootfsDir/etc/sysconfig" ]; then
+	# allow networking init scripts inside the container to work without extra steps
+	echo 'NETWORKING=yes' > "$rootfsDir/etc/sysconfig/network"
+fi

+# make sure we're fully up-to-date, too
+(
+	set -x
+	chroot "$rootfsDir" yum update -y
+)
diff --git a/contrib/mkseccomp.pl b/contrib/mkseccomp.pl
new file mode 100755
index 00000000..28d0645a
--- /dev/null
+++ b/contrib/mkseccomp.pl
@@ -0,0 +1,77 @@
+#!/usr/bin/perl
+#
+# A simple helper script to help people build seccomp profiles for
+# Docker/LXC. The goal is mostly to reduce the attack surface to the
+# kernel, by restricting access to rarely used, recently added, or unused
+# syscalls.
+#
+# This script processes one or more files which contain the list of system
+# calls to be allowed. See mkseccomp.sample for more information on how you
+# can configure the list of syscalls. When run, this script produces output
+# which, when stored in a file, can be passed to docker as follows:
+#
+# docker run --lxc-conf="lxc.seccomp=$file"
+#
+# The included sample file shows how to cut about a quarter of all syscalls,
+# without affecting most applications.
+#
+# For specific situations it is possible to reduce the list further. By
+# reducing the list to just those syscalls required by a certain application
+# you can make it difficult for unknown/unexpected code to run.
+#
+# Run this script as follows:
+#
+# ./mkseccomp.pl < mkseccomp.sample >syscalls.list
+# or
+# ./mkseccomp.pl mkseccomp.sample >syscalls.list
+#
+# Multiple files can be specified, in which case the lists of syscalls are
+# combined.
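+#
+# A hypothetical end-to-end run (assuming the LXC exec driver is in use, and
+# with the image and command chosen purely for illustration) might look like:
+#
+#   ./mkseccomp.pl mkseccomp.sample > /tmp/seccomp.filter
+#   docker run --lxc-conf="lxc.seccomp=/tmp/seccomp.filter" busybox true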
+#
+# By Martijn van Oosterhout Nov 2013

+# How it works:
+#
+# This program basically spawns two processes to form a chain like:
+#
+#  | cpp |

+use strict;
+use warnings;

+if( -t ) {
+    print STDERR "Helper script to make seccomp filters for Docker/LXC.\n";
+    print STDERR "Usage: mkseccomp.pl < [files...]\n";
+    exit 1;
+}

+my $pid = open(my $in, "-|") // die "Couldn't fork1 ($!)\n";

+if($pid == 0) { # Child
+    $pid = open(my $out, "|-") // die "Couldn't fork2 ($!)\n";

+    if($pid == 0) { # Child, which execs cpp
+        exec "cpp" or die "Couldn't exec cpp ($!)\n";
+        exit 1;
+    }

+    # Process the DATA section and output to cpp
+    print $out "#include <sys/syscall.h>\n";
+    while(<>) {
+        if(/^\w/) {
+            print $out "__NR_$_";
+        }
+    }
+    close $out;
+    exit 0;

+}

+# Print header and then process output from cpp.
+print "1\n";
+print "whitelist\n";

+while(<$in>) {
+    print if( /^[0-9]/ );
+}

diff --git a/contrib/mkseccomp.sample b/contrib/mkseccomp.sample
new file mode 100644
index 00000000..7a0c8d19
--- /dev/null
+++ b/contrib/mkseccomp.sample
@@ -0,0 +1,444 @@
+/* This sample file is an example for mkseccomp.pl to produce a seccomp file
+ * which restricts syscalls that are only useful for an admin but allows the
+ * vast majority of normal userspace programs to run normally.
+ *
+ * The format of this file is one line per syscall. This is then processed
+ * and passed to 'cpp' to convert the names to numbers using whatever is
+ * correct for your platform. As such C-style comments are permitted. Note
+ * this also means that C preprocessor macros are also allowed. So it is
+ * possible to create groups surrounded by #ifdef/#endif and control their
+ * inclusion via #define (not #include).
+ *
+ * Syscalls that don't exist on your architecture are silently filtered out.
+ * Syscalls marked with (*) are required for a container to spawn a bash
+ * shell successfully (not necessarily full featured). Listing the same
+ * syscall multiple times is no problem.
+ *
+ * If you want to make a list specifically for one application the easiest
+ * way is to run the application under strace, like so:
+ *
+ * $ strace -f -q -c -o strace.out application args...
+ *
+ * Once you have a reasonable sample of the execution of the program, exit
+ * it. The file strace.out will have a summary of the syscalls used. Copy
+ * that list into this file, comment out everything else except the starred
+ * syscalls (which you need for the container to start) and you're done.
+ *
+ * To get the list of syscalls from the strace output, this works well for
+ * me:
+ *
+ * $ cut -c52- < strace.out
+ *
+ * This sample list was compiled as a combination of all the syscalls
+ * available on i386 and amd64 on Ubuntu Precise; as such it may not contain
+ * everything, and not everything may be relevant for your system. This
+ * shouldn't be a problem.
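+ *
+ * As a sketch of the #ifdef grouping described above (not part of the
+ * original list), a group like the following could be toggled with a
+ * single #define:
+ *
+ *   #define WANT_EXTRA_NETWORK
+ *   #ifdef WANT_EXTRA_NETWORK
+ *   accept4
+ *   recvmmsg
+ *   #endif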
+ */ + +// Filesystem/File descriptor related +access // (*) +chdir // (*) +chmod +chown +chown32 +close // (*) +creat +dup // (*) +dup2 // (*) +dup3 +epoll_create +epoll_create1 +epoll_ctl +epoll_ctl_old +epoll_pwait +epoll_wait +epoll_wait_old +eventfd +eventfd2 +faccessat // (*) +fadvise64 +fadvise64_64 +fallocate +fanotify_init +fanotify_mark +ioctl // (*) +fchdir +fchmod +fchmodat +fchown +fchown32 +fchownat +fcntl // (*) +fcntl64 +fdatasync +fgetxattr +flistxattr +flock +fremovexattr +fsetxattr +fstat // (*) +fstat64 +fstatat64 +fstatfs +fstatfs64 +fsync +ftruncate +ftruncate64 +getcwd // (*) +getdents // (*) +getdents64 +getxattr +inotify_add_watch +inotify_init +inotify_init1 +inotify_rm_watch +io_cancel +io_destroy +io_getevents +io_setup +io_submit +lchown +lchown32 +lgetxattr +link +linkat +listxattr +llistxattr +llseek +_llseek +lremovexattr +lseek // (*) +lsetxattr +lstat +lstat64 +mkdir +mkdirat +mknod +mknodat +newfstatat +_newselect +oldfstat +oldlstat +oldolduname +oldstat +olduname +oldwait4 +open // (*) +openat // (*) +pipe // (*) +pipe2 +poll +ppoll +pread64 +preadv +futimesat +pselect6 +pwrite64 +pwritev +read // (*) +readahead +readdir +readlink +readlinkat +readv +removexattr +rename +renameat +rmdir +select +sendfile +sendfile64 +setxattr +splice +stat // (*) +stat64 +statfs // (*) +statfs64 +symlink +symlinkat +sync +sync_file_range +sync_file_range2 +syncfs +tee +truncate +truncate64 +umask +unlink +unlinkat +ustat +utime +utimensat +utimes +write // (*) +writev + +// Network related +accept +accept4 +bind // (*) +connect // (*) +getpeername +getsockname // (*) +getsockopt +listen +recv +recvfrom // (*) +recvmmsg +recvmsg +send +sendmmsg +sendmsg +sendto // (*) +setsockopt +shutdown +socket // (*) +socketcall +socketpair +sethostname // (*) + +// Signal related +pause +rt_sigaction // (*) +rt_sigpending +rt_sigprocmask // (*) +rt_sigqueueinfo +rt_sigreturn // (*) +rt_sigsuspend +rt_sigtimedwait +rt_tgsigqueueinfo +sigaction +sigaltstack // (*) +signal +signalfd +signalfd4 +sigpending +sigprocmask +sigreturn +sigsuspend + +// Other needed POSIX +alarm +brk // (*) +clock_adjtime +clock_getres +clock_gettime +clock_nanosleep +//clock_settime +gettimeofday +nanosleep +nice +sysinfo +syslog +time +timer_create +timer_delete +timerfd_create +timerfd_gettime +timerfd_settime +timer_getoverrun +timer_gettime +timer_settime +times +uname // (*) + +// Memory control +madvise +mbind +mincore +mlock +mlockall +mmap // (*) +mmap2 +mprotect // (*) +mremap +msync +munlock +munlockall +munmap // (*) +remap_file_pages +set_mempolicy +vmsplice + +// Process control +capget +capset // (*) +clone // (*) +execve // (*) +exit // (*) +exit_group // (*) +fork +getcpu +getpgid +getpgrp // (*) +getpid // (*) +getppid // (*) +getpriority +getresgid +getresgid32 +getresuid +getresuid32 +getrlimit // (*) +getrusage +getsid +getuid // (*) +getuid32 +getegid // (*) +getegid32 +geteuid // (*) +geteuid32 +getgid // (*) +getgid32 +getgroups +getgroups32 +getitimer +get_mempolicy +kill +//personality +prctl +prlimit64 +sched_getaffinity +sched_getparam +sched_get_priority_max +sched_get_priority_min +sched_getscheduler +sched_rr_get_interval +//sched_setaffinity +//sched_setparam +//sched_setscheduler +sched_yield +setfsgid +setfsgid32 +setfsuid +setfsuid32 +setgid +setgid32 +setgroups +setgroups32 +setitimer +setpgid // (*) +setpriority +setregid +setregid32 +setresgid +setresgid32 +setresuid +setresuid32 +setreuid +setreuid32 +setrlimit +setsid +setuid +setuid32 +ugetrlimit +vfork +wait4 // (*) 
+waitid +waitpid + +// IPC +ipc +mq_getsetattr +mq_notify +mq_open +mq_timedreceive +mq_timedsend +mq_unlink +msgctl +msgget +msgrcv +msgsnd +semctl +semget +semop +semtimedop +shmat +shmctl +shmdt +shmget + +// Linux specific, mostly needed for thread-related stuff +arch_prctl // (*) +get_robust_list +get_thread_area +gettid +futex // (*) +restart_syscall // (*) +set_robust_list // (*) +set_thread_area +set_tid_address // (*) +tgkill +tkill + +// Admin syscalls, these are blocked +//acct +//adjtimex +//bdflush +//chroot +//create_module +//delete_module +//get_kernel_syms // Obsolete +//idle // Obsolete +//init_module +//ioperm +//iopl +//ioprio_get +//ioprio_set +//kexec_load +//lookup_dcookie // oprofile only? +//migrate_pages // NUMA +//modify_ldt +//mount +//move_pages // NUMA +//name_to_handle_at // NFS server +//nfsservctl // NFS server +//open_by_handle_at // NFS server +//perf_event_open +//pivot_root +//process_vm_readv // For debugger +//process_vm_writev // For debugger +//ptrace // For debugger +//query_module +//quotactl +//reboot +//setdomainname +//setns +//settimeofday +//sgetmask // Obsolete +//ssetmask // Obsolete +//stime +//swapoff +//swapon +//_sysctl +//sysfs +//sys_setaltroot +//umount +//umount2 +//unshare +//uselib +//vhangup +//vm86 +//vm86old + +// Kernel key management +//add_key +//keyctl +//request_key + +// Unimplemented +//afs_syscall +//break +//ftime +//getpmsg +//gtty +//lock +//madvise1 +//mpx +//prof +//profil +//putpmsg +//security +//stty +//tuxcall +//ulimit +//vserver diff --git a/contrib/nuke-graph-directory.sh b/contrib/nuke-graph-directory.sh new file mode 100755 index 00000000..8d12a9d6 --- /dev/null +++ b/contrib/nuke-graph-directory.sh @@ -0,0 +1,65 @@ +#!/bin/sh +set -e + +dir="$1" + +if [ -z "$dir" ]; then + { + echo 'This script is for destroying old /var/lib/docker directories more safely than' + echo ' "rm -rf", which can cause data loss or other serious issues.' + echo + echo "usage: $0 directory" + echo " ie: $0 /var/lib/docker" + } >&2 + exit 1 +fi + +if [ "$(id -u)" != 0 ]; then + echo >&2 "error: $0 must be run as root" + exit 1 +fi + +if [ ! -d "$dir" ]; then + echo >&2 "error: $dir is not a directory" + exit 1 +fi + +dir="$(readlink -f "$dir")" + +echo +echo "Nuking $dir ..." +echo ' (if this is wrong, press Ctrl+C NOW!)' +echo + +( set -x; sleep 10 ) +echo + +dir_in_dir() { + inner="$1" + outer="$2" + [ "${inner#$outer}" != "$inner" ] +} + +# let's start by unmounting any submounts in $dir +# (like -v /home:... for example - DON'T DELETE MY HOME DIRECTORY BRU!) 
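+# (dir_in_dir above works by prefix-stripping: "${inner#$outer}" removes
+# $outer from the front of $inner, so if the result differs from $inner,
+# then $inner must live somewhere underneath $outer)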
+for mount in $(awk '{ print $5 }' /proc/self/mountinfo); do + mount="$(readlink -f "$mount" || true)" + if dir_in_dir "$mount" "$dir"; then + ( set -x; umount -f "$mount" ) + fi +done + +# now, let's go destroy individual btrfs subvolumes, if any exist +if command -v btrfs > /dev/null 2>&1; then + root="$(df "$dir" | awk 'NR>1 { print $NF }')" + root="${root#/}" # if root is "/", we want it to become "" + for subvol in $(btrfs subvolume list -o "$root/" 2>/dev/null | awk -F' path ' '{ print $2 }' | sort -r); do + subvolDir="$root/$subvol" + if dir_in_dir "$subvolDir" "$dir"; then + ( set -x; btrfs subvolume delete "$subvolDir" ) + fi + done +fi + +# finally, DESTROY ALL THINGS +( set -x; rm -rf "$dir" ) diff --git a/contrib/project-stats.sh b/contrib/project-stats.sh new file mode 100755 index 00000000..2691c72f --- /dev/null +++ b/contrib/project-stats.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +## Run this script from the root of the docker repository +## to query project stats useful to the maintainers. +## You will need to install `pulls` and `issues` from +## https://github.com/crosbymichael/pulls + +set -e + +echo -n "Open pulls: " +PULLS=$(pulls | wc -l); let PULLS=$PULLS-1 +echo $PULLS + +echo -n "Pulls alru: " +pulls alru + +echo -n "Open issues: " +ISSUES=$(issues list | wc -l); let ISSUES=$ISSUES-1 +echo $ISSUES + +echo -n "Issues alru: " +issues alru diff --git a/contrib/report-issue.sh b/contrib/report-issue.sh new file mode 100644 index 00000000..cb54f1a5 --- /dev/null +++ b/contrib/report-issue.sh @@ -0,0 +1,105 @@ +#!/bin/sh + +# This is a convenience script for reporting issues that include a base +# template of information. See https://github.com/docker/docker/pull/8845 + +set -e + +DOCKER_ISSUE_URL=${DOCKER_ISSUE_URL:-"https://github.com/docker/docker/issues/new"} +DOCKER_ISSUE_NAME_PREFIX=${DOCKER_ISSUE_NAME_PREFIX:-"Report: "} +DOCKER=${DOCKER:-"docker"} +DOCKER_COMMAND="${DOCKER}" +export DOCKER_COMMAND + +# pulled from https://gist.github.com/cdown/1163649 +function urlencode() { + # urlencode + + local length="${#1}" + for (( i = 0; i < length; i++ )); do + local c="${1:i:1}" + case $c in + [a-zA-Z0-9.~_-]) printf "$c" ;; + *) printf '%%%02X' "'$c" + esac + done +} + +function template() { +# this should always match the template from CONTRIBUTING.md + cat <<- EOM + Description of problem: + + + \`docker version\`: + `${DOCKER_COMMAND} -D version` + + + \`docker info\`: + `${DOCKER_COMMAND} -D info` + + + \`uname -a\`: + `uname -a` + + + Environment details (AWS, VirtualBox, physical, etc.): + + + How reproducible: + + + Steps to Reproduce: + 1. + 2. + 3. + + + Actual Results: + + + Expected Results: + + + Additional info: + + + EOM +} + +function format_issue_url() { + if [ ${#@} -ne 2 ] ; then + return 1 + fi + local issue_name=$(urlencode "${DOCKER_ISSUE_NAME_PREFIX}${1}") + local issue_body=$(urlencode "${2}") + echo "${DOCKER_ISSUE_URL}?title=${issue_name}&body=${issue_body}" +} + + +echo -ne "Do you use \`sudo\` to call docker? [y|N]: " +read -r -n 1 use_sudo +echo "" + +if [ "x${use_sudo}" = "xy" -o "x${use_sudo}" = "xY" ]; then + export DOCKER_COMMAND="sudo ${DOCKER}" +fi + +echo -ne "Title of new issue?: " +read -r issue_title +echo "" + +issue_url=$(format_issue_url "${issue_title}" "$(template)") + +if which xdg-open 2>/dev/null >/dev/null ; then + echo -ne "Would like to launch this report in your browser? 
[Y|n]: " + read -r -n 1 launch_now + echo "" + + if [ "${launch_now}" != "n" -a "${launch_now}" != "N" ]; then + xdg-open "${issue_url}" + fi +fi + +echo "If you would like to manually open the url, you can open this link if your browser: ${issue_url}" + diff --git a/contrib/reprepro/suites.sh b/contrib/reprepro/suites.sh new file mode 100755 index 00000000..efeeca0c --- /dev/null +++ b/contrib/reprepro/suites.sh @@ -0,0 +1,11 @@ +#!/bin/bash +set -e + +cd "$(dirname "$BASH_SOURCE")/../.." + +targets_from() { + git fetch -q https://github.com/docker/docker.git "$1" + git ls-tree -r --name-only origin/master contrib/builder/deb | grep '/Dockerfile$' | sed -r 's!^contrib/builder/deb/|-debootstrap|/Dockerfile$!!g' +} + +{ targets_from master; targets_from release; } | sort -u diff --git a/contrib/syntax/kate/Dockerfile.xml b/contrib/syntax/kate/Dockerfile.xml new file mode 100644 index 00000000..4fdef239 --- /dev/null +++ b/contrib/syntax/kate/Dockerfile.xml @@ -0,0 +1,69 @@ + + + + + + + FROM + MAINTAINER + ENV + RUN + ONBUILD + COPY + ADD + VOLUME + EXPOSE + ENTRYPOINT + CMD + WORKDIR + USER + LABEL + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/contrib/syntax/nano/Dockerfile.nanorc b/contrib/syntax/nano/Dockerfile.nanorc new file mode 100644 index 00000000..80e56dfb --- /dev/null +++ b/contrib/syntax/nano/Dockerfile.nanorc @@ -0,0 +1,26 @@ +## Syntax highlighting for Dockerfiles +syntax "Dockerfile" "Dockerfile[^/]*$" + +## Keywords +icolor red "^(FROM|MAINTAINER|RUN|CMD|LABEL|EXPOSE|ENV|ADD|COPY|ENTRYPOINT|VOLUME|USER|WORKDIR|ONBUILD)[[:space:]]" + +## Brackets & parenthesis +color brightgreen "(\(|\)|\[|\])" + +## Double ampersand +color brightmagenta "&&" + +## Comments +icolor cyan "^[[:space:]]*#.*$" + +## Blank space at EOL +color ,green "[[:space:]]+$" + +## Strings, single-quoted +color brightwhite "'([^']|(\\'))*'" "%[qw]\{[^}]*\}" "%[qw]\([^)]*\)" "%[qw]<[^>]*>" "%[qw]\[[^]]*\]" "%[qw]\$[^$]*\$" "%[qw]\^[^^]*\^" "%[qw]![^!]*!" + +## Strings, double-quoted +color brightwhite ""([^"]|(\\"))*"" "%[QW]?\{[^}]*\}" "%[QW]?\([^)]*\)" "%[QW]?<[^>]*>" "%[QW]?\[[^]]*\]" "%[QW]?\$[^$]*\$" "%[QW]?\^[^^]*\^" "%[QW]?![^!]*!" + +## Single and double quotes +color brightyellow "('|\")" diff --git a/contrib/syntax/nano/README.md b/contrib/syntax/nano/README.md new file mode 100644 index 00000000..5985208b --- /dev/null +++ b/contrib/syntax/nano/README.md @@ -0,0 +1,32 @@ +Dockerfile.nanorc +================= + +Dockerfile syntax highlighting for nano + +Single User Installation +------------------------ +1. Create a nano syntax directory in your home directory: + * `mkdir -p ~/.nano/syntax` + +2. Copy `Dockerfile.nanorc` to` ~/.nano/syntax/` + * `cp Dockerfile.nanorc ~/.nano/syntax/` + +3. Add the following to your `~/.nanorc` to tell nano where to find the `Dockerfile.nanorc` file + ``` +## Dockerfile files +include "~/.nano/syntax/Dockerfile.nanorc" + ``` + +System Wide Installation +------------------------ +1. Create a nano syntax directory: + * `mkdir /usr/local/share/nano` + +2. Copy `Dockerfile.nanorc` to `/usr/local/share/nano` + * `cp Dockerfile.nanorc /usr/local/share/nano/` + +3. 
Add the following to your `/etc/nanorc`: + ``` +## Dockerfile files +include "/usr/local/share/nano/Dockerfile.nanorc" + ``` diff --git a/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences b/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences new file mode 100644 index 00000000..20f0d04c --- /dev/null +++ b/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences @@ -0,0 +1,24 @@ + + + + + name + Comments + scope + source.dockerfile + settings + + shellVariables + + + name + TM_COMMENT_START + value + # + + + + uuid + 2B215AC0-A7F3-4090-9FF6-F4842BD56CA7 + + diff --git a/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage b/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage new file mode 100644 index 00000000..61e45ccb --- /dev/null +++ b/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage @@ -0,0 +1,143 @@ + + + + + fileTypes + + Dockerfile + + name + Dockerfile + patterns + + + captures + + 1 + + name + keyword.control.dockerfile + + 2 + + name + keyword.other.special-method.dockerfile + + + match + ^\s*(?:(ONBUILD)\s+)?(FROM|MAINTAINER|RUN|EXPOSE|ENV|ADD|VOLUME|USER|WORKDIR|COPY|LABEL)\s + + + captures + + 1 + + name + keyword.operator.dockerfile + + 2 + + name + keyword.other.special-method.dockerfile + + + match + ^\s*(?:(ONBUILD)\s+)?(CMD|ENTRYPOINT)\s + + + begin + " + beginCaptures + + 1 + + name + punctuation.definition.string.begin.dockerfile + + + end + " + endCaptures + + 1 + + name + punctuation.definition.string.end.dockerfile + + + name + string.quoted.double.dockerfile + patterns + + + match + \\. + name + constant.character.escaped.dockerfile + + + + + begin + ' + beginCaptures + + 1 + + name + punctuation.definition.string.begin.dockerfile + + + end + ' + endCaptures + + 1 + + name + punctuation.definition.string.end.dockerfile + + + name + string.quoted.single.dockerfile + patterns + + + match + \\. + name + constant.character.escaped.dockerfile + + + + + captures + + 1 + + name + punctuation.whitespace.comment.leading.dockerfile + + 2 + + name + comment.line.number-sign.dockerfile + + 3 + + name + punctuation.definition.comment.dockerfile + + + comment + comment.line + match + ^(\s*)((#).*$\n?) + + + scopeName + source.dockerfile + uuid + a39d8795-59d2-49af-aa00-fe74ee29576e + + diff --git a/contrib/syntax/textmate/Docker.tmbundle/info.plist b/contrib/syntax/textmate/Docker.tmbundle/info.plist new file mode 100644 index 00000000..239f4b0a --- /dev/null +++ b/contrib/syntax/textmate/Docker.tmbundle/info.plist @@ -0,0 +1,16 @@ + + + + + contactEmailRot13 + germ@andz.com.ar + contactName + GermanDZ + description + Helpers for Docker. + name + Docker + uuid + 8B9DDBAF-E65C-4E12-FFA7-467D4AA535B1 + + diff --git a/contrib/syntax/textmate/README.md b/contrib/syntax/textmate/README.md new file mode 100644 index 00000000..ce611018 --- /dev/null +++ b/contrib/syntax/textmate/README.md @@ -0,0 +1,17 @@ +# Docker.tmbundle + +Dockerfile syntax highlighting for TextMate and Sublime Text. + +## Install + +### Sublime Text + +Available for Sublime Text under [package control](https://sublime.wbond.net/packages/Dockerfile%20Syntax%20Highlighting). +Search for *Dockerfile Syntax Highlighting* + +### TextMate 2 + +You can install this bundle in TextMate by opening the preferences and going to the bundles tab. After installation it will be automatically updated for you. + +enjoy. 
+ diff --git a/contrib/syntax/textmate/REVIEWERS b/contrib/syntax/textmate/REVIEWERS new file mode 100644 index 00000000..965743df --- /dev/null +++ b/contrib/syntax/textmate/REVIEWERS @@ -0,0 +1 @@ +Asbjorn Enge (@asbjornenge) diff --git a/contrib/syntax/vim/LICENSE b/contrib/syntax/vim/LICENSE new file mode 100644 index 00000000..e67cdabd --- /dev/null +++ b/contrib/syntax/vim/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 Honza Pokorny +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/contrib/syntax/vim/README.md b/contrib/syntax/vim/README.md new file mode 100644 index 00000000..5aa9bd82 --- /dev/null +++ b/contrib/syntax/vim/README.md @@ -0,0 +1,26 @@ +dockerfile.vim +============== + +Syntax highlighting for Dockerfiles + +Installation +------------ +With [pathogen](https://github.com/tpope/vim-pathogen), the usual way... + +With [Vundle](https://github.com/gmarik/Vundle.vim) + + Plugin 'docker/docker' , {'rtp': '/contrib/syntax/vim/'} + +Features +-------- + +The syntax highlighting includes: + +* The directives (e.g. `FROM`) +* Strings +* Comments + +License +------- + +BSD, short and sweet diff --git a/contrib/syntax/vim/doc/dockerfile.txt b/contrib/syntax/vim/doc/dockerfile.txt new file mode 100644 index 00000000..e69e2b7b --- /dev/null +++ b/contrib/syntax/vim/doc/dockerfile.txt @@ -0,0 +1,18 @@ +*dockerfile.txt* Syntax highlighting for Dockerfiles + +Author: Honza Pokorny +License: BSD + +INSTALLATION *installation* + +Drop it on your Pathogen path and you're all set. + +FEATURES *features* + +The syntax highlighting includes: + +* The directives (e.g. 
FROM) +* Strings +* Comments + + vim:tw=78:et:ft=help:norl: diff --git a/contrib/syntax/vim/ftdetect/dockerfile.vim b/contrib/syntax/vim/ftdetect/dockerfile.vim new file mode 100644 index 00000000..ee10e5d6 --- /dev/null +++ b/contrib/syntax/vim/ftdetect/dockerfile.vim @@ -0,0 +1 @@ +au BufNewFile,BufRead [Dd]ockerfile,Dockerfile.* set filetype=dockerfile diff --git a/contrib/syntax/vim/syntax/dockerfile.vim b/contrib/syntax/vim/syntax/dockerfile.vim new file mode 100644 index 00000000..220a4db3 --- /dev/null +++ b/contrib/syntax/vim/syntax/dockerfile.vim @@ -0,0 +1,31 @@ +" dockerfile.vim - Syntax highlighting for Dockerfiles +" Maintainer: Honza Pokorny +" Version: 0.5 + + +if exists("b:current_syntax") + finish +endif + +let b:current_syntax = "dockerfile" + +syntax case ignore + +syntax match dockerfileKeyword /\v^\s*(ONBUILD\s+)?(ADD|CMD|ENTRYPOINT|ENV|EXPOSE|FROM|MAINTAINER|RUN|USER|LABEL|VOLUME|WORKDIR|COPY)\s/ +highlight link dockerfileKeyword Keyword + +syntax region dockerfileString start=/\v"/ skip=/\v\\./ end=/\v"/ +highlight link dockerfileString String + +syntax match dockerfileComment "\v^\s*#.*$" +highlight link dockerfileComment Comment + +set commentstring=#\ %s + +" match "RUN", "CMD", and "ENTRYPOINT" lines, and parse them as shell +let s:current_syntax = b:current_syntax +unlet b:current_syntax +syntax include @SH syntax/sh.vim +let b:current_syntax = s:current_syntax +syntax region shLine matchgroup=dockerfileKeyword start=/\v^\s*(RUN|CMD|ENTRYPOINT)\s/ end=/\v$/ contains=@SH +" since @SH will handle "\" as part of the same line automatically, this "just works" for line continuation too, but with the caveat that it will highlight "RUN echo '" followed by a newline as if it were a block because the "'" is shell line continuation... not sure how to fix that just yet (TODO) diff --git a/contrib/udev/80-docker.rules b/contrib/udev/80-docker.rules new file mode 100644 index 00000000..f934c017 --- /dev/null +++ b/contrib/udev/80-docker.rules @@ -0,0 +1,3 @@ +# hide docker's loopback devices from udisks, and thus from user desktops +SUBSYSTEM=="block", ENV{DM_NAME}=="docker-*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1" +SUBSYSTEM=="block", DEVPATH=="/devices/virtual/block/loop*", ATTR{loop/backing_file}=="/var/lib/docker/*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1" diff --git a/contrib/vagrant-docker/README.md b/contrib/vagrant-docker/README.md new file mode 100644 index 00000000..4ef9c287 --- /dev/null +++ b/contrib/vagrant-docker/README.md @@ -0,0 +1,50 @@ +# Vagrant integration + +Currently there are at least 4 different projects that we are aware of that deals +with integration with [Vagrant](http://vagrantup.com/) at different levels. One +approach is to use Docker as a [provisioner](http://docs.vagrantup.com/v2/provisioning/index.html) +which means you can create containers and pull base images on VMs using Docker's +CLI and the other is to use Docker as a [provider](http://docs.vagrantup.com/v2/providers/index.html), +meaning you can use Vagrant to control Docker containers. + + +### Provisioners + +* [Vocker](https://github.com/fgrehm/vocker) +* [Ventriloquist](https://github.com/fgrehm/ventriloquist) + +### Providers + +* [docker-provider](https://github.com/fgrehm/docker-provider) +* [vagrant-shell](https://github.com/destructuring/vagrant-shell) + +## Setting up Vagrant-docker with the Remote API + +The initial Docker upstart script will not work because it runs on `127.0.0.1`, which is not accessible to the host machine. 
Instead, we need to change the script to connect to `0.0.0.0`. To do this, modify `/etc/init/docker.conf` to look like this:
+
+```
+description "Docker daemon"
+
+start on filesystem and started lxc-net
+stop on runlevel [!2345]
+
+respawn
+
+script
+    /usr/bin/docker -d -H=tcp://0.0.0.0:2375
+end script
+```
+
+Once that's done, you need to set up an SSH tunnel between your host machine and the vagrant machine that's running Docker. This can be done by running the following command in a host terminal:
+
+```
+ssh -L 2375:localhost:2375 -p 2222 vagrant@localhost
+```
+
+(The first 2375 is the port your host can connect to, the second 2375 is the port Docker is listening on inside the vagrant machine, and 2222 is the port Vagrant provides for SSH. If VirtualBox is the VM you're using, you can see what value "2222" should be by going to: Network > Adapter 1 > Advanced > Port Forwarding in the VirtualBox GUI.)
+
+Note that because the port has been changed, to run docker commands from the command line you must run them like this:
+
+```
+sudo docker -H 0.0.0.0:2375 < commands for docker >
+```
diff --git a/daemon/README.md b/daemon/README.md
new file mode 100644
index 00000000..64bfcb55
--- /dev/null
+++ b/daemon/README.md
@@ -0,0 +1,10 @@
+This directory contains code pertaining to running containers and storing images.
+
+Code pertaining to running containers:
+
+ - execdriver
+ - networkdriver
+
+Code pertaining to storing images:
+
+ - graphdriver
diff --git a/daemon/archive.go b/daemon/archive.go
new file mode 100644
index 00000000..0d675a70
--- /dev/null
+++ b/daemon/archive.go
@@ -0,0 +1,324 @@
+package daemon
+
+import (
+	"errors"
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/chrootarchive"
+	"github.com/docker/docker/pkg/ioutils"
+)
+
+// ErrExtractPointNotDirectory is used to convey that the operation to extract
+// a tar archive to a directory in a container has failed because the specified
+// path does not refer to a directory.
+var ErrExtractPointNotDirectory = errors.New("extraction point is not a directory")
+
+// ContainerCopy performs a deprecated operation of archiving the resource at
+// the specified path in the container identified by the given name.
+func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) {
+	container, err := daemon.Get(name)
+	if err != nil {
+		return nil, err
+	}
+
+	if res[0] == '/' {
+		res = res[1:]
+	}
+
+	return container.Copy(res)
+}
+
+// ContainerStatPath stats the filesystem resource at the specified path in the
+// container identified by the given name.
+func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) {
+	container, err := daemon.Get(name)
+	if err != nil {
+		return nil, err
+	}
+
+	return container.StatPath(path)
+}
+
+// ContainerArchivePath creates an archive of the filesystem resource at the
+// specified path in the container identified by the given name. Returns a
+// tar archive of the resource and whether it was a directory or a single file.
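+//
+// (At the CLI level this is what backs "docker cp"; as a rough illustration,
+// a client with streaming support might run
+// "docker cp somecontainer:/etc/hostname -" to receive such a tar archive on
+// stdout. The exact CLI form is an assumption, not part of this file.)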
+func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) { + container, err := daemon.Get(name) + if err != nil { + return nil, nil, err + } + + return container.ArchivePath(path) +} + +// ContainerExtractToDir extracts the given archive to the specified location +// in the filesystem of the container identified by the given name. The given +// path must be of a directory in the container. If it is not, the error will +// be ErrExtractPointNotDirectory. If noOverwriteDirNonDir is true then it will +// be an error if unpacking the given content would cause an existing directory +// to be replaced with a non-directory and vice versa. +func (daemon *Daemon) ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error { + container, err := daemon.Get(name) + if err != nil { + return err + } + + return container.ExtractToDir(path, noOverwriteDirNonDir, content) +} + +// resolvePath resolves the given path in the container to a resource on the +// host. Returns a resolved path (absolute path to the resource on the host), +// the absolute path to the resource relative to the container's rootfs, and +// a error if the path points to outside the container's rootfs. +func (container *Container) resolvePath(path string) (resolvedPath, absPath string, err error) { + // Consider the given path as an absolute path in the container. + absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path) + + // Split the absPath into its Directory and Base components. We will + // resolve the dir in the scope of the container then append the base. + dirPath, basePath := filepath.Split(absPath) + + resolvedDirPath, err := container.GetResourcePath(dirPath) + if err != nil { + return "", "", err + } + + // resolvedDirPath will have been cleaned (no trailing path separators) so + // we can manually join it with the base path element. + resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath + + return resolvedPath, absPath, nil +} + +// statPath is the unexported version of StatPath. Locks and mounts should +// be aquired before calling this method and the given path should be fully +// resolved to a path on the host corresponding to the given absolute path +// inside the container. +func (container *Container) statPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) { + lstat, err := os.Lstat(resolvedPath) + if err != nil { + return nil, err + } + + var linkTarget string + if lstat.Mode()&os.ModeSymlink != 0 { + // Fully evaluate the symlink in the scope of the container rootfs. + hostPath, err := container.GetResourcePath(absPath) + if err != nil { + return nil, err + } + + linkTarget, err = filepath.Rel(container.basefs, hostPath) + if err != nil { + return nil, err + } + + // Make it an absolute path. + linkTarget = filepath.Join(string(filepath.Separator), linkTarget) + } + + return &types.ContainerPathStat{ + Name: filepath.Base(absPath), + Size: lstat.Size(), + Mode: lstat.Mode(), + Mtime: lstat.ModTime(), + LinkTarget: linkTarget, + }, nil +} + +// StatPath stats the filesystem resource at the specified path in this +// container. Returns stat info about the resource. 
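+//
+// (A sketch of how this surfaces over the remote API, assuming API version
+// 1.20: a "HEAD /containers/{name}/archive?path=/some/path" request returns
+// the stat data base64-encoded in the X-Docker-Container-Path-Stat response
+// header.)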
+func (container *Container) StatPath(path string) (stat *types.ContainerPathStat, err error) { + container.Lock() + defer container.Unlock() + + if err = container.Mount(); err != nil { + return nil, err + } + defer container.Unmount() + + err = container.mountVolumes() + defer container.UnmountVolumes(true) + if err != nil { + return nil, err + } + + resolvedPath, absPath, err := container.resolvePath(path) + if err != nil { + return nil, err + } + + return container.statPath(resolvedPath, absPath) +} + +// ArchivePath creates an archive of the filesystem resource at the specified +// path in this container. Returns a tar archive of the resource and stat info +// about the resource. +func (container *Container) ArchivePath(path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) { + container.Lock() + + defer func() { + if err != nil { + // Wait to unlock the container until the archive is fully read + // (see the ReadCloseWrapper func below) or if there is an error + // before that occurs. + container.Unlock() + } + }() + + if err = container.Mount(); err != nil { + return nil, nil, err + } + + defer func() { + if err != nil { + // unmount any volumes + container.UnmountVolumes(true) + // unmount the container's rootfs + container.Unmount() + } + }() + + if err = container.mountVolumes(); err != nil { + return nil, nil, err + } + + resolvedPath, absPath, err := container.resolvePath(path) + if err != nil { + return nil, nil, err + } + + stat, err = container.statPath(resolvedPath, absPath) + if err != nil { + return nil, nil, err + } + + // We need to rebase the archive entries if the last element of the + // resolved path was a symlink that was evaluated and is now different + // than the requested path. For example, if the given path was "/foo/bar/", + // but it resolved to "/var/lib/docker/containers/{id}/foo/baz/", we want + // to ensure that the archive entries start with "bar" and not "baz". This + // also catches the case when the root directory of the container is + // requested: we want the archive entries to start with "/" and not the + // container ID. + data, err := archive.TarResourceRebase(resolvedPath, filepath.Base(absPath)) + if err != nil { + return nil, nil, err + } + + content = ioutils.NewReadCloserWrapper(data, func() error { + err := data.Close() + container.UnmountVolumes(true) + container.Unmount() + container.Unlock() + return err + }) + + container.LogEvent("archive-path") + + return content, stat, nil +} + +// ExtractToDir extracts the given tar archive to the specified location in the +// filesystem of this container. The given path must be of a directory in the +// container. If it is not, the error will be ErrExtractPointNotDirectory. If +// noOverwriteDirNonDir is true then it will be an error if unpacking the +// given content would cause an existing directory to be replaced with a non- +// directory and vice versa. +func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool, content io.Reader) (err error) { + container.Lock() + defer container.Unlock() + + if err = container.Mount(); err != nil { + return err + } + defer container.Unmount() + + err = container.mountVolumes() + defer container.UnmountVolumes(true) + if err != nil { + return err + } + + // The destination path needs to be resolved to a host path, with all + // symbolic links followed in the scope of the container's rootfs. 
Note + // that we do not use `container.resolvePath(path)` here because we need + // to also evaluate the last path element if it is a symlink. This is so + // that you can extract an archive to a symlink that points to a directory. + + // Consider the given path as an absolute path in the container. + absPath := archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path) + + // This will evaluate the last path element if it is a symlink. + resolvedPath, err := container.GetResourcePath(absPath) + if err != nil { + return err + } + + stat, err := os.Lstat(resolvedPath) + if err != nil { + return err + } + + if !stat.IsDir() { + return ErrExtractPointNotDirectory + } + + // Need to check if the path is in a volume. If it is, it cannot be in a + // read-only volume. If it is not in a volume, the container cannot be + // configured with a read-only rootfs. + + // Use the resolved path relative to the container rootfs as the new + // absPath. This way we fully follow any symlinks in a volume that may + // lead back outside the volume. + baseRel, err := filepath.Rel(container.basefs, resolvedPath) + if err != nil { + return err + } + // Make it an absolute path. + absPath = filepath.Join(string(filepath.Separator), baseRel) + + toVolume, err := checkIfPathIsInAVolume(container, absPath) + if err != nil { + return err + } + + if !toVolume && container.hostConfig.ReadonlyRootfs { + return ErrContainerRootfsReadonly + } + + options := &archive.TarOptions{ + ChownOpts: &archive.TarChownOptions{ + UID: 0, GID: 0, // TODO: use config.User? Remap to userns root? + }, + NoOverwriteDirNonDir: noOverwriteDirNonDir, + } + + if err := chrootarchive.Untar(content, resolvedPath, options); err != nil { + return err + } + + container.LogEvent("extract-to-dir") + + return nil +} + +// checkIfPathIsInAVolume checks if the path is in a volume. If it is, it +// cannot be in a read-only volume. If it is not in a volume, the container +// cannot be configured with a read-only rootfs. 
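+//
+// (Illustrative example, not a test transcript: for a container created with
+// something like "docker run --read-only -v /data busybox", extracting an
+// archive into /data should be allowed because /data is a writable volume,
+// while extracting into /etc should fail with ErrContainerRootfsReadonly.)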
+func checkIfPathIsInAVolume(container *Container, absPath string) (bool, error) { + var toVolume bool + for _, mnt := range container.MountPoints { + if toVolume = mnt.hasResource(absPath); toVolume { + if mnt.RW { + break + } + return false, ErrVolumeReadonly + } + } + return toVolume, nil +} diff --git a/daemon/attach.go b/daemon/attach.go new file mode 100644 index 00000000..79ffa8df --- /dev/null +++ b/daemon/attach.go @@ -0,0 +1,50 @@ +package daemon + +import ( + "io" + + "github.com/docker/docker/pkg/stdcopy" +) + +type ContainerAttachWithLogsConfig struct { + InStream io.ReadCloser + OutStream io.Writer + UseStdin, UseStdout, UseStderr bool + Logs, Stream bool +} + +func (daemon *Daemon) ContainerAttachWithLogs(container *Container, c *ContainerAttachWithLogsConfig) error { + var errStream io.Writer + + if !container.Config.Tty { + errStream = stdcopy.NewStdWriter(c.OutStream, stdcopy.Stderr) + c.OutStream = stdcopy.NewStdWriter(c.OutStream, stdcopy.Stdout) + } else { + errStream = c.OutStream + } + + var stdin io.ReadCloser + var stdout, stderr io.Writer + + if c.UseStdin { + stdin = c.InStream + } + if c.UseStdout { + stdout = c.OutStream + } + if c.UseStderr { + stderr = errStream + } + + return container.AttachWithLogs(stdin, stdout, stderr, c.Logs, c.Stream) +} + +type ContainerWsAttachWithLogsConfig struct { + InStream io.ReadCloser + OutStream, ErrStream io.Writer + Logs, Stream bool +} + +func (daemon *Daemon) ContainerWsAttachWithLogs(container *Container, c *ContainerWsAttachWithLogsConfig) error { + return container.AttachWithLogs(c.InStream, c.OutStream, c.ErrStream, c.Logs, c.Stream) +} diff --git a/daemon/changes.go b/daemon/changes.go new file mode 100644 index 00000000..55b230b9 --- /dev/null +++ b/daemon/changes.go @@ -0,0 +1,13 @@ +package daemon + +import "github.com/docker/docker/pkg/archive" + +// ContainerChanges returns a list of container fs changes +func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) { + container, err := daemon.Get(name) + if err != nil { + return nil, err + } + + return container.Changes() +} diff --git a/daemon/commit.go b/daemon/commit.go new file mode 100644 index 00000000..5921d77e --- /dev/null +++ b/daemon/commit.go @@ -0,0 +1,60 @@ +package daemon + +import ( + "github.com/docker/docker/image" + "github.com/docker/docker/runconfig" +) + +type ContainerCommitConfig struct { + Pause bool + Repo string + Tag string + Author string + Comment string + Config *runconfig.Config +} + +// Commit creates a new filesystem image from the current state of a container. 
+// The image can optionally be tagged into a repository +func (daemon *Daemon) Commit(container *Container, c *ContainerCommitConfig) (*image.Image, error) { + if c.Pause && !container.IsPaused() { + container.Pause() + defer container.Unpause() + } + + rwTar, err := container.ExportRw() + if err != nil { + return nil, err + } + defer func() { + if rwTar != nil { + rwTar.Close() + } + }() + + // Create a new image from the container's base layers + a new layer from container changes + var ( + containerID, parentImageID string + containerConfig *runconfig.Config + ) + + if container != nil { + containerID = container.ID + parentImageID = container.ImageID + containerConfig = container.Config + } + + img, err := daemon.graph.Create(rwTar, containerID, parentImageID, c.Comment, c.Author, containerConfig, c.Config) + if err != nil { + return nil, err + } + + // Register the image if needed + if c.Repo != "" { + if err := daemon.repositories.Tag(c.Repo, c.Tag, img.ID, true); err != nil { + return img, err + } + } + container.LogEvent("commit") + return img, nil +} diff --git a/daemon/config.go b/daemon/config.go new file mode 100644 index 00000000..874ea491 --- /dev/null +++ b/daemon/config.go @@ -0,0 +1,62 @@ +package daemon + +import ( + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/runconfig" +) + +const ( + defaultNetworkMtu = 1500 + disableNetworkBridge = "none" +) + +// CommonConfig defines the configuration of a docker daemon which are +// common across platforms. +type CommonConfig struct { + AutoRestart bool + Bridge bridgeConfig // Bridge holds bridge network specific configuration. + Context map[string][]string + CorsHeaders string + DisableBridge bool + Dns []string + DnsSearch []string + EnableCors bool + ExecDriver string + ExecOptions []string + ExecRoot string + GraphDriver string + GraphOptions []string + Labels []string + LogConfig runconfig.LogConfig + Mtu int + Pidfile string + Root string + TrustKeyPath string + DefaultNetwork string + NetworkKVStore string +} + +// InstallCommonFlags adds command-line options to the top-level flag parser for +// the current process. +// Subsequent calls to `flag.Parse` will populate config with values parsed +// from the command-line. 
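+//
+// For orientation (a sketch, not normative): with pkg/mflag each name in the
+// slice gains a single leading dash, so []string{"p", "-pidfile"} registers
+// both -p and --pidfile, while a leading "#" marks a deprecated name. An
+// invocation such as
+//
+//	docker -d --label env=prod --label tier=web --mtu 1400
+//
+// would then leave config.Labels == []string{"env=prod", "tier=web"} and
+// config.Mtu == 1400 (values invented for the example).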
+func (config *Config) InstallCommonFlags(cmd *flag.FlagSet, usageFn func(string) string) { + cmd.Var(opts.NewListOptsRef(&config.GraphOptions, nil), []string{"-storage-opt"}, usageFn("Set storage driver options")) + cmd.Var(opts.NewListOptsRef(&config.ExecOptions, nil), []string{"-exec-opt"}, usageFn("Set exec driver options")) + cmd.StringVar(&config.Pidfile, []string{"p", "-pidfile"}, defaultPidFile, usageFn("Path to use for daemon PID file")) + cmd.StringVar(&config.Root, []string{"g", "-graph"}, defaultGraph, usageFn("Root of the Docker runtime")) + cmd.StringVar(&config.ExecRoot, []string{"-exec-root"}, "/var/run/docker", usageFn("Root of the Docker execdriver")) + cmd.BoolVar(&config.AutoRestart, []string{"#r", "#-restart"}, true, usageFn("--restart on the daemon has been deprecated in favor of --restart policies on docker run")) + cmd.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", usageFn("Storage driver to use")) + cmd.StringVar(&config.ExecDriver, []string{"e", "-exec-driver"}, defaultExec, usageFn("Exec driver to use")) + cmd.IntVar(&config.Mtu, []string{"#mtu", "-mtu"}, 0, usageFn("Set the containers network MTU")) + cmd.BoolVar(&config.EnableCors, []string{"#api-enable-cors", "#-api-enable-cors"}, false, usageFn("Enable CORS headers in the remote API, this is deprecated by --api-cors-header")) + cmd.StringVar(&config.CorsHeaders, []string{"-api-cors-header"}, "", usageFn("Set CORS headers in the remote API")) + // FIXME: why the inconsistency between "hosts" and "sockets"? + cmd.Var(opts.NewListOptsRef(&config.Dns, opts.ValidateIPAddress), []string{"#dns", "-dns"}, usageFn("DNS server to use")) + cmd.Var(opts.NewListOptsRef(&config.DnsSearch, opts.ValidateDNSSearch), []string{"-dns-search"}, usageFn("DNS search domains to use")) + cmd.Var(opts.NewListOptsRef(&config.Labels, opts.ValidateLabel), []string{"-label"}, usageFn("Set key=value labels to the daemon")) + cmd.StringVar(&config.LogConfig.Type, []string{"-log-driver"}, "json-file", usageFn("Default driver for container logs")) + cmd.Var(opts.NewMapOpts(config.LogConfig.Config, nil), []string{"-log-opt"}, usageFn("Set log driver options")) +} diff --git a/daemon/config_experimental.go b/daemon/config_experimental.go new file mode 100644 index 00000000..b3a1d3fb --- /dev/null +++ b/daemon/config_experimental.go @@ -0,0 +1,10 @@ +// +build experimental + +package daemon + +import flag "github.com/docker/docker/pkg/mflag" + +func (config *Config) attachExperimentalFlags(cmd *flag.FlagSet, usageFn func(string) string) { + cmd.StringVar(&config.DefaultNetwork, []string{"-default-network"}, "", usageFn("Set default network")) + cmd.StringVar(&config.NetworkKVStore, []string{"-kv-store"}, "", usageFn("Set KV Store configuration")) +} diff --git a/daemon/config_linux.go b/daemon/config_linux.go new file mode 100644 index 00000000..783637ea --- /dev/null +++ b/daemon/config_linux.go @@ -0,0 +1,76 @@ +package daemon + +import ( + "net" + + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/ulimit" +) + +var ( + defaultPidFile = "/var/run/docker.pid" + defaultGraph = "/var/lib/docker" + defaultExec = "native" +) + +// Config defines the configuration of a docker daemon. +// These are the configuration settings that you pass +// to the docker daemon when you launch it with say: `docker -d -e lxc` +type Config struct { + CommonConfig + + // Fields below here are platform specific. 
+ + EnableSelinuxSupport bool + SocketGroup string + Ulimits map[string]*ulimit.Ulimit +} + +// bridgeConfig stores all the bridge driver specific +// configuration. +type bridgeConfig struct { + EnableIPv6 bool + EnableIPTables bool + EnableIPForward bool + EnableIPMasq bool + EnableUserlandProxy bool + DefaultIP net.IP + Iface string + IP string + FixedCIDR string + FixedCIDRv6 string + DefaultGatewayIPv4 net.IP + DefaultGatewayIPv6 net.IP + InterContainerCommunication bool +} + +// InstallFlags adds command-line options to the top-level flag parser for +// the current process. +// Subsequent calls to `flag.Parse` will populate config with values parsed +// from the command-line. +func (config *Config) InstallFlags(cmd *flag.FlagSet, usageFn func(string) string) { + // First handle install flags which are consistent cross-platform + config.InstallCommonFlags(cmd, usageFn) + + // Then platform-specific install flags + cmd.BoolVar(&config.EnableSelinuxSupport, []string{"-selinux-enabled"}, false, usageFn("Enable selinux support")) + cmd.StringVar(&config.SocketGroup, []string{"G", "-group"}, "docker", usageFn("Group for the unix socket")) + config.Ulimits = make(map[string]*ulimit.Ulimit) + cmd.Var(opts.NewUlimitOpt(&config.Ulimits), []string{"-default-ulimit"}, usageFn("Set default ulimits for containers")) + cmd.BoolVar(&config.Bridge.EnableIPTables, []string{"#iptables", "-iptables"}, true, usageFn("Enable addition of iptables rules")) + cmd.BoolVar(&config.Bridge.EnableIPForward, []string{"#ip-forward", "-ip-forward"}, true, usageFn("Enable net.ipv4.ip_forward")) + cmd.BoolVar(&config.Bridge.EnableIPMasq, []string{"-ip-masq"}, true, usageFn("Enable IP masquerading")) + cmd.BoolVar(&config.Bridge.EnableIPv6, []string{"-ipv6"}, false, usageFn("Enable IPv6 networking")) + cmd.StringVar(&config.Bridge.IP, []string{"#bip", "-bip"}, "", usageFn("Specify network bridge IP")) + cmd.StringVar(&config.Bridge.Iface, []string{"b", "-bridge"}, "", usageFn("Attach containers to a network bridge")) + cmd.StringVar(&config.Bridge.FixedCIDR, []string{"-fixed-cidr"}, "", usageFn("IPv4 subnet for fixed IPs")) + cmd.StringVar(&config.Bridge.FixedCIDRv6, []string{"-fixed-cidr-v6"}, "", usageFn("IPv6 subnet for fixed IPs")) + cmd.Var(opts.NewIpOpt(&config.Bridge.DefaultGatewayIPv4, ""), []string{"-default-gateway"}, usageFn("Container default gateway IPv4 address")) + cmd.Var(opts.NewIpOpt(&config.Bridge.DefaultGatewayIPv6, ""), []string{"-default-gateway-v6"}, usageFn("Container default gateway IPv6 address")) + cmd.BoolVar(&config.Bridge.InterContainerCommunication, []string{"#icc", "-icc"}, true, usageFn("Enable inter-container communication")) + cmd.Var(opts.NewIpOpt(&config.Bridge.DefaultIP, "0.0.0.0"), []string{"#ip", "-ip"}, usageFn("Default IP when binding container ports")) + cmd.BoolVar(&config.Bridge.EnableUserlandProxy, []string{"-userland-proxy"}, true, usageFn("Use userland proxy for loopback traffic")) + + config.attachExperimentalFlags(cmd, usageFn) +} diff --git a/daemon/config_stub.go b/daemon/config_stub.go new file mode 100644 index 00000000..796e6b6e --- /dev/null +++ b/daemon/config_stub.go @@ -0,0 +1,8 @@ +// +build !experimental + +package daemon + +import flag "github.com/docker/docker/pkg/mflag" + +func (config *Config) attachExperimentalFlags(cmd *flag.FlagSet, usageFn func(string) string) { +} diff --git a/daemon/config_windows.go b/daemon/config_windows.go new file mode 100644 index 00000000..dd7bb82a --- /dev/null +++ b/daemon/config_windows.go @@ -0,0 +1,41 @@ +package daemon 
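+
+// For orientation: on a stock Windows installation the programdata
+// environment variable typically resolves to C:\ProgramData, so the defaults
+// below come out as C:\ProgramData\docker.pid and C:\ProgramData\docker.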
+ +import ( + "os" + + flag "github.com/docker/docker/pkg/mflag" +) + +var ( + defaultPidFile = os.Getenv("programdata") + string(os.PathSeparator) + "docker.pid" + defaultGraph = os.Getenv("programdata") + string(os.PathSeparator) + "docker" + defaultExec = "windows" +) + +// bridgeConfig stores all the bridge driver specific +// configuration. +type bridgeConfig struct { + VirtualSwitchName string +} + +// Config defines the configuration of a docker daemon. +// These are the configuration settings that you pass +// to the docker daemon when you launch it with say: `docker -d -e windows` +type Config struct { + CommonConfig + + // Fields below here are platform specific. (There are none presently + // for the Windows daemon.) +} + +// InstallFlags adds command-line options to the top-level flag parser for +// the current process. +// Subsequent calls to `flag.Parse` will populate config with values parsed +// from the command-line. +func (config *Config) InstallFlags(cmd *flag.FlagSet, usageFn func(string) string) { + // First handle install flags which are consistent cross-platform + config.InstallCommonFlags(cmd, usageFn) + + // Then platform-specific install flags. + cmd.StringVar(&config.Bridge.VirtualSwitchName, []string{"b", "-bridge"}, "", "Attach containers to a virtual switch") +} diff --git a/daemon/container.go b/daemon/container.go new file mode 100644 index 00000000..49c1f41e --- /dev/null +++ b/daemon/container.go @@ -0,0 +1,1243 @@ +package daemon + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "sync" + "syscall" + "time" + + "github.com/opencontainers/runc/libcontainer/label" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/jsonfilelog" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/broadcastwriter" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/nat" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/volume" +) + +var ( + ErrNotATTY = errors.New("The PTY is not a file") + ErrNoTTY = errors.New("No PTY found") + ErrContainerStart = errors.New("The container failed to start. Unknown error") + ErrContainerStartTimeout = errors.New("The container failed to start due to timed out.") + ErrContainerRootfsReadonly = errors.New("container rootfs is marked read-only") +) + +type ErrContainerNotRunning struct { + id string +} + +func (e ErrContainerNotRunning) Error() string { + return fmt.Sprintf("Container %s is not running", e.id) +} + +type StreamConfig struct { + stdout *broadcastwriter.BroadcastWriter + stderr *broadcastwriter.BroadcastWriter + stdin io.ReadCloser + stdinPipe io.WriteCloser +} + +// CommonContainer holds the settings for a container which are applicable +// across all platforms supported by the daemon. +type CommonContainer struct { + StreamConfig + + *State `json:"State"` // Needed for remote api version <= 1.11 + root string // Path to the "home" of the container, including metadata. 
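+ // (For orientation, under the default layout: root above is typically
+ // /var/lib/docker/containers/<id>, holding config.json and hostconfig.json,
+ // while basefs below is the graphdriver mountpoint of the container's
+ // rootfs, e.g. /var/lib/docker/aufs/mnt/<id> with the aufs driver.)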
+ basefs string // Path to the graphdriver mountpoint + + ID string + Created time.Time + Path string + Args []string + Config *runconfig.Config + ImageID string `json:"Image"` + NetworkSettings *network.Settings + ResolvConfPath string + HostnamePath string + HostsPath string + LogPath string + Name string + Driver string + ExecDriver string + MountLabel, ProcessLabel string + RestartCount int + UpdateDns bool + HasBeenStartedBefore bool + + MountPoints map[string]*mountPoint + Volumes map[string]string // Deprecated since 1.7, kept for backwards compatibility + VolumesRW map[string]bool // Deprecated since 1.7, kept for backwards compatibility + + hostConfig *runconfig.HostConfig + command *execdriver.Command + + monitor *containerMonitor + execCommands *execStore + daemon *Daemon + // logDriver for closing + logDriver logger.Logger + logCopier *logger.Copier +} + +func (container *Container) FromDisk() error { + pth, err := container.jsonPath() + if err != nil { + return err + } + + jsonSource, err := os.Open(pth) + if err != nil { + return err + } + defer jsonSource.Close() + + dec := json.NewDecoder(jsonSource) + + // Load container settings + // udp broke compat of docker.PortMapping, but it's not used when loading a container, we can skip it + if err := dec.Decode(container); err != nil && !strings.Contains(err.Error(), "docker.PortMapping") { + return err + } + + if err := label.ReserveLabel(container.ProcessLabel); err != nil { + return err + } + return container.readHostConfig() +} + +func (container *Container) toDisk() error { + data, err := json.Marshal(container) + if err != nil { + return err + } + + pth, err := container.jsonPath() + if err != nil { + return err + } + + if err := ioutil.WriteFile(pth, data, 0666); err != nil { + return err + } + + return container.WriteHostConfig() +} + +func (container *Container) ToDisk() error { + container.Lock() + err := container.toDisk() + container.Unlock() + return err +} + +func (container *Container) readHostConfig() error { + container.hostConfig = &runconfig.HostConfig{} + // If the hostconfig file does not exist, do not read it. + // (We still have to initialize container.hostConfig, + // but that's OK, since we just did that above.) + pth, err := container.hostConfigPath() + if err != nil { + return err + } + + _, err = os.Stat(pth) + if os.IsNotExist(err) { + return nil + } + + f, err := os.Open(pth) + if err != nil { + return err + } + defer f.Close() + + return json.NewDecoder(f).Decode(&container.hostConfig) +} + +func (container *Container) WriteHostConfig() error { + data, err := json.Marshal(container.hostConfig) + if err != nil { + return err + } + + pth, err := container.hostConfigPath() + if err != nil { + return err + } + + return ioutil.WriteFile(pth, data, 0666) +} + +func (container *Container) LogEvent(action string) { + d := container.daemon + d.EventsService.Log( + action, + container.ID, + container.Config.Image, + ) +} + +// Evaluates `path` in the scope of the container's basefs, with proper path +// sanitisation. Symlinks are all scoped to the basefs of the container, as +// though the container's basefs was `/`. +// +// The basefs of a container is the host-facing path which is bind-mounted as +// `/` inside the container. This method is essentially used to access a +// particular path inside the container as though you were a process in that +// container. 
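+//
+// A sketch of the intent (paths invented for the example): if basefs is
+// /var/lib/docker/aufs/mnt/<id> and /tmp/link inside the container is a
+// symlink pointing at /etc, then
+//
+//	p, err := container.GetResourcePath("/tmp/link/passwd")
+//
+// resolves to <basefs>/etc/passwd: the link is followed inside the scope of
+// basefs, never out to the host's real /etc.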
+// +// NOTE: The returned path is *only* safely scoped inside the container's basefs +// if no component of the returned path changes (such as a component +// symlinking to a different path) between using this method and using the +// path. See symlink.FollowSymlinkInScope for more details. +func (container *Container) GetResourcePath(path string) (string, error) { + // IMPORTANT - These are paths on the OS where the daemon is running, hence + // any filepath operations must be done in an OS agnostic way. + cleanPath := filepath.Join(string(os.PathSeparator), path) + r, e := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, cleanPath), container.basefs) + return r, e +} + +// Evaluates `path` in the scope of the container's root, with proper path +// sanitisation. Symlinks are all scoped to the root of the container, as +// though the container's root was `/`. +// +// The root of a container is the host-facing configuration metadata directory. +// Only use this method to safely access the container's `container.json` or +// other metadata files. If in doubt, use container.GetResourcePath. +// +// NOTE: The returned path is *only* safely scoped inside the container's root +// if no component of the returned path changes (such as a component +// symlinking to a different path) between using this method and using the +// path. See symlink.FollowSymlinkInScope for more details. +func (container *Container) GetRootResourcePath(path string) (string, error) { + // IMPORTANT - These are paths on the OS where the daemon is running, hence + // any filepath operations must be done in an OS agnostic way. + cleanPath := filepath.Join(string(os.PathSeparator), path) + return symlink.FollowSymlinkInScope(filepath.Join(container.root, cleanPath), container.root) +} + +func (container *Container) Start() (err error) { + container.Lock() + defer container.Unlock() + + if container.Running { + return nil + } + + if container.removalInProgress || container.Dead { + return fmt.Errorf("Container is marked for removal and cannot be started.") + } + + // if we encounter an error during start we need to ensure that any other + // setup has been cleaned up properly + defer func() { + if err != nil { + container.setError(err) + // if no one else has set it, make sure we don't leave it at zero + if container.ExitCode == 0 { + container.ExitCode = 128 + } + container.toDisk() + container.cleanup() + container.LogEvent("die") + } + }() + + if err := container.Mount(); err != nil { + return err + } + + // No-op if non-Windows. Once the container filesystem is mounted, + // prepare the layer to boot using the Windows driver. 
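+ // (PrepareStorage pairs with the CleanupStorage call in cleanup(); on
+ // non-Windows builds both are expected to be no-op stubs returning nil.)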
+ if err := container.PrepareStorage(); err != nil { + return err + } + + if err := container.initializeNetworking(); err != nil { + return err + } + linkedEnv, err := container.setupLinkedContainers() + if err != nil { + return err + } + if err := container.setupWorkingDirectory(); err != nil { + return err + } + env := container.createDaemonEnvironment(linkedEnv) + if err := populateCommand(container, env); err != nil { + return err + } + + mounts, err := container.setupMounts() + if err != nil { + return err + } + + container.command.Mounts = mounts + return container.waitForStart() +} + +func (container *Container) Run() error { + if err := container.Start(); err != nil { + return err + } + container.HasBeenStartedBefore = true + container.WaitStop(-1 * time.Second) + return nil +} + +func (container *Container) Output() (output []byte, err error) { + pipe := container.StdoutPipe() + defer pipe.Close() + if err := container.Start(); err != nil { + return nil, err + } + output, err = ioutil.ReadAll(pipe) + container.WaitStop(-1 * time.Second) + return output, err +} + +// StreamConfig.StdinPipe returns a WriteCloser which can be used to feed data +// to the standard input of the container's active process. +// Container.StdoutPipe and Container.StderrPipe each return a ReadCloser +// which can be used to retrieve the standard output (and error) generated +// by the container's active process. The output (and error) are actually +// copied and delivered to all StdoutPipe and StderrPipe consumers, using +// a kind of "broadcaster". + +func (streamConfig *StreamConfig) StdinPipe() io.WriteCloser { + return streamConfig.stdinPipe +} + +func (streamConfig *StreamConfig) StdoutPipe() io.ReadCloser { + reader, writer := io.Pipe() + streamConfig.stdout.AddWriter(writer) + return ioutils.NewBufReader(reader) +} + +func (streamConfig *StreamConfig) StderrPipe() io.ReadCloser { + reader, writer := io.Pipe() + streamConfig.stderr.AddWriter(writer) + return ioutils.NewBufReader(reader) +} + +func (container *Container) isNetworkAllocated() bool { + return container.NetworkSettings.IPAddress != "" +} + +// cleanup releases any network resources allocated to the container along with any rules +// around how containers are linked together. It also unmounts the container's root filesystem. +func (container *Container) cleanup() { + container.ReleaseNetwork() + + disableAllActiveLinks(container) + + if err := container.CleanupStorage(); err != nil { + logrus.Errorf("%v: Failed to cleanup storage: %v", container.ID, err) + } + + if err := container.Unmount(); err != nil { + logrus.Errorf("%v: Failed to umount filesystem: %v", container.ID, err) + } + + for _, eConfig := range container.execCommands.s { + container.daemon.unregisterExecCommand(eConfig) + } + + container.UnmountVolumes(false) +} + +func (container *Container) KillSig(sig int) error { + logrus.Debugf("Sending %d to %s", sig, container.ID) + container.Lock() + defer container.Unlock() + + // We could unpause the container for them rather than returning this error + if container.Paused { + return fmt.Errorf("Container %s is paused. Unpause the container before stopping", container.ID) + } + + if !container.Running { + return ErrContainerNotRunning{container.ID} + } + + // signal to the monitor that it should not restart the container + // after we send the kill signal + container.monitor.ExitOnNext() + + // if the container is currently restarting we do not need to send the signal + // to the process. 
Telling the monitor that it should exit on its next event
+ // loop is enough
+ if container.Restarting {
+ return nil
+ }
+
+ if err := container.daemon.Kill(container, sig); err != nil {
+ return err
+ }
+ container.LogEvent("kill")
+ return nil
+}
+
+// Wrapper around KillSig(), suppressing the "no such process" error.
+func (container *Container) killPossiblyDeadProcess(sig int) error {
+ err := container.KillSig(sig)
+ if err == syscall.ESRCH {
+ logrus.Debugf("Cannot kill process (pid=%d) with signal %d: no such process.", container.GetPid(), sig)
+ return nil
+ }
+ return err
+}
+
+func (container *Container) Pause() error {
+ container.Lock()
+ defer container.Unlock()
+
+ // We cannot pause a container that is not running
+ if !container.Running {
+ return ErrContainerNotRunning{container.ID}
+ }
+
+ // We cannot pause a container that is already paused
+ if container.Paused {
+ return fmt.Errorf("Container %s is already paused", container.ID)
+ }
+
+ if err := container.daemon.execDriver.Pause(container.command); err != nil {
+ return err
+ }
+ container.Paused = true
+ container.LogEvent("pause")
+ return nil
+}
+
+func (container *Container) Unpause() error {
+ container.Lock()
+ defer container.Unlock()
+
+ // We cannot unpause a container that is not running
+ if !container.Running {
+ return ErrContainerNotRunning{container.ID}
+ }
+
+ // We cannot unpause a container that is not paused
+ if !container.Paused {
+ return fmt.Errorf("Container %s is not paused", container.ID)
+ }
+
+ if err := container.daemon.execDriver.Unpause(container.command); err != nil {
+ return err
+ }
+ container.Paused = false
+ container.LogEvent("unpause")
+ return nil
+}
+
+func (container *Container) Kill() error {
+ if !container.IsRunning() {
+ return ErrContainerNotRunning{container.ID}
+ }
+
+ // 1. Send SIGKILL
+ if err := container.killPossiblyDeadProcess(9); err != nil {
+ // While normally we might "return err" here, we're not going to
+ // because if we can't stop the container by this point then
+ // it's probably because it's already stopped. Meaning, between
+ // the time of the IsRunning() call above and now, it stopped.
+ // Also, since the err return will be exec driver specific, we can't
+ // look for any particular (common) error that would indicate
+ // that the process is already dead vs something else going wrong.
+ // So, instead we'll give it up to 2 more seconds to complete and if
+ // by that time the container is still running, then the error
+ // we got is probably valid and so we return it to the caller.
+
+ if container.IsRunning() {
+ container.WaitStop(2 * time.Second)
+ if container.IsRunning() {
+ return err
+ }
+ }
+ }
+
+ // 2. Wait for the process to die; as a last resort, try to kill the process directly
+ if err := killProcessDirectly(container); err != nil {
+ return err
+ }
+
+ container.WaitStop(-1 * time.Second)
+ return nil
+}
+
+func (container *Container) Stop(seconds int) error {
+ if !container.IsRunning() {
+ return nil
+ }
+
+ // 1. Send a SIGTERM
+ if err := container.killPossiblyDeadProcess(15); err != nil {
+ logrus.Infof("Failed to send SIGTERM to the process, force killing")
+ if err := container.killPossiblyDeadProcess(9); err != nil {
+ return err
+ }
+ }
+
+ // 2. Wait for the process to exit on its own
+ if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil {
+ logrus.Infof("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds)
+ // 3.
If it doesn't, then send SIGKILL + if err := container.Kill(); err != nil { + container.WaitStop(-1 * time.Second) + return err + } + } + + container.LogEvent("stop") + return nil +} + +func (container *Container) Restart(seconds int) error { + // Avoid unnecessarily unmounting and then directly mounting + // the container when the container stops and then starts + // again + if err := container.Mount(); err == nil { + defer container.Unmount() + } + + if err := container.Stop(seconds); err != nil { + return err + } + + if err := container.Start(); err != nil { + return err + } + + container.LogEvent("restart") + return nil +} + +func (container *Container) Resize(h, w int) error { + if !container.IsRunning() { + return ErrContainerNotRunning{container.ID} + } + if err := container.command.ProcessConfig.Terminal.Resize(h, w); err != nil { + return err + } + container.LogEvent("resize") + return nil +} + +func (container *Container) Export() (archive.Archive, error) { + if err := container.Mount(); err != nil { + return nil, err + } + + archive, err := archive.Tar(container.basefs, archive.Uncompressed) + if err != nil { + container.Unmount() + return nil, err + } + arch := ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + container.Unmount() + return err + }) + container.LogEvent("export") + return arch, err +} + +func (container *Container) Mount() error { + return container.daemon.Mount(container) +} + +func (container *Container) changes() ([]archive.Change, error) { + return container.daemon.Changes(container) +} + +func (container *Container) Changes() ([]archive.Change, error) { + container.Lock() + defer container.Unlock() + return container.changes() +} + +func (container *Container) GetImage() (*image.Image, error) { + if container.daemon == nil { + return nil, fmt.Errorf("Can't get image of unregistered container") + } + return container.daemon.graph.Get(container.ImageID) +} + +func (container *Container) Unmount() error { + return container.daemon.Unmount(container) +} + +func (container *Container) hostConfigPath() (string, error) { + return container.GetRootResourcePath("hostconfig.json") +} + +func (container *Container) jsonPath() (string, error) { + return container.GetRootResourcePath("config.json") +} + +// This method must be exported to be used from the lxc template +// This directory is only usable when the container is running +func (container *Container) RootfsPath() string { + return container.basefs +} + +func validateID(id string) error { + if id == "" { + return fmt.Errorf("Invalid empty id") + } + return nil +} + +func (container *Container) Copy(resource string) (rc io.ReadCloser, err error) { + container.Lock() + + defer func() { + if err != nil { + // Wait to unlock the container until the archive is fully read + // (see the ReadCloseWrapper func below) or if there is an error + // before that occurs. 
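+ // (On the success path the matching Unlock happens instead inside the
+ // ReadCloserWrapper built at the bottom of this function, once the caller
+ // has finished reading and closes the returned archive.)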
+ container.Unlock() + } + }() + + if err := container.Mount(); err != nil { + return nil, err + } + + defer func() { + if err != nil { + // unmount any volumes + container.UnmountVolumes(true) + // unmount the container's rootfs + container.Unmount() + } + }() + + if err := container.mountVolumes(); err != nil { + return nil, err + } + + basePath, err := container.GetResourcePath(resource) + if err != nil { + return nil, err + } + stat, err := os.Stat(basePath) + if err != nil { + return nil, err + } + var filter []string + if !stat.IsDir() { + d, f := filepath.Split(basePath) + basePath = d + filter = []string{f} + } else { + filter = []string{filepath.Base(basePath)} + basePath = filepath.Dir(basePath) + } + archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{ + Compression: archive.Uncompressed, + IncludeFiles: filter, + }) + if err != nil { + return nil, err + } + + if err := container.PrepareStorage(); err != nil { + container.Unmount() + return nil, err + } + + reader := ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + container.CleanupStorage() + container.UnmountVolumes(true) + container.Unmount() + container.Unlock() + return err + }) + container.LogEvent("copy") + return reader, nil +} + +// Returns true if the container exposes a certain port +func (container *Container) Exposes(p nat.Port) bool { + _, exists := container.Config.ExposedPorts[p] + return exists +} + +func (container *Container) HostConfig() *runconfig.HostConfig { + return container.hostConfig +} + +func (container *Container) SetHostConfig(hostConfig *runconfig.HostConfig) { + container.hostConfig = hostConfig +} + +func (container *Container) getLogConfig() runconfig.LogConfig { + cfg := container.hostConfig.LogConfig + if cfg.Type != "" || len(cfg.Config) > 0 { // container has log driver configured + if cfg.Type == "" { + cfg.Type = jsonfilelog.Name + } + return cfg + } + // Use daemon's default log config for containers + return container.daemon.defaultLogConfig +} + +func (container *Container) getLogger() (logger.Logger, error) { + if container.logDriver != nil && container.IsRunning() { + return container.logDriver, nil + } + cfg := container.getLogConfig() + if err := logger.ValidateLogOpts(cfg.Type, cfg.Config); err != nil { + return nil, err + } + c, err := logger.GetLogDriver(cfg.Type) + if err != nil { + return nil, fmt.Errorf("Failed to get logging factory: %v", err) + } + ctx := logger.Context{ + Config: cfg.Config, + ContainerID: container.ID, + ContainerName: container.Name, + ContainerEntrypoint: container.Path, + ContainerArgs: container.Args, + ContainerImageID: container.ImageID, + ContainerImageName: container.Config.Image, + ContainerCreated: container.Created, + } + + // Set logging file for "json-logger" + if cfg.Type == jsonfilelog.Name { + ctx.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", container.ID)) + if err != nil { + return nil, err + } + } + return c(ctx) +} + +func (container *Container) startLogging() error { + cfg := container.getLogConfig() + if cfg.Type == "none" { + return nil // do not start logging routines + } + + l, err := container.getLogger() + if err != nil { + return fmt.Errorf("Failed to initialize logging driver: %v", err) + } + + copier, err := logger.NewCopier(container.ID, map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l) + if err != nil { + return err + } + container.logCopier = copier + copier.Run() + container.logDriver = l + + // set LogPath 
field only for json-file logdriver + if jl, ok := l.(*jsonfilelog.JSONFileLogger); ok { + container.LogPath = jl.LogPath() + } + + return nil +} + +func (container *Container) waitForStart() error { + container.monitor = newContainerMonitor(container, container.hostConfig.RestartPolicy) + + // block until we either receive an error from the initial start of the container's + // process or until the process is running in the container + select { + case <-container.monitor.startSignal: + case err := <-promise.Go(container.monitor.Start): + return err + } + + return nil +} + +func (container *Container) GetProcessLabel() string { + // even if we have a process label return "" if we are running + // in privileged mode + if container.hostConfig.Privileged { + return "" + } + return container.ProcessLabel +} + +func (container *Container) GetMountLabel() string { + if container.hostConfig.Privileged { + return "" + } + return container.MountLabel +} + +func (container *Container) Stats() (*execdriver.ResourceStats, error) { + return container.daemon.Stats(container) +} + +func (c *Container) LogDriverType() string { + c.Lock() + defer c.Unlock() + if c.hostConfig.LogConfig.Type == "" { + return c.daemon.defaultLogConfig.Type + } + return c.hostConfig.LogConfig.Type +} + +func (container *Container) GetExecIDs() []string { + return container.execCommands.List() +} + +func (container *Container) Exec(execConfig *execConfig) error { + container.Lock() + defer container.Unlock() + + waitStart := make(chan struct{}) + + callback := func(processConfig *execdriver.ProcessConfig, pid int) { + if processConfig.Tty { + // The callback is called after the process Start() + // so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave + // which we close here. 
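+ // (Closing the parent's copy of the pty slave matters: if it stayed open,
+ // reads on the master side would never return EOF after the process
+ // exits.)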
+ if c, ok := processConfig.Stdout.(io.Closer); ok {
+ c.Close()
+ }
+ }
+ close(waitStart)
+ }
+
+ // We use a callback here instead of a goroutine and a chan for
+ // synchronization purposes
+ cErr := promise.Go(func() error { return container.monitorExec(execConfig, callback) })
+
+ // Exec should not return until the process is actually running
+ select {
+ case <-waitStart:
+ case err := <-cErr:
+ return err
+ }
+
+ return nil
+}
+
+func (container *Container) monitorExec(execConfig *execConfig, callback execdriver.StartCallback) error {
+ var (
+ err error
+ exitCode int
+ )
+ pipes := execdriver.NewPipes(execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdout, execConfig.StreamConfig.stderr, execConfig.OpenStdin)
+ exitCode, err = container.daemon.Exec(container, execConfig, pipes, callback)
+ if err != nil {
+ logrus.Errorf("Error running command in existing container %s: %s", container.ID, err)
+ }
+ logrus.Debugf("Exec task in container %s exited with code %d", container.ID, exitCode)
+ if execConfig.OpenStdin {
+ if err := execConfig.StreamConfig.stdin.Close(); err != nil {
+ logrus.Errorf("Error closing stdin while running in %s: %s", container.ID, err)
+ }
+ }
+ if err := execConfig.StreamConfig.stdout.Clean(); err != nil {
+ logrus.Errorf("Error closing stdout while running in %s: %s", container.ID, err)
+ }
+ if err := execConfig.StreamConfig.stderr.Clean(); err != nil {
+ logrus.Errorf("Error closing stderr while running in %s: %s", container.ID, err)
+ }
+ if execConfig.ProcessConfig.Terminal != nil {
+ if err := execConfig.ProcessConfig.Terminal.Close(); err != nil {
+ logrus.Errorf("Error closing terminal while running in container %s: %s", container.ID, err)
+ }
+ }
+ // remove the exec command from the container's store only and not the
+ // daemon's store so that the exec command can be inspected.
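+ // (The daemon-level registry keeps its reference until cleanup(), which
+ // unregisters each remaining exec config from the daemon.)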
+ container.execCommands.Delete(execConfig.ID) + return err +} + +func (c *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error { + return attach(&c.StreamConfig, c.Config.OpenStdin, c.Config.StdinOnce, c.Config.Tty, stdin, stdout, stderr) +} + +func (c *Container) AttachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error { + if logs { + logDriver, err := c.getLogger() + if err != nil { + return err + } + cLog, ok := logDriver.(logger.LogReader) + if !ok { + return logger.ErrReadLogsNotSupported + } + logs := cLog.ReadLogs(logger.ReadConfig{Tail: -1}) + + LogLoop: + for { + select { + case msg, ok := <-logs.Msg: + if !ok { + break LogLoop + } + if msg.Source == "stdout" && stdout != nil { + stdout.Write(msg.Line) + } + if msg.Source == "stderr" && stderr != nil { + stderr.Write(msg.Line) + } + case err := <-logs.Err: + logrus.Errorf("Error streaming logs: %v", err) + break LogLoop + } + } + } + + c.LogEvent("attach") + + //stream + if stream { + var stdinPipe io.ReadCloser + if stdin != nil { + r, w := io.Pipe() + go func() { + defer w.Close() + defer logrus.Debugf("Closing buffered stdin pipe") + io.Copy(w, stdin) + }() + stdinPipe = r + } + <-c.Attach(stdinPipe, stdout, stderr) + // If we are in stdinonce mode, wait for the process to end + // otherwise, simply return + if c.Config.StdinOnce && !c.Config.Tty { + c.WaitStop(-1 * time.Second) + } + } + return nil +} + +func attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error { + var ( + cStdout, cStderr io.ReadCloser + cStdin io.WriteCloser + wg sync.WaitGroup + errors = make(chan error, 3) + ) + + if stdin != nil && openStdin { + cStdin = streamConfig.StdinPipe() + wg.Add(1) + } + + if stdout != nil { + cStdout = streamConfig.StdoutPipe() + wg.Add(1) + } + + if stderr != nil { + cStderr = streamConfig.StderrPipe() + wg.Add(1) + } + + // Connect stdin of container to the http conn. 
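+ // (The errors channel above is buffered with capacity 3, one slot per
+ // potential copier goroutine, so no copier blocks forever on send.)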
+ go func() { + if stdin == nil || !openStdin { + return + } + logrus.Debugf("attach: stdin: begin") + defer func() { + if stdinOnce && !tty { + cStdin.Close() + } else { + // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr + if cStdout != nil { + cStdout.Close() + } + if cStderr != nil { + cStderr.Close() + } + } + wg.Done() + logrus.Debugf("attach: stdin: end") + }() + + var err error + if tty { + _, err = copyEscapable(cStdin, stdin) + } else { + _, err = io.Copy(cStdin, stdin) + + } + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + logrus.Errorf("attach: stdin: %s", err) + errors <- err + return + } + }() + + attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) { + if stream == nil { + return + } + defer func() { + // Make sure stdin gets closed + if stdin != nil { + stdin.Close() + } + streamPipe.Close() + wg.Done() + logrus.Debugf("attach: %s: end", name) + }() + + logrus.Debugf("attach: %s: begin", name) + _, err := io.Copy(stream, streamPipe) + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + logrus.Errorf("attach: %s: %v", name, err) + errors <- err + } + } + + go attachStream("stdout", stdout, cStdout) + go attachStream("stderr", stderr, cStderr) + + return promise.Go(func() error { + wg.Wait() + close(errors) + for err := range errors { + if err != nil { + return err + } + } + return nil + }) +} + +// Code c/c from io.Copy() modified to handle escape sequence +func copyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) { + buf := make([]byte, 32*1024) + for { + nr, er := src.Read(buf) + if nr > 0 { + // ---- Docker addition + // char 16 is C-p + if nr == 1 && buf[0] == 16 { + nr, er = src.Read(buf) + // char 17 is C-q + if nr == 1 && buf[0] == 17 { + if err := src.Close(); err != nil { + return 0, err + } + return 0, nil + } + } + // ---- End of docker + nw, ew := dst.Write(buf[0:nr]) + if nw > 0 { + written += int64(nw) + } + if ew != nil { + err = ew + break + } + if nr != nw { + err = io.ErrShortWrite + break + } + } + if er == io.EOF { + break + } + if er != nil { + err = er + break + } + } + return written, err +} + +func (container *Container) networkMounts() []execdriver.Mount { + var mounts []execdriver.Mount + mode := "Z" + if container.hostConfig.NetworkMode.IsContainer() { + mode = "z" + } + if container.ResolvConfPath != "" { + label.Relabel(container.ResolvConfPath, container.MountLabel, mode) + mounts = append(mounts, execdriver.Mount{ + Source: container.ResolvConfPath, + Destination: "/etc/resolv.conf", + Writable: !container.hostConfig.ReadonlyRootfs, + Private: true, + }) + } + if container.HostnamePath != "" { + label.Relabel(container.HostnamePath, container.MountLabel, mode) + mounts = append(mounts, execdriver.Mount{ + Source: container.HostnamePath, + Destination: "/etc/hostname", + Writable: !container.hostConfig.ReadonlyRootfs, + Private: true, + }) + } + if container.HostsPath != "" { + label.Relabel(container.HostsPath, container.MountLabel, mode) + mounts = append(mounts, execdriver.Mount{ + Source: container.HostsPath, + Destination: "/etc/hosts", + Writable: !container.hostConfig.ReadonlyRootfs, + Private: true, + }) + } + return mounts +} + +func (container *Container) addBindMountPoint(name, source, destination string, rw bool) { + container.MountPoints[destination] = &mountPoint{ + Name: name, + Source: source, + Destination: destination, + RW: rw, + } +} + +func (container *Container) addLocalMountPoint(name, destination string, rw bool) 
{ + container.MountPoints[destination] = &mountPoint{ + Name: name, + Driver: volume.DefaultDriverName, + Destination: destination, + RW: rw, + } +} + +func (container *Container) addMountPointWithVolume(destination string, vol volume.Volume, rw bool) { + container.MountPoints[destination] = &mountPoint{ + Name: vol.Name(), + Driver: vol.DriverName(), + Destination: destination, + RW: rw, + Volume: vol, + } +} + +func (container *Container) isDestinationMounted(destination string) bool { + return container.MountPoints[destination] != nil +} + +func (container *Container) prepareMountPoints() error { + for _, config := range container.MountPoints { + if len(config.Driver) > 0 { + v, err := createVolume(config.Name, config.Driver) + if err != nil { + return err + } + config.Volume = v + } + } + return nil +} + +func (container *Container) removeMountPoints() error { + for _, m := range container.MountPoints { + if m.Volume != nil { + if err := removeVolume(m.Volume); err != nil { + return err + } + } + } + return nil +} + +func (container *Container) shouldRestart() bool { + return container.hostConfig.RestartPolicy.Name == "always" || + (container.hostConfig.RestartPolicy.Name == "on-failure" && container.ExitCode != 0) +} + +func (container *Container) mountVolumes() error { + mounts, err := container.setupMounts() + if err != nil { + return err + } + + for _, m := range mounts { + dest, err := container.GetResourcePath(m.Destination) + if err != nil { + return err + } + + var stat os.FileInfo + stat, err = os.Stat(m.Source) + if err != nil { + return err + } + if err = fileutils.CreateIfNotExists(dest, stat.IsDir()); err != nil { + return err + } + + opts := "rbind,ro" + if m.Writable { + opts = "rbind,rw" + } + + if err := mount.Mount(m.Source, dest, "bind", opts); err != nil { + return err + } + } + + return nil +} + +func (container *Container) copyImagePathContent(v volume.Volume, destination string) error { + rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, destination), container.basefs) + if err != nil { + return err + } + + if _, err = ioutil.ReadDir(rootfs); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + path, err := v.Mount() + if err != nil { + return err + } + + if err := copyExistingContents(rootfs, path); err != nil { + return err + } + + return v.Unmount() +} diff --git a/daemon/container_unit_test.go b/daemon/container_unit_test.go new file mode 100644 index 00000000..ab30a8e3 --- /dev/null +++ b/daemon/container_unit_test.go @@ -0,0 +1,33 @@ +package daemon + +import "testing" + +func TestGetFullName(t *testing.T) { + name, err := GetFullContainerName("testing") + if err != nil { + t.Fatal(err) + } + if name != "/testing" { + t.Fatalf("Expected /testing got %s", name) + } + if _, err := GetFullContainerName(""); err == nil { + t.Fatal("Error should not be nil") + } +} + +func TestValidContainerNames(t *testing.T) { + invalidNames := []string{"-rm", "&sdfsfd", "safd%sd"} + validNames := []string{"word-word", "word_word", "1weoid"} + + for _, name := range invalidNames { + if validContainerNamePattern.MatchString(name) { + t.Fatalf("%q is not a valid container name and was returned as valid.", name) + } + } + + for _, name := range validNames { + if !validContainerNamePattern.MatchString(name) { + t.Fatalf("%q is a valid container name and was returned as invalid.", name) + } + } +} diff --git a/daemon/container_unix.go b/daemon/container_unix.go new file mode 100644 index 00000000..ff62de9c --- /dev/null +++ 
b/daemon/container_unix.go @@ -0,0 +1,1156 @@ +// +build !windows + +package daemon + +import ( + "fmt" + "io/ioutil" + "net" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/links" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/nat" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/ulimit" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/options" + "github.com/docker/libnetwork/types" + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/devices" +) + +const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + +type Container struct { + CommonContainer + + // Fields below here are platform specific. + + AppArmorProfile string + activeLinks map[string]*links.Link +} + +func killProcessDirectly(container *Container) error { + if _, err := container.WaitStop(10 * time.Second); err != nil { + // Ensure that we don't kill ourselves + if pid := container.GetPid(); pid != 0 { + logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(container.ID)) + if err := syscall.Kill(pid, 9); err != nil { + if err != syscall.ESRCH { + return err + } + logrus.Debugf("Cannot kill process (pid=%d) with signal 9: no such process.", pid) + } + } + } + return nil +} + +func (container *Container) setupLinkedContainers() ([]string, error) { + var ( + env []string + daemon = container.daemon + ) + children, err := daemon.Children(container.Name) + if err != nil { + return nil, err + } + + if len(children) > 0 { + container.activeLinks = make(map[string]*links.Link, len(children)) + + // If we encounter an error make sure that we rollback any network + // config and iptables changes + rollback := func() { + for _, link := range container.activeLinks { + link.Disable() + } + container.activeLinks = nil + } + + for linkAlias, child := range children { + if !child.IsRunning() { + return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias) + } + + link, err := links.NewLink( + container.NetworkSettings.IPAddress, + child.NetworkSettings.IPAddress, + linkAlias, + child.Config.Env, + child.Config.ExposedPorts, + ) + + if err != nil { + rollback() + return nil, err + } + + container.activeLinks[link.Alias()] = link + if err := link.Enable(); err != nil { + rollback() + return nil, err + } + + for _, envVar := range link.ToEnv() { + env = append(env, envVar) + } + } + } + return env, nil +} + +func (container *Container) createDaemonEnvironment(linkedEnv []string) []string { + // if a domain name was specified, append it to the hostname (see #7851) + fullHostname := container.Config.Hostname + if container.Config.Domainname != "" { + fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname) + } + // Setup environment + env := []string{ + "PATH=" + DefaultPathEnv, + "HOSTNAME=" + fullHostname, + // Note: we don't set HOME here because it'll get autoset intelligently + // based on the value of USER inside dockerinit, but only if it isn't + // set already 
(ie, that can be overridden by setting HOME via -e or ENV + // in a Dockerfile). + } + if container.Config.Tty { + env = append(env, "TERM=xterm") + } + env = append(env, linkedEnv...) + // because the env on the container can override certain default values + // we need to replace the 'env' keys where they match and append anything + // else. + env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env) + + return env +} + +func getDevicesFromPath(deviceMapping runconfig.DeviceMapping) (devs []*configs.Device, err error) { + device, err := devices.DeviceFromPath(deviceMapping.PathOnHost, deviceMapping.CgroupPermissions) + // if there was no error, return the device + if err == nil { + device.Path = deviceMapping.PathInContainer + return append(devs, device), nil + } + + // if the device is not a device node + // try to see if it's a directory holding many devices + if err == devices.ErrNotADevice { + + // check if it is a directory + if src, e := os.Stat(deviceMapping.PathOnHost); e == nil && src.IsDir() { + + // mount the internal devices recursively + filepath.Walk(deviceMapping.PathOnHost, func(dpath string, f os.FileInfo, e error) error { + childDevice, e := devices.DeviceFromPath(dpath, deviceMapping.CgroupPermissions) + if e != nil { + // ignore the device + return nil + } + + // add the device to userSpecified devices + childDevice.Path = strings.Replace(dpath, deviceMapping.PathOnHost, deviceMapping.PathInContainer, 1) + devs = append(devs, childDevice) + + return nil + }) + } + } + + if len(devs) > 0 { + return devs, nil + } + + return devs, fmt.Errorf("error gathering device information while adding custom device %q: %s", deviceMapping.PathOnHost, err) +} + +func populateCommand(c *Container, env []string) error { + var en *execdriver.Network + if !c.Config.NetworkDisabled { + en = &execdriver.Network{ + NamespacePath: c.NetworkSettings.SandboxKey, + } + + parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2) + if parts[0] == "container" { + nc, err := c.getNetworkedContainer() + if err != nil { + return err + } + en.ContainerID = nc.ID + } + } + + ipc := &execdriver.Ipc{} + + if c.hostConfig.IpcMode.IsContainer() { + ic, err := c.getIpcContainer() + if err != nil { + return err + } + ipc.ContainerID = ic.ID + } else { + ipc.HostIpc = c.hostConfig.IpcMode.IsHost() + } + + pid := &execdriver.Pid{} + pid.HostPid = c.hostConfig.PidMode.IsHost() + + uts := &execdriver.UTS{ + HostUTS: c.hostConfig.UTSMode.IsHost(), + } + + // Build lists of devices allowed and created within the container. + var userSpecifiedDevices []*configs.Device + for _, deviceMapping := range c.hostConfig.Devices { + devs, err := getDevicesFromPath(deviceMapping) + if err != nil { + return err + } + + userSpecifiedDevices = append(userSpecifiedDevices, devs...) 
+ } + + allowedDevices := mergeDevices(configs.DefaultAllowedDevices, userSpecifiedDevices) + + autoCreatedDevices := mergeDevices(configs.DefaultAutoCreatedDevices, userSpecifiedDevices) + + // TODO: this can be removed after lxc-conf is fully deprecated + lxcConfig, err := mergeLxcConfIntoOptions(c.hostConfig) + if err != nil { + return err + } + + var rlimits []*ulimit.Rlimit + ulimits := c.hostConfig.Ulimits + + // Merge ulimits with daemon defaults + ulIdx := make(map[string]*ulimit.Ulimit) + for _, ul := range ulimits { + ulIdx[ul.Name] = ul + } + for name, ul := range c.daemon.config.Ulimits { + if _, exists := ulIdx[name]; !exists { + ulimits = append(ulimits, ul) + } + } + + for _, limit := range ulimits { + rl, err := limit.GetRlimit() + if err != nil { + return err + } + rlimits = append(rlimits, rl) + } + + resources := &execdriver.Resources{ + Memory: c.hostConfig.Memory, + MemorySwap: c.hostConfig.MemorySwap, + CpuShares: c.hostConfig.CpuShares, + CpusetCpus: c.hostConfig.CpusetCpus, + CpusetMems: c.hostConfig.CpusetMems, + CpuPeriod: c.hostConfig.CpuPeriod, + CpuQuota: c.hostConfig.CpuQuota, + BlkioWeight: c.hostConfig.BlkioWeight, + Rlimits: rlimits, + OomKillDisable: c.hostConfig.OomKillDisable, + MemorySwappiness: -1, + } + + if c.hostConfig.MemorySwappiness != nil { + resources.MemorySwappiness = *c.hostConfig.MemorySwappiness + } + + processConfig := execdriver.ProcessConfig{ + Privileged: c.hostConfig.Privileged, + Entrypoint: c.Path, + Arguments: c.Args, + Tty: c.Config.Tty, + User: c.Config.User, + } + + processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true} + processConfig.Env = env + + c.command = &execdriver.Command{ + ID: c.ID, + Rootfs: c.RootfsPath(), + ReadonlyRootfs: c.hostConfig.ReadonlyRootfs, + InitPath: "/.dockerinit", + WorkingDir: c.Config.WorkingDir, + Network: en, + Ipc: ipc, + Pid: pid, + UTS: uts, + Resources: resources, + AllowedDevices: allowedDevices, + AutoCreatedDevices: autoCreatedDevices, + CapAdd: c.hostConfig.CapAdd.Slice(), + CapDrop: c.hostConfig.CapDrop.Slice(), + GroupAdd: c.hostConfig.GroupAdd, + ProcessConfig: processConfig, + ProcessLabel: c.GetProcessLabel(), + MountLabel: c.GetMountLabel(), + LxcConfig: lxcConfig, + AppArmorProfile: c.AppArmorProfile, + CgroupParent: c.hostConfig.CgroupParent, + } + + return nil +} + +func mergeDevices(defaultDevices, userDevices []*configs.Device) []*configs.Device { + if len(userDevices) == 0 { + return defaultDevices + } + + paths := map[string]*configs.Device{} + for _, d := range userDevices { + paths[d.Path] = d + } + + var devs []*configs.Device + for _, d := range defaultDevices { + if _, defined := paths[d.Path]; !defined { + devs = append(devs, d) + } + } + return append(devs, userDevices...) +} + +// GetSize, return real size, virtual size +func (container *Container) GetSize() (int64, int64) { + var ( + sizeRw, sizeRootfs int64 + err error + driver = container.daemon.driver + ) + + if err := container.Mount(); err != nil { + logrus.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err) + return sizeRw, sizeRootfs + } + defer container.Unmount() + + initID := fmt.Sprintf("%s-init", container.ID) + sizeRw, err = driver.DiffSize(container.ID, initID) + if err != nil { + logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", driver, container.ID, err) + // FIXME: GetSize should return an error. Not changing it now in case + // there is a side-effect. 
+ sizeRw = -1 + } + + if _, err = os.Stat(container.basefs); err == nil { + if sizeRootfs, err = directory.Size(container.basefs); err != nil { + sizeRootfs = -1 + } + } + return sizeRw, sizeRootfs +} + +// Attempt to set the network mounts given a provided destination and +// the path to use for it; return true if the given destination was a +// network mount file +func (container *Container) trySetNetworkMount(destination string, path string) bool { + if destination == "/etc/resolv.conf" { + container.ResolvConfPath = path + return true + } + if destination == "/etc/hostname" { + container.HostnamePath = path + return true + } + if destination == "/etc/hosts" { + container.HostsPath = path + return true + } + + return false +} + +func (container *Container) buildHostnameFile() error { + hostnamePath, err := container.GetRootResourcePath("hostname") + if err != nil { + return err + } + container.HostnamePath = hostnamePath + + if container.Config.Domainname != "" { + return ioutil.WriteFile(container.HostnamePath, []byte(fmt.Sprintf("%s.%s\n", container.Config.Hostname, container.Config.Domainname)), 0644) + } + return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) +} + +func (container *Container) buildJoinOptions() ([]libnetwork.EndpointOption, error) { + var ( + joinOptions []libnetwork.EndpointOption + err error + dns []string + dnsSearch []string + ) + + joinOptions = append(joinOptions, libnetwork.JoinOptionHostname(container.Config.Hostname), + libnetwork.JoinOptionDomainname(container.Config.Domainname)) + + if container.hostConfig.NetworkMode.IsHost() { + joinOptions = append(joinOptions, libnetwork.JoinOptionUseDefaultSandbox()) + } + + container.HostsPath, err = container.GetRootResourcePath("hosts") + if err != nil { + return nil, err + } + joinOptions = append(joinOptions, libnetwork.JoinOptionHostsPath(container.HostsPath)) + + container.ResolvConfPath, err = container.GetRootResourcePath("resolv.conf") + if err != nil { + return nil, err + } + joinOptions = append(joinOptions, libnetwork.JoinOptionResolvConfPath(container.ResolvConfPath)) + + if len(container.hostConfig.Dns) > 0 { + dns = container.hostConfig.Dns + } else if len(container.daemon.config.Dns) > 0 { + dns = container.daemon.config.Dns + } + + for _, d := range dns { + joinOptions = append(joinOptions, libnetwork.JoinOptionDNS(d)) + } + + if len(container.hostConfig.DnsSearch) > 0 { + dnsSearch = container.hostConfig.DnsSearch + } else if len(container.daemon.config.DnsSearch) > 0 { + dnsSearch = container.daemon.config.DnsSearch + } + + for _, ds := range dnsSearch { + joinOptions = append(joinOptions, libnetwork.JoinOptionDNSSearch(ds)) + } + + if container.NetworkSettings.SecondaryIPAddresses != nil { + name := container.Config.Hostname + if container.Config.Domainname != "" { + name = name + "." 
+ container.Config.Domainname + } + + for _, a := range container.NetworkSettings.SecondaryIPAddresses { + joinOptions = append(joinOptions, libnetwork.JoinOptionExtraHost(name, a.Addr)) + } + } + + var childEndpoints, parentEndpoints []string + + children, err := container.daemon.Children(container.Name) + if err != nil { + return nil, err + } + + for linkAlias, child := range children { + _, alias := path.Split(linkAlias) + // allow access to the linked container via the alias, real name, and container hostname + aliasList := alias + " " + child.Config.Hostname + // only add the name if alias isn't equal to the name + if alias != child.Name[1:] { + aliasList = aliasList + " " + child.Name[1:] + } + joinOptions = append(joinOptions, libnetwork.JoinOptionExtraHost(aliasList, child.NetworkSettings.IPAddress)) + if child.NetworkSettings.EndpointID != "" { + childEndpoints = append(childEndpoints, child.NetworkSettings.EndpointID) + } + } + + for _, extraHost := range container.hostConfig.ExtraHosts { + // allow IPv6 addresses in extra hosts; only split on first ":" + parts := strings.SplitN(extraHost, ":", 2) + joinOptions = append(joinOptions, libnetwork.JoinOptionExtraHost(parts[0], parts[1])) + } + + refs := container.daemon.ContainerGraph().RefPaths(container.ID) + for _, ref := range refs { + if ref.ParentID == "0" { + continue + } + + c, err := container.daemon.Get(ref.ParentID) + if err != nil { + logrus.Error(err) + } + + if c != nil && !container.daemon.config.DisableBridge && container.hostConfig.NetworkMode.IsPrivate() { + logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", c.ID, ref.Name, container.NetworkSettings.IPAddress) + joinOptions = append(joinOptions, libnetwork.JoinOptionParentUpdate(c.NetworkSettings.EndpointID, ref.Name, container.NetworkSettings.IPAddress)) + if c.NetworkSettings.EndpointID != "" { + parentEndpoints = append(parentEndpoints, c.NetworkSettings.EndpointID) + } + } + } + + linkOptions := options.Generic{ + netlabel.GenericData: options.Generic{ + "ParentEndpoints": parentEndpoints, + "ChildEndpoints": childEndpoints, + }, + } + + joinOptions = append(joinOptions, libnetwork.JoinOptionGeneric(linkOptions)) + + return joinOptions, nil +} + +func (container *Container) buildPortMapInfo(n libnetwork.Network, ep libnetwork.Endpoint, networkSettings *network.Settings) (*network.Settings, error) { + if ep == nil { + return nil, fmt.Errorf("invalid endpoint while building port map info") + } + + if networkSettings == nil { + return nil, fmt.Errorf("invalid networksettings while building port map info") + } + + driverInfo, err := ep.DriverInfo() + if err != nil { + return nil, err + } + + if driverInfo == nil { + // It is not an error for epInfo to be nil + return networkSettings, nil + } + + if mac, ok := driverInfo[netlabel.MacAddress]; ok { + networkSettings.MacAddress = mac.(net.HardwareAddr).String() + } + + networkSettings.Ports = nat.PortMap{} + + if expData, ok := driverInfo[netlabel.ExposedPorts]; ok { + if exposedPorts, ok := expData.([]types.TransportPort); ok { + for _, tp := range exposedPorts { + natPort, err := nat.NewPort(tp.Proto.String(), strconv.Itoa(int(tp.Port))) + if err != nil { + return nil, fmt.Errorf("Error parsing Port value(%s):%v", tp.Port, err) + } + networkSettings.Ports[natPort] = nil + } + } + } + + mapData, ok := driverInfo[netlabel.PortMap] + if !ok { + return networkSettings, nil + } + + if portMapping, ok := mapData.([]types.PortBinding); ok { + for _, pp := range portMapping { + natPort, err := 
nat.NewPort(pp.Proto.String(), strconv.Itoa(int(pp.Port))) + if err != nil { + return nil, err + } + natBndg := nat.PortBinding{HostIP: pp.HostIP.String(), HostPort: strconv.Itoa(int(pp.HostPort))} + networkSettings.Ports[natPort] = append(networkSettings.Ports[natPort], natBndg) + } + } + + return networkSettings, nil +} + +func (container *Container) buildEndpointInfo(n libnetwork.Network, ep libnetwork.Endpoint, networkSettings *network.Settings) (*network.Settings, error) { + if ep == nil { + return nil, fmt.Errorf("invalid endpoint while building port map info") + } + + if networkSettings == nil { + return nil, fmt.Errorf("invalid networksettings while building port map info") + } + + epInfo := ep.Info() + if epInfo == nil { + // It is not an error to get an empty endpoint info + return networkSettings, nil + } + + ifaceList := epInfo.InterfaceList() + if len(ifaceList) == 0 { + return networkSettings, nil + } + + iface := ifaceList[0] + + ones, _ := iface.Address().Mask.Size() + networkSettings.IPAddress = iface.Address().IP.String() + networkSettings.IPPrefixLen = ones + + if iface.AddressIPv6().IP.To16() != nil { + onesv6, _ := iface.AddressIPv6().Mask.Size() + networkSettings.GlobalIPv6Address = iface.AddressIPv6().IP.String() + networkSettings.GlobalIPv6PrefixLen = onesv6 + } + + if len(ifaceList) == 1 { + return networkSettings, nil + } + + networkSettings.SecondaryIPAddresses = make([]network.Address, 0, len(ifaceList)-1) + networkSettings.SecondaryIPv6Addresses = make([]network.Address, 0, len(ifaceList)-1) + for _, iface := range ifaceList[1:] { + ones, _ := iface.Address().Mask.Size() + addr := network.Address{Addr: iface.Address().IP.String(), PrefixLen: ones} + networkSettings.SecondaryIPAddresses = append(networkSettings.SecondaryIPAddresses, addr) + + if iface.AddressIPv6().IP.To16() != nil { + onesv6, _ := iface.AddressIPv6().Mask.Size() + addrv6 := network.Address{Addr: iface.AddressIPv6().IP.String(), PrefixLen: onesv6} + networkSettings.SecondaryIPv6Addresses = append(networkSettings.SecondaryIPv6Addresses, addrv6) + } + } + + return networkSettings, nil +} + +func (container *Container) updateJoinInfo(ep libnetwork.Endpoint) error { + epInfo := ep.Info() + if epInfo == nil { + // It is not an error to get an empty endpoint info + return nil + } + + container.NetworkSettings.Gateway = epInfo.Gateway().String() + if epInfo.GatewayIPv6().To16() != nil { + container.NetworkSettings.IPv6Gateway = epInfo.GatewayIPv6().String() + } + + container.NetworkSettings.SandboxKey = epInfo.SandboxKey() + + return nil +} + +func (container *Container) updateNetworkSettings(n libnetwork.Network, ep libnetwork.Endpoint) error { + networkSettings := &network.Settings{NetworkID: n.ID(), EndpointID: ep.ID()} + + networkSettings, err := container.buildPortMapInfo(n, ep, networkSettings) + if err != nil { + return err + } + + networkSettings, err = container.buildEndpointInfo(n, ep, networkSettings) + if err != nil { + return err + } + + if container.hostConfig.NetworkMode == runconfig.NetworkMode("bridge") { + networkSettings.Bridge = container.daemon.config.Bridge.Iface + } + + container.NetworkSettings = networkSettings + return nil +} + +func (container *Container) UpdateNetwork() error { + n, err := container.daemon.netController.NetworkByID(container.NetworkSettings.NetworkID) + if err != nil { + return fmt.Errorf("error locating network id %s: %v", container.NetworkSettings.NetworkID, err) + } + + ep, err := n.EndpointByID(container.NetworkSettings.EndpointID) + if err != nil { + 
return fmt.Errorf("error locating endpoint id %s: %v", container.NetworkSettings.EndpointID, err) + } + + if err := ep.Leave(container.ID); err != nil { + return fmt.Errorf("endpoint leave failed: %v", err) + + } + + joinOptions, err := container.buildJoinOptions() + if err != nil { + return fmt.Errorf("Update network failed: %v", err) + } + + if err := ep.Join(container.ID, joinOptions...); err != nil { + return fmt.Errorf("endpoint join failed: %v", err) + } + + if err := container.updateJoinInfo(ep); err != nil { + return fmt.Errorf("Updating join info failed: %v", err) + } + + return nil +} + +func (container *Container) buildCreateEndpointOptions() ([]libnetwork.EndpointOption, error) { + var ( + portSpecs = make(nat.PortSet) + bindings = make(nat.PortMap) + pbList []types.PortBinding + exposeList []types.TransportPort + createOptions []libnetwork.EndpointOption + ) + + if container.Config.ExposedPorts != nil { + portSpecs = container.Config.ExposedPorts + } + + if container.hostConfig.PortBindings != nil { + for p, b := range container.hostConfig.PortBindings { + bindings[p] = []nat.PortBinding{} + for _, bb := range b { + bindings[p] = append(bindings[p], nat.PortBinding{ + HostIP: bb.HostIP, + HostPort: bb.HostPort, + }) + } + } + } + + container.NetworkSettings.PortMapping = nil + + ports := make([]nat.Port, len(portSpecs)) + var i int + for p := range portSpecs { + ports[i] = p + i++ + } + nat.SortPortMap(ports, bindings) + for _, port := range ports { + expose := types.TransportPort{} + expose.Proto = types.ParseProtocol(port.Proto()) + expose.Port = uint16(port.Int()) + exposeList = append(exposeList, expose) + + pb := types.PortBinding{Port: expose.Port, Proto: expose.Proto} + binding := bindings[port] + for i := 0; i < len(binding); i++ { + pbCopy := pb.GetCopy() + newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort)) + if err != nil { + return nil, fmt.Errorf("Error parsing HostPort value(%s):%v", binding[i].HostPort, err) + } + pbCopy.HostPort = uint16(newP.Int()) + pbCopy.HostIP = net.ParseIP(binding[i].HostIP) + pbList = append(pbList, pbCopy) + } + + if container.hostConfig.PublishAllPorts && len(binding) == 0 { + pbList = append(pbList, pb) + } + } + + createOptions = append(createOptions, + libnetwork.CreateOptionPortMapping(pbList), + libnetwork.CreateOptionExposedPorts(exposeList)) + + if container.Config.MacAddress != "" { + mac, err := net.ParseMAC(container.Config.MacAddress) + if err != nil { + return nil, err + } + + genericOption := options.Generic{ + netlabel.MacAddress: mac, + } + + createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption)) + } + + return createOptions, nil +} + +func parseService(controller libnetwork.NetworkController, service string) (string, string, string) { + dn := controller.Config().Daemon.DefaultNetwork + dd := controller.Config().Daemon.DefaultDriver + + snd := strings.Split(service, ".") + if len(snd) > 2 { + return strings.Join(snd[:len(snd)-2], "."), snd[len(snd)-2], snd[len(snd)-1] + } + if len(snd) > 1 { + return snd[0], snd[1], dd + } + return snd[0], dn, dd +} + +func createNetwork(controller libnetwork.NetworkController, dnet string, driver string) (libnetwork.Network, error) { + createOptions := []libnetwork.NetworkOption{} + genericOption := options.Generic{} + + // Bridge driver is special due to legacy reasons + if runconfig.NetworkMode(driver).IsBridge() { + genericOption[netlabel.GenericData] = map[string]interface{}{ + "BridgeName": dnet, + "AllowNonDefaultBridge": "true", + } + 
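+		// Hand the bridge-specific settings to libnetwork as a generic option.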
networkOption := libnetwork.NetworkOptionGeneric(genericOption)
+		createOptions = append(createOptions, networkOption)
+	}
+
+	return controller.NewNetwork(driver, dnet, createOptions...)
+}
+
+func (container *Container) secondaryNetworkRequired(primaryNetworkType string) bool {
+	switch primaryNetworkType {
+	case "bridge", "none", "host", "container":
+		return false
+	}
+
+	if container.daemon.config.DisableBridge {
+		return false
+	}
+
+	if container.Config.ExposedPorts != nil && len(container.Config.ExposedPorts) > 0 {
+		return true
+	}
+	if container.hostConfig.PortBindings != nil && len(container.hostConfig.PortBindings) > 0 {
+		return true
+	}
+	return false
+}
+
+func (container *Container) AllocateNetwork() error {
+	mode := container.hostConfig.NetworkMode
+	controller := container.daemon.netController
+	if container.Config.NetworkDisabled || mode.IsContainer() {
+		return nil
+	}
+
+	networkDriver := string(mode)
+	service := container.Config.PublishService
+	networkName := mode.NetworkName()
+	if mode.IsDefault() {
+		if service != "" {
+			service, networkName, networkDriver = parseService(controller, service)
+		} else {
+			networkName = controller.Config().Daemon.DefaultNetwork
+			networkDriver = controller.Config().Daemon.DefaultDriver
+		}
+	} else if service != "" {
+		return fmt.Errorf("conflicting options: publishing a service and network mode")
+	}
+
+	if runconfig.NetworkMode(networkDriver).IsBridge() && container.daemon.config.DisableBridge {
+		container.Config.NetworkDisabled = true
+		return nil
+	}
+
+	if service == "" {
+		// The dot character "." has a special meaning: it supports the SERVICE[.NETWORK] format.
+		// For backward compatibility, replace "." with "-" instead of failing.
+		service = strings.Replace(container.Name, ".", "-", -1)
+		// Service names don't allow "/"; remove it instead of failing, again for backward compatibility.
+		service = strings.Replace(service, "/", "", -1)
+	}
+
+	if container.secondaryNetworkRequired(networkDriver) {
+		// Configure the bridge as a secondary network for port-binding purposes.
+		if err := container.configureNetwork("bridge", service, "bridge", false); err != nil {
+			return err
+		}
+	}
+
+	if err := container.configureNetwork(networkName, service, networkDriver, mode.IsDefault()); err != nil {
+		return err
+	}
+
+	return container.WriteHostConfig()
+}
+
+func (container *Container) configureNetwork(networkName, service, networkDriver string, canCreateNetwork bool) error {
+	controller := container.daemon.netController
+	n, err := controller.NetworkByName(networkName)
+	if err != nil {
+		if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok || !canCreateNetwork {
+			return err
+		}
+
+		if n, err = createNetwork(controller, networkName, networkDriver); err != nil {
+			return err
+		}
+	}
+
+	ep, err := n.EndpointByName(service)
+	if err != nil {
+		if _, ok := err.(libnetwork.ErrNoSuchEndpoint); !ok {
+			return err
+		}
+
+		createOptions, err := container.buildCreateEndpointOptions()
+		if err != nil {
+			return err
+		}
+
+		ep, err = n.CreateEndpoint(service, createOptions...)
+ if err != nil { + return err + } + } + + if err := container.updateNetworkSettings(n, ep); err != nil { + return err + } + + joinOptions, err := container.buildJoinOptions() + if err != nil { + return err + } + + if err := ep.Join(container.ID, joinOptions...); err != nil { + return err + } + + if err := container.updateJoinInfo(ep); err != nil { + return fmt.Errorf("Updating join info failed: %v", err) + } + + return nil +} + +func (container *Container) initializeNetworking() error { + var err error + + // Make sure NetworkMode has an acceptable value before + // initializing networking. + if container.hostConfig.NetworkMode == runconfig.NetworkMode("") { + container.hostConfig.NetworkMode = runconfig.NetworkMode("default") + } + if container.hostConfig.NetworkMode.IsContainer() { + // we need to get the hosts files from the container to join + nc, err := container.getNetworkedContainer() + if err != nil { + return err + } + container.HostnamePath = nc.HostnamePath + container.HostsPath = nc.HostsPath + container.ResolvConfPath = nc.ResolvConfPath + container.Config.Hostname = nc.Config.Hostname + container.Config.Domainname = nc.Config.Domainname + return nil + } + + if container.hostConfig.NetworkMode.IsHost() { + container.Config.Hostname, err = os.Hostname() + if err != nil { + return err + } + + parts := strings.SplitN(container.Config.Hostname, ".", 2) + if len(parts) > 1 { + container.Config.Hostname = parts[0] + container.Config.Domainname = parts[1] + } + + } + + if err := container.AllocateNetwork(); err != nil { + return err + } + + return container.buildHostnameFile() +} + +func (container *Container) ExportRw() (archive.Archive, error) { + if container.daemon == nil { + return nil, fmt.Errorf("Can't load storage driver for unregistered container %s", container.ID) + } + archive, err := container.daemon.Diff(container) + if err != nil { + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + return err + }), + nil +} + +func (container *Container) getIpcContainer() (*Container, error) { + containerID := container.hostConfig.IpcMode.Container() + c, err := container.daemon.Get(containerID) + if err != nil { + return nil, err + } + if !c.IsRunning() { + return nil, fmt.Errorf("cannot join IPC of a non running container: %s", containerID) + } + return c, nil +} + +func (container *Container) setupWorkingDirectory() error { + if container.Config.WorkingDir != "" { + container.Config.WorkingDir = filepath.Clean(container.Config.WorkingDir) + + pth, err := container.GetResourcePath(container.Config.WorkingDir) + if err != nil { + return err + } + + pthInfo, err := os.Stat(pth) + if err != nil { + if !os.IsNotExist(err) { + return err + } + + if err := system.MkdirAll(pth, 0755); err != nil { + return err + } + } + if pthInfo != nil && !pthInfo.IsDir() { + return fmt.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir) + } + } + return nil +} + +func (container *Container) getNetworkedContainer() (*Container, error) { + parts := strings.SplitN(string(container.hostConfig.NetworkMode), ":", 2) + switch parts[0] { + case "container": + if len(parts) != 2 { + return nil, fmt.Errorf("no container specified to join network") + } + nc, err := container.daemon.Get(parts[1]) + if err != nil { + return nil, err + } + if container == nc { + return nil, fmt.Errorf("cannot join own network") + } + if !nc.IsRunning() { + return nil, fmt.Errorf("cannot join network of a non running container: %s", parts[1]) + } + 
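+		// The target container is alive; its network namespace and network files can be shared.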
return nc, nil + default: + return nil, fmt.Errorf("network mode not set to container") + } +} + +func (container *Container) ReleaseNetwork() { + if container.hostConfig.NetworkMode.IsContainer() || container.Config.NetworkDisabled { + return + } + + eid := container.NetworkSettings.EndpointID + nid := container.NetworkSettings.NetworkID + + container.NetworkSettings = &network.Settings{} + + if nid == "" || eid == "" { + return + } + + n, err := container.daemon.netController.NetworkByID(nid) + if err != nil { + logrus.Errorf("error locating network id %s: %v", nid, err) + return + } + + ep, err := n.EndpointByID(eid) + if err != nil { + logrus.Errorf("error locating endpoint id %s: %v", eid, err) + return + } + + switch { + case container.hostConfig.NetworkMode.IsHost(): + if err := ep.Leave(container.ID); err != nil { + logrus.Errorf("Error leaving endpoint id %s for container %s: %v", eid, container.ID, err) + return + } + default: + if err := container.daemon.netController.LeaveAll(container.ID); err != nil { + logrus.Errorf("Leave all failed for %s: %v", container.ID, err) + return + } + } + + // In addition to leaving all endpoints, delete implicitly created endpoint + if container.Config.PublishService == "" { + if err := ep.Delete(); err != nil { + logrus.Errorf("deleting endpoint failed: %v", err) + } + } + +} + +func disableAllActiveLinks(container *Container) { + if container.activeLinks != nil { + for _, link := range container.activeLinks { + link.Disable() + } + } +} + +func (container *Container) DisableLink(name string) { + if container.activeLinks != nil { + if link, exists := container.activeLinks[name]; exists { + link.Disable() + delete(container.activeLinks, name) + if err := container.UpdateNetwork(); err != nil { + logrus.Debugf("Could not update network to remove link: %v", err) + } + } else { + logrus.Debugf("Could not find active link for %s", name) + } + } +} + +func (container *Container) UnmountVolumes(forceSyscall bool) error { + var volumeMounts []mountPoint + + for _, mntPoint := range container.MountPoints { + dest, err := container.GetResourcePath(mntPoint.Destination) + if err != nil { + return err + } + + volumeMounts = append(volumeMounts, mountPoint{Destination: dest, Volume: mntPoint.Volume}) + } + + for _, mnt := range container.networkMounts() { + dest, err := container.GetResourcePath(mnt.Destination) + if err != nil { + return err + } + + volumeMounts = append(volumeMounts, mountPoint{Destination: dest}) + } + + for _, volumeMount := range volumeMounts { + if forceSyscall { + syscall.Unmount(volumeMount.Destination, 0) + } + + if volumeMount.Volume != nil { + if err := volumeMount.Volume.Unmount(); err != nil { + return err + } + } + } + + return nil +} + +func (container *Container) PrepareStorage() error { + return nil +} + +func (container *Container) CleanupStorage() error { + return nil +} diff --git a/daemon/container_windows.go b/daemon/container_windows.go new file mode 100644 index 00000000..ebba2e72 --- /dev/null +++ b/daemon/container_windows.go @@ -0,0 +1,215 @@ +// +build windows + +package daemon + +import ( + "fmt" + "path/filepath" + "strings" + + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/daemon/graphdriver/windows" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/archive" + "github.com/microsoft/hcsshim" +) + +// This is deliberately empty on Windows as the default path will be set by +// the container. Docker has no context of what the default path should be. 
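+// The unix counterpart, by contrast, ships a conventional default PATH.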
+const DefaultPathEnv = "" + +type Container struct { + CommonContainer + + // Fields below here are platform specific. + + // TODO Windows. Further factoring out of unused fields will be necessary. + + // ---- START OF TEMPORARY DECLARATION ---- + // TODO Windows. Temporarily keeping fields in to assist in compilation + // of the daemon on Windows without affecting many other files in a single + // PR, thus making code review significantly harder. These lines will be + // removed in subsequent PRs. + + AppArmorProfile string + // ---- END OF TEMPORARY DECLARATION ---- + +} + +func killProcessDirectly(container *Container) error { + return nil +} + +func (container *Container) setupContainerDns() error { + return nil +} + +func (container *Container) updateParentsHosts() error { + return nil +} + +func (container *Container) setupLinkedContainers() ([]string, error) { + return nil, nil +} + +func (container *Container) createDaemonEnvironment(linkedEnv []string) []string { + // On Windows, nothing to link. Just return the container environment. + return container.Config.Env +} + +func (container *Container) initializeNetworking() error { + return nil +} + +func (container *Container) setupWorkingDirectory() error { + return nil +} + +func populateCommand(c *Container, env []string) error { + en := &execdriver.Network{ + Mtu: c.daemon.config.Mtu, + Interface: nil, + } + + parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2) + switch parts[0] { + + case "none": + case "default", "": // empty string to support existing containers + if !c.Config.NetworkDisabled { + en.Interface = &execdriver.NetworkInterface{ + MacAddress: c.Config.MacAddress, + Bridge: c.daemon.config.Bridge.VirtualSwitchName, + } + } + default: + return fmt.Errorf("invalid network mode: %s", c.hostConfig.NetworkMode) + } + + pid := &execdriver.Pid{} + + // TODO Windows. This can probably be factored out. + pid.HostPid = c.hostConfig.PidMode.IsHost() + + // TODO Windows. Resource controls to be implemented later. + resources := &execdriver.Resources{} + + // TODO Windows. Further refactoring required (privileged/user) + processConfig := execdriver.ProcessConfig{ + Privileged: c.hostConfig.Privileged, + Entrypoint: c.Path, + Arguments: c.Args, + Tty: c.Config.Tty, + User: c.Config.User, + ConsoleSize: c.hostConfig.ConsoleSize, + } + + processConfig.Env = env + + var layerFolder string + var layerPaths []string + + // The following is specific to the Windows driver. We do this to + // enable VFS to continue operating for development purposes. + if wd, ok := c.daemon.driver.(*windows.WindowsGraphDriver); ok { + var err error + var img *image.Image + var ids []string + + if img, err = c.daemon.graph.Get(c.ImageID); err != nil { + return fmt.Errorf("Failed to graph.Get on ImageID %s - %s", c.ImageID, err) + } + if ids, err = c.daemon.graph.ParentLayerIds(img); err != nil { + return fmt.Errorf("Failed to get parentlayer ids %s", img.ID) + } + layerPaths = wd.LayerIdsToPaths(ids) + layerFolder = filepath.Join(wd.Info().HomeDir, filepath.Base(c.ID)) + } + + // TODO Windows: Factor out remainder of unused fields. 
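+	// Assemble the final driver command. FirstStart, LayerFolder and
+	// LayerPaths below are Windows-specific fields consumed by the Windows
+	// exec driver.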
+ c.command = &execdriver.Command{ + ID: c.ID, + Rootfs: c.RootfsPath(), + ReadonlyRootfs: c.hostConfig.ReadonlyRootfs, + InitPath: "/.dockerinit", + WorkingDir: c.Config.WorkingDir, + Network: en, + Pid: pid, + Resources: resources, + CapAdd: c.hostConfig.CapAdd.Slice(), + CapDrop: c.hostConfig.CapDrop.Slice(), + ProcessConfig: processConfig, + ProcessLabel: c.GetProcessLabel(), + MountLabel: c.GetMountLabel(), + FirstStart: !c.HasBeenStartedBefore, + LayerFolder: layerFolder, + LayerPaths: layerPaths, + } + + return nil +} + +// GetSize, return real size, virtual size +func (container *Container) GetSize() (int64, int64) { + // TODO Windows + return 0, 0 +} + +func (container *Container) AllocateNetwork() error { + return nil +} + +func (container *Container) ExportRw() (archive.Archive, error) { + if container.IsRunning() { + return nil, fmt.Errorf("Cannot export a running container.") + } + // TODO Windows. Implementation (different to Linux) + return nil, nil +} + +func (container *Container) ReleaseNetwork() { +} + +func (container *Container) RestoreNetwork() error { + return nil +} + +func disableAllActiveLinks(container *Container) { +} + +func (container *Container) DisableLink(name string) { +} + +func (container *Container) UnmountVolumes(forceSyscall bool) error { + return nil +} + +func (container *Container) PrepareStorage() error { + if wd, ok := container.daemon.driver.(*windows.WindowsGraphDriver); ok { + // Get list of paths to parent layers. + var ids []string + if container.ImageID != "" { + img, err := container.daemon.graph.Get(container.ImageID) + if err != nil { + return err + } + + ids, err = container.daemon.graph.ParentLayerIds(img) + if err != nil { + return err + } + } + + if err := hcsshim.PrepareLayer(wd.Info(), container.ID, wd.LayerIdsToPaths(ids)); err != nil { + return err + } + } + return nil +} + +func (container *Container) CleanupStorage() error { + if wd, ok := container.daemon.driver.(*windows.WindowsGraphDriver); ok { + return hcsshim.UnprepareLayer(wd.Info(), container.ID) + } + return nil +} diff --git a/daemon/create.go b/daemon/create.go new file mode 100644 index 00000000..a4a740f0 --- /dev/null +++ b/daemon/create.go @@ -0,0 +1,157 @@ +package daemon + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/graph" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" + "github.com/opencontainers/runc/libcontainer/label" +) + +func (daemon *Daemon) ContainerCreate(name string, config *runconfig.Config, hostConfig *runconfig.HostConfig) (string, []string, error) { + if config == nil { + return "", nil, fmt.Errorf("Config cannot be empty in order to create a container") + } + + warnings, err := daemon.verifyContainerSettings(hostConfig, config) + if err != nil { + return "", warnings, err + } + + container, buildWarnings, err := daemon.Create(config, hostConfig, name) + if err != nil { + if daemon.Graph().IsNotExist(err, config.Image) { + _, tag := parsers.ParseRepositoryTag(config.Image) + if tag == "" { + tag = graph.DEFAULTTAG + } + return "", warnings, fmt.Errorf("No such image: %s (tag: %s)", config.Image, tag) + } + return "", warnings, err + } + + warnings = append(warnings, buildWarnings...) + + return container.ID, warnings, nil +} + +// Create creates a new container from the given configuration with a given name. 
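+// It merges the image configuration into the container config, materializes
+// any volumes declared in the config, and persists the new container to disk.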
+func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.HostConfig, name string) (*Container, []string, error) { + var ( + container *Container + warnings []string + img *image.Image + imgID string + err error + ) + + if config.Image != "" { + img, err = daemon.repositories.LookupImage(config.Image) + if err != nil { + return nil, nil, err + } + if err = daemon.graph.CheckDepth(img); err != nil { + return nil, nil, err + } + imgID = img.ID + } + + if err := daemon.mergeAndVerifyConfig(config, img); err != nil { + return nil, nil, err + } + if hostConfig == nil { + hostConfig = &runconfig.HostConfig{} + } + if hostConfig.SecurityOpt == nil { + hostConfig.SecurityOpt, err = daemon.GenerateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode) + if err != nil { + return nil, nil, err + } + } + if container, err = daemon.newContainer(name, config, imgID); err != nil { + return nil, nil, err + } + if err := daemon.Register(container); err != nil { + return nil, nil, err + } + if err := daemon.createRootfs(container); err != nil { + return nil, nil, err + } + if err := daemon.setHostConfig(container, hostConfig); err != nil { + return nil, nil, err + } + if err := container.Mount(); err != nil { + return nil, nil, err + } + defer container.Unmount() + + for spec := range config.Volumes { + var ( + name, destination string + parts = strings.Split(spec, ":") + ) + switch len(parts) { + case 2: + name, destination = parts[0], filepath.Clean(parts[1]) + default: + name = stringid.GenerateRandomID() + destination = filepath.Clean(parts[0]) + } + // Skip volumes for which we already have something mounted on that + // destination because of a --volume-from. + if container.isDestinationMounted(destination) { + continue + } + path, err := container.GetResourcePath(destination) + if err != nil { + return nil, nil, err + } + + stat, err := os.Stat(path) + if err == nil && !stat.IsDir() { + return nil, nil, fmt.Errorf("cannot mount volume over existing file, file exists %s", path) + } + + v, err := createVolume(name, config.VolumeDriver) + if err != nil { + return nil, nil, err + } + if err := label.Relabel(v.Path(), container.MountLabel, "z"); err != nil { + return nil, nil, err + } + + if err := container.copyImagePathContent(v, destination); err != nil { + return nil, nil, err + } + + container.addMountPointWithVolume(destination, v, true) + } + if err := container.ToDisk(); err != nil { + logrus.Errorf("Error saving new container to disk: %v", err) + return nil, nil, err + } + container.LogEvent("create") + return container, warnings, nil +} + +func (daemon *Daemon) GenerateSecurityOpt(ipcMode runconfig.IpcMode, pidMode runconfig.PidMode) ([]string, error) { + if ipcMode.IsHost() || pidMode.IsHost() { + return label.DisableSecOpt(), nil + } + if ipcContainer := ipcMode.Container(); ipcContainer != "" { + c, err := daemon.Get(ipcContainer) + if err != nil { + return nil, err + } + + return label.DupSecOpt(c.ProcessLabel), nil + } + return nil, nil +} diff --git a/daemon/daemon.go b/daemon/daemon.go new file mode 100644 index 00000000..e07a0117 --- /dev/null +++ b/daemon/daemon.go @@ -0,0 +1,981 @@ +package daemon + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "runtime" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/daemon/events" + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/daemon/execdriver/execdrivers" + 
"github.com/docker/docker/daemon/graphdriver" + _ "github.com/docker/docker/daemon/graphdriver/vfs" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/graph" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/broadcastwriter" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/graphdb" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/namesgenerator" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/truncindex" + "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork" + "github.com/opencontainers/runc/libcontainer/netlink" +) + +var ( + validContainerNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]` + validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`) + + ErrSystemNotSupported = errors.New("The Docker daemon is not supported on this platform.") +) + +type contStore struct { + s map[string]*Container + sync.Mutex +} + +func (c *contStore) Add(id string, cont *Container) { + c.Lock() + c.s[id] = cont + c.Unlock() +} + +func (c *contStore) Get(id string) *Container { + c.Lock() + res := c.s[id] + c.Unlock() + return res +} + +func (c *contStore) Delete(id string) { + c.Lock() + delete(c.s, id) + c.Unlock() +} + +func (c *contStore) List() []*Container { + containers := new(History) + c.Lock() + for _, cont := range c.s { + containers.Add(cont) + } + c.Unlock() + containers.Sort() + return *containers +} + +type Daemon struct { + ID string + repository string + sysInitPath string + containers *contStore + execCommands *execStore + graph *graph.Graph + repositories *graph.TagStore + idIndex *truncindex.TruncIndex + sysInfo *sysinfo.SysInfo + config *Config + containerGraph *graphdb.Database + driver graphdriver.Driver + execDriver execdriver.Driver + statsCollector *statsCollector + defaultLogConfig runconfig.LogConfig + RegistryService *registry.Service + EventsService *events.Events + netController libnetwork.NetworkController + root string +} + +// Get looks for a container using the provided information, which could be +// one of the following inputs from the caller: +// - A full container ID, which will exact match a container in daemon's list +// - A container name, which will only exact match via the GetByName() function +// - A partial container ID prefix (e.g. short ID) of any length that is +// unique enough to only return a single container object +// If none of these searches succeed, an error is returned +func (daemon *Daemon) Get(prefixOrName string) (*Container, error) { + if containerByID := daemon.containers.Get(prefixOrName); containerByID != nil { + // prefix is an exact match to a full container ID + return containerByID, nil + } + + // GetByName will match only an exact name provided; we ignore errors + if containerByName, _ := daemon.GetByName(prefixOrName); containerByName != nil { + // prefix is an exact match to a full container Name + return containerByName, nil + } + + containerId, indexError := daemon.idIndex.Get(prefixOrName) + if indexError != nil { + return nil, indexError + } + return daemon.containers.Get(containerId), nil +} + +// Exists returns a true if a container of the specified ID or name exists, +// false otherwise. 
+func (daemon *Daemon) Exists(id string) bool { + c, _ := daemon.Get(id) + return c != nil +} + +func (daemon *Daemon) containerRoot(id string) string { + return filepath.Join(daemon.repository, id) +} + +// Load reads the contents of a container from disk +// This is typically done at startup. +func (daemon *Daemon) load(id string) (*Container, error) { + container := &Container{ + CommonContainer: daemon.newBaseContainer(id), + } + + if err := container.FromDisk(); err != nil { + return nil, err + } + + if container.ID != id { + return container, fmt.Errorf("Container %s is stored at %s", container.ID, id) + } + + return container, nil +} + +// Register makes a container object usable by the daemon as +// This is a wrapper for register +func (daemon *Daemon) Register(container *Container) error { + return daemon.register(container, true) +} + +// register makes a container object usable by the daemon as +func (daemon *Daemon) register(container *Container, updateSuffixarray bool) error { + if container.daemon != nil || daemon.Exists(container.ID) { + return fmt.Errorf("Container is already loaded") + } + if err := validateID(container.ID); err != nil { + return err + } + if err := daemon.ensureName(container); err != nil { + return err + } + + container.daemon = daemon + + // Attach to stdout and stderr + container.stderr = broadcastwriter.New() + container.stdout = broadcastwriter.New() + // Attach to stdin + if container.Config.OpenStdin { + container.stdin, container.stdinPipe = io.Pipe() + } else { + container.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin + } + // done + daemon.containers.Add(container.ID, container) + + // don't update the Suffixarray if we're starting up + // we'll waste time if we update it for every container + daemon.idIndex.Add(container.ID) + + if err := daemon.verifyVolumesInfo(container); err != nil { + return err + } + + if err := container.prepareMountPoints(); err != nil { + return err + } + + if container.IsRunning() { + logrus.Debugf("killing old running container %s", container.ID) + // Set exit code to 128 + SIGKILL (9) to properly represent unsuccessful exit + container.SetStopped(&execdriver.ExitStatus{ExitCode: 137}) + + // use the current driver and ensure that the container is dead x.x + cmd := &execdriver.Command{ + ID: container.ID, + } + daemon.execDriver.Terminate(cmd) + + if err := container.Unmount(); err != nil { + logrus.Debugf("unmount error %s", err) + } + if err := container.ToDisk(); err != nil { + logrus.Errorf("Error saving stopped state to disk: %v", err) + } + } + + return nil +} + +func (daemon *Daemon) ensureName(container *Container) error { + if container.Name == "" { + name, err := daemon.generateNewName(container.ID) + if err != nil { + return err + } + container.Name = name + + if err := container.ToDisk(); err != nil { + logrus.Errorf("Error saving container name to disk: %v", err) + } + } + return nil +} + +func (daemon *Daemon) restore() error { + type cr struct { + container *Container + registered bool + } + + var ( + debug = (os.Getenv("DEBUG") != "" || os.Getenv("TEST") != "") + currentDriver = daemon.driver.String() + containers = make(map[string]*cr) + ) + + if !debug { + logrus.Info("Loading containers: start.") + } + dir, err := ioutil.ReadDir(daemon.repository) + if err != nil { + return err + } + + for _, v := range dir { + id := v.Name() + container, err := daemon.load(id) + if !debug && logrus.GetLevel() == logrus.InfoLevel { + fmt.Print(".") + } + if err != nil { + 
logrus.Errorf("Failed to load container %v: %v", id, err) + continue + } + + // Ignore the container if it does not support the current driver being used by the graph + if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver { + logrus.Debugf("Loaded container %v", container.ID) + + containers[container.ID] = &cr{container: container} + } else { + logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID) + } + } + + if entities := daemon.containerGraph.List("/", -1); entities != nil { + for _, p := range entities.Paths() { + if !debug && logrus.GetLevel() == logrus.InfoLevel { + fmt.Print(".") + } + + e := entities[p] + + if c, ok := containers[e.ID()]; ok { + c.registered = true + } + } + } + + group := sync.WaitGroup{} + for _, c := range containers { + group.Add(1) + + go func(container *Container, registered bool) { + defer group.Done() + + if !registered { + // Try to set the default name for a container if it exists prior to links + container.Name, err = daemon.generateNewName(container.ID) + if err != nil { + logrus.Debugf("Setting default id - %s", err) + } + } + + if err := daemon.register(container, false); err != nil { + logrus.Debugf("Failed to register container %s: %s", container.ID, err) + } + + // check the restart policy on the containers and restart any container with + // the restart policy of "always" + if daemon.config.AutoRestart && container.shouldRestart() { + logrus.Debugf("Starting container %s", container.ID) + + if err := container.Start(); err != nil { + logrus.Debugf("Failed to start container %s: %s", container.ID, err) + } + } + }(c.container, c.registered) + } + group.Wait() + + if !debug { + if logrus.GetLevel() == logrus.InfoLevel { + fmt.Println() + } + logrus.Info("Loading containers: done.") + } + + return nil +} + +func (daemon *Daemon) mergeAndVerifyConfig(config *runconfig.Config, img *image.Image) error { + if img != nil && img.Config != nil { + if err := runconfig.Merge(config, img.Config); err != nil { + return err + } + } + if config.Entrypoint.Len() == 0 && config.Cmd.Len() == 0 { + return fmt.Errorf("No command specified") + } + return nil +} + +func (daemon *Daemon) generateIdAndName(name string) (string, string, error) { + var ( + err error + id = stringid.GenerateRandomID() + ) + + if name == "" { + if name, err = daemon.generateNewName(id); err != nil { + return "", "", err + } + return id, name, nil + } + + if name, err = daemon.reserveName(id, name); err != nil { + return "", "", err + } + + return id, name, nil +} + +func (daemon *Daemon) reserveName(id, name string) (string, error) { + if !validContainerNamePattern.MatchString(name) { + return "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars) + } + + if name[0] != '/' { + name = "/" + name + } + + if _, err := daemon.containerGraph.Set(name, id); err != nil { + if !graphdb.IsNonUniqueNameError(err) { + return "", err + } + + conflictingContainer, err := daemon.GetByName(name) + if err != nil { + if strings.Contains(err.Error(), "Could not find entity") { + return "", err + } + + // Remove name and continue starting the container + if err := daemon.containerGraph.Delete(name); err != nil { + return "", err + } + } else { + nameAsKnownByUser := strings.TrimPrefix(name, "/") + return "", fmt.Errorf( + "Conflict. The name %q is already in use by container %s. 
You have to delete (or rename) that container to be able to reuse that name.", nameAsKnownByUser, + stringid.TruncateID(conflictingContainer.ID)) + } + } + return name, nil +} + +func (daemon *Daemon) generateNewName(id string) (string, error) { + var name string + for i := 0; i < 6; i++ { + name = namesgenerator.GetRandomName(i) + if name[0] != '/' { + name = "/" + name + } + + if _, err := daemon.containerGraph.Set(name, id); err != nil { + if !graphdb.IsNonUniqueNameError(err) { + return "", err + } + continue + } + return name, nil + } + + name = "/" + stringid.TruncateID(id) + if _, err := daemon.containerGraph.Set(name, id); err != nil { + return "", err + } + return name, nil +} + +func (daemon *Daemon) generateHostname(id string, config *runconfig.Config) { + // Generate default hostname + // FIXME: the lxc template no longer needs to set a default hostname + if config.Hostname == "" { + config.Hostname = id[:12] + } +} + +func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint *runconfig.Entrypoint, configCmd *runconfig.Command) (string, []string) { + var ( + entrypoint string + args []string + ) + + cmdSlice := configCmd.Slice() + if configEntrypoint.Len() != 0 { + eSlice := configEntrypoint.Slice() + entrypoint = eSlice[0] + args = append(eSlice[1:], cmdSlice...) + } else { + entrypoint = cmdSlice[0] + args = cmdSlice[1:] + } + return entrypoint, args +} + +func (daemon *Daemon) newContainer(name string, config *runconfig.Config, imgID string) (*Container, error) { + var ( + id string + err error + ) + id, name, err = daemon.generateIdAndName(name) + if err != nil { + return nil, err + } + + daemon.generateHostname(id, config) + entrypoint, args := daemon.getEntrypointAndArgs(config.Entrypoint, config.Cmd) + + base := daemon.newBaseContainer(id) + base.Created = time.Now().UTC() + base.Path = entrypoint + base.Args = args //FIXME: de-duplicate from config + base.Config = config + base.hostConfig = &runconfig.HostConfig{} + base.ImageID = imgID + base.NetworkSettings = &network.Settings{} + base.Name = name + base.Driver = daemon.driver.String() + base.ExecDriver = daemon.execDriver.Name() + + container := &Container{ + CommonContainer: base, + } + + return container, err +} + +func GetFullContainerName(name string) (string, error) { + if name == "" { + return "", fmt.Errorf("Container name cannot be empty") + } + if name[0] != '/' { + name = "/" + name + } + return name, nil +} + +func (daemon *Daemon) GetByName(name string) (*Container, error) { + fullName, err := GetFullContainerName(name) + if err != nil { + return nil, err + } + entity := daemon.containerGraph.Get(fullName) + if entity == nil { + return nil, fmt.Errorf("Could not find entity for %s", name) + } + e := daemon.containers.Get(entity.ID()) + if e == nil { + return nil, fmt.Errorf("Could not find container for entity id %s", entity.ID()) + } + return e, nil +} + +func (daemon *Daemon) Children(name string) (map[string]*Container, error) { + name, err := GetFullContainerName(name) + if err != nil { + return nil, err + } + children := make(map[string]*Container) + + err = daemon.containerGraph.Walk(name, func(p string, e *graphdb.Entity) error { + c, err := daemon.Get(e.ID()) + if err != nil { + return err + } + children[p] = c + return nil + }, 0) + + if err != nil { + return nil, err + } + return children, nil +} + +func (daemon *Daemon) Parents(name string) ([]string, error) { + name, err := GetFullContainerName(name) + if err != nil { + return nil, err + } + + return daemon.containerGraph.Parents(name) +} + 
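+// RegisterLink records a parent/child link in the container graph under
+// parent.Name/alias, and is a no-op if the entry already exists. A minimal
+// usage sketch, assuming hypothetical *Container values web and db:
+//
+//	// link "db" into "web" under the alias "database"
+//	if err := daemon.RegisterLink(web, db, "database"); err != nil {
+//		logrus.Errorf("registering link: %v", err)
+//	}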
+func (daemon *Daemon) RegisterLink(parent, child *Container, alias string) error { + fullName := filepath.Join(parent.Name, alias) + if !daemon.containerGraph.Exists(fullName) { + _, err := daemon.containerGraph.Set(fullName, child.ID) + return err + } + return nil +} + +func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemon, err error) { + setDefaultMtu(config) + + // Ensure we have compatible configuration options + if err := checkConfigOptions(config); err != nil { + return nil, err + } + + // Do we have a disabled network? + config.DisableBridge = isBridgeNetworkDisabled(config) + + // Verify the platform is supported as a daemon + if runtime.GOOS != "linux" && runtime.GOOS != "windows" { + return nil, ErrSystemNotSupported + } + + // Validate platform-specific requirements + if err := checkSystem(); err != nil { + return nil, err + } + + // set up SIGUSR1 handler on Unix-like systems, or a Win32 global event + // on Windows to dump Go routine stacks + setupDumpStackTrap() + + // get the canonical path to the Docker root directory + var realRoot string + if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) { + realRoot = config.Root + } else { + realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root) + if err != nil { + return nil, fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err) + } + } + config.Root = realRoot + // Create the root directory if it doesn't exists + if err := system.MkdirAll(config.Root, 0700); err != nil && !os.IsExist(err) { + return nil, err + } + + // set up the tmpDir to use a canonical path + tmp, err := tempDir(config.Root) + if err != nil { + return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err) + } + realTmp, err := fileutils.ReadSymlinkedDirectory(tmp) + if err != nil { + return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err) + } + os.Setenv("TMPDIR", realTmp) + + // Set the default driver + graphdriver.DefaultDriver = config.GraphDriver + + // Load storage driver + driver, err := graphdriver.New(config.Root, config.GraphOptions) + if err != nil { + return nil, fmt.Errorf("error initializing graphdriver: %v", err) + } + logrus.Debugf("Using graph driver %s", driver) + + d := &Daemon{} + d.driver = driver + + // Ensure the graph driver is shutdown at a later point + defer func() { + if err != nil { + if err := d.Shutdown(); err != nil { + logrus.Error(err) + } + } + }() + + // Verify logging driver type + if config.LogConfig.Type != "none" { + if _, err := logger.GetLogDriver(config.LogConfig.Type); err != nil { + return nil, fmt.Errorf("error finding the logging driver: %v", err) + } + } + logrus.Debugf("Using default logging driver %s", config.LogConfig.Type) + + // Configure and validate the kernels security support + if err := configureKernelSecuritySupport(config, d.driver.String()); err != nil { + return nil, err + } + + daemonRepo := filepath.Join(config.Root, "containers") + + if err := system.MkdirAll(daemonRepo, 0700); err != nil && !os.IsExist(err) { + return nil, err + } + + // Migrate the container if it is aufs and aufs is enabled + if err := migrateIfDownlevel(d.driver, config.Root); err != nil { + return nil, err + } + + logrus.Debug("Creating images graph") + g, err := graph.NewGraph(filepath.Join(config.Root, "graph"), d.driver) + if err != nil { + return nil, err + } + + // Configure the volumes driver + if err := configureVolumes(config); err != nil { + return nil, err + } + + trustKey, err := 
api.LoadOrCreateTrustKey(config.TrustKeyPath) + if err != nil { + return nil, err + } + + trustDir := filepath.Join(config.Root, "trust") + + if err := system.MkdirAll(trustDir, 0700); err != nil && !os.IsExist(err) { + return nil, err + } + + eventsService := events.New() + logrus.Debug("Creating repository list") + tagCfg := &graph.TagStoreConfig{ + Graph: g, + Key: trustKey, + Registry: registryService, + Events: eventsService, + } + repositories, err := graph.NewTagStore(filepath.Join(config.Root, "repositories-"+d.driver.String()), tagCfg) + if err != nil { + return nil, fmt.Errorf("Couldn't create Tag store repositories-%s: %s", d.driver.String(), err) + } + + d.netController, err = initNetworkController(config) + if err != nil { + return nil, fmt.Errorf("Error initializing network controller: %v", err) + } + + graphdbPath := filepath.Join(config.Root, "linkgraph.db") + graph, err := graphdb.NewSqliteConn(graphdbPath) + if err != nil { + return nil, err + } + + d.containerGraph = graph + + var sysInitPath string + if config.ExecDriver == "lxc" { + initPath, err := configureSysInit(config) + if err != nil { + return nil, err + } + sysInitPath = initPath + } + + sysInfo := sysinfo.New(false) + // Check if Devices cgroup is mounted, it is hard requirement for container security, + // on Linux/FreeBSD. + if runtime.GOOS != "windows" && !sysInfo.CgroupDevicesEnabled { + return nil, fmt.Errorf("Devices cgroup isn't mounted") + } + + ed, err := execdrivers.NewDriver(config.ExecDriver, config.ExecOptions, config.ExecRoot, config.Root, sysInitPath, sysInfo) + if err != nil { + return nil, err + } + + d.ID = trustKey.PublicKey().KeyID() + d.repository = daemonRepo + d.containers = &contStore{s: make(map[string]*Container)} + d.execCommands = newExecStore() + d.graph = g + d.repositories = repositories + d.idIndex = truncindex.NewTruncIndex([]string{}) + d.sysInfo = sysInfo + d.config = config + d.sysInitPath = sysInitPath + d.execDriver = ed + d.statsCollector = newStatsCollector(1 * time.Second) + d.defaultLogConfig = config.LogConfig + d.RegistryService = registryService + d.EventsService = eventsService + d.root = config.Root + go d.execCommandGC() + + if err := d.restore(); err != nil { + return nil, err + } + + return d, nil +} + +func (daemon *Daemon) Shutdown() error { + if daemon.containers != nil { + group := sync.WaitGroup{} + logrus.Debug("starting clean shutdown of all containers...") + for _, container := range daemon.List() { + c := container + if c.IsRunning() { + logrus.Debugf("stopping %s", c.ID) + group.Add(1) + + go func() { + defer group.Done() + // If container failed to exit in 10 seconds of SIGTERM, then using the force + if err := c.Stop(10); err != nil { + logrus.Errorf("Stop container %s with error: %v", c.ID, err) + } + c.WaitStop(-1 * time.Second) + logrus.Debugf("container stopped %s", c.ID) + }() + } + } + group.Wait() + + // trigger libnetwork GC only if it's initialized + if daemon.netController != nil { + daemon.netController.GC() + } + } + + if daemon.containerGraph != nil { + if err := daemon.containerGraph.Close(); err != nil { + logrus.Errorf("Error during container graph.Close(): %v", err) + } + } + + if daemon.driver != nil { + if err := daemon.driver.Cleanup(); err != nil { + logrus.Errorf("Error during graph storage driver.Cleanup(): %v", err) + } + } + + return nil +} + +func (daemon *Daemon) Mount(container *Container) error { + dir, err := daemon.driver.Get(container.ID, container.GetMountLabel()) + if err != nil { + return fmt.Errorf("Error getting 
container %s from driver %s: %s", container.ID, daemon.driver, err) + } + + if container.basefs != dir { + // The mount path reported by the graph driver should always be trusted on Windows, since the + // volume path for a given mounted layer may change over time. This should only be an error + // on non-Windows operating systems. + if container.basefs != "" && runtime.GOOS != "windows" { + daemon.driver.Put(container.ID) + return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", + daemon.driver, container.ID, container.basefs, dir) + } + } + container.basefs = dir + return nil +} + +func (daemon *Daemon) Unmount(container *Container) error { + daemon.driver.Put(container.ID) + return nil +} + +func (daemon *Daemon) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) { + return daemon.execDriver.Run(c.command, pipes, startCallback) +} + +func (daemon *Daemon) Kill(c *Container, sig int) error { + return daemon.execDriver.Kill(c.command, sig) +} + +func (daemon *Daemon) Stats(c *Container) (*execdriver.ResourceStats, error) { + return daemon.execDriver.Stats(c.ID) +} + +func (daemon *Daemon) SubscribeToContainerStats(name string) (chan interface{}, error) { + c, err := daemon.Get(name) + if err != nil { + return nil, err + } + ch := daemon.statsCollector.collect(c) + return ch, nil +} + +func (daemon *Daemon) UnsubscribeToContainerStats(name string, ch chan interface{}) error { + c, err := daemon.Get(name) + if err != nil { + return err + } + daemon.statsCollector.unsubscribe(c, ch) + return nil +} + +// FIXME: this is a convenience function for integration tests +// which need direct access to daemon.graph. +// Once the tests switch to using engine and jobs, this method +// can go away. +func (daemon *Daemon) Graph() *graph.Graph { + return daemon.graph +} + +func (daemon *Daemon) Repositories() *graph.TagStore { + return daemon.repositories +} + +func (daemon *Daemon) Config() *Config { + return daemon.config +} + +func (daemon *Daemon) SystemConfig() *sysinfo.SysInfo { + return daemon.sysInfo +} + +func (daemon *Daemon) SystemInitPath() string { + return daemon.sysInitPath +} + +func (daemon *Daemon) GraphDriver() graphdriver.Driver { + return daemon.driver +} + +func (daemon *Daemon) ExecutionDriver() execdriver.Driver { + return daemon.execDriver +} + +func (daemon *Daemon) ContainerGraph() *graphdb.Database { + return daemon.containerGraph +} + +func (daemon *Daemon) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) { + // Retrieve all images + images := daemon.Graph().Map() + + // Store the tree in a map of map (map[parentId][childId]) + imageMap := make(map[string]map[string]struct{}) + for _, img := range images { + if _, exists := imageMap[img.Parent]; !exists { + imageMap[img.Parent] = make(map[string]struct{}) + } + imageMap[img.Parent][img.ID] = struct{}{} + } + + // Loop on the children of the given image and check the config + var match *image.Image + for elem := range imageMap[imgID] { + img, ok := images[elem] + if !ok { + return nil, fmt.Errorf("unable to find image %q", elem) + } + if runconfig.Compare(&img.ContainerConfig, config) { + if match == nil || match.Created.Before(img.Created) { + match = img + } + } + } + return match, nil +} + +// tempDir returns the default directory to use for temporary files. 
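+// The DOCKER_TMPDIR environment variable overrides the default of
+// <rootDir>/tmp.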
+func tempDir(rootDir string) (string, error) { + var tmpDir string + if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" { + tmpDir = filepath.Join(rootDir, "tmp") + } + return tmpDir, system.MkdirAll(tmpDir, 0700) +} + +func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error { + container.Lock() + if err := parseSecurityOpt(container, hostConfig); err != nil { + container.Unlock() + return err + } + container.Unlock() + + // Do not lock while creating volumes since this could be calling out to external plugins + // Don't want to block other actions, like `docker ps` because we're waiting on an external plugin + if err := daemon.registerMountPoints(container, hostConfig); err != nil { + return err + } + + container.Lock() + defer container.Unlock() + // Register any links from the host config before starting the container + if err := daemon.RegisterLinks(container, hostConfig); err != nil { + return err + } + + container.hostConfig = hostConfig + container.toDisk() + return nil +} + +func (daemon *Daemon) newBaseContainer(id string) CommonContainer { + return CommonContainer{ + ID: id, + State: NewState(), + MountPoints: make(map[string]*mountPoint), + Volumes: make(map[string]string), + VolumesRW: make(map[string]bool), + execCommands: newExecStore(), + root: daemon.containerRoot(id), + } +} + +func setDefaultMtu(config *Config) { + // do nothing if the config does not have the default 0 value. + if config.Mtu != 0 { + return + } + config.Mtu = defaultNetworkMtu + if routeMtu, err := getDefaultRouteMtu(); err == nil { + config.Mtu = routeMtu + } +} + +var errNoDefaultRoute = errors.New("no default route was found") + +// getDefaultRouteMtu returns the MTU for the default route's interface. +func getDefaultRouteMtu() (int, error) { + routes, err := netlink.NetworkGetRoutes() + if err != nil { + return 0, err + } + for _, r := range routes { + if r.Default && r.Iface != nil { + return r.Iface.MTU, nil + } + } + return 0, errNoDefaultRoute +} diff --git a/daemon/daemon_aufs.go b/daemon/daemon_aufs.go new file mode 100644 index 00000000..0848ea92 --- /dev/null +++ b/daemon/daemon_aufs.go @@ -0,0 +1,21 @@ +// +build !exclude_graphdriver_aufs,linux + +package daemon + +import ( + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/aufs" +) + +// Given the graphdriver ad, if it is aufs, then migrate it. +// If aufs driver is not built, this func is a noop. 
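+// (daemon_no_aufs.go supplies the no-op twin under the opposite build tag.)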
+func migrateIfAufs(driver graphdriver.Driver, root string) error { + if ad, ok := driver.(*aufs.Driver); ok { + logrus.Debugf("Migrating existing containers") + if err := ad.Migrate(root, setupInitLayer); err != nil { + return err + } + } + return nil +} diff --git a/daemon/daemon_btrfs.go b/daemon/daemon_btrfs.go new file mode 100644 index 00000000..61dac0dd --- /dev/null +++ b/daemon/daemon_btrfs.go @@ -0,0 +1,7 @@ +// +build !exclude_graphdriver_btrfs,linux + +package daemon + +import ( + _ "github.com/docker/docker/daemon/graphdriver/btrfs" +) diff --git a/daemon/daemon_devicemapper.go b/daemon/daemon_devicemapper.go new file mode 100644 index 00000000..5b64c453 --- /dev/null +++ b/daemon/daemon_devicemapper.go @@ -0,0 +1,7 @@ +// +build !exclude_graphdriver_devicemapper,linux + +package daemon + +import ( + _ "github.com/docker/docker/daemon/graphdriver/devmapper" +) diff --git a/daemon/daemon_no_aufs.go b/daemon/daemon_no_aufs.go new file mode 100644 index 00000000..921b3958 --- /dev/null +++ b/daemon/daemon_no_aufs.go @@ -0,0 +1,11 @@ +// +build exclude_graphdriver_aufs,linux + +package daemon + +import ( + "github.com/docker/docker/daemon/graphdriver" +) + +func migrateIfAufs(driver graphdriver.Driver, root string) error { + return nil +} diff --git a/daemon/daemon_overlay.go b/daemon/daemon_overlay.go new file mode 100644 index 00000000..25a42a19 --- /dev/null +++ b/daemon/daemon_overlay.go @@ -0,0 +1,7 @@ +// +build !exclude_graphdriver_overlay,linux + +package daemon + +import ( + _ "github.com/docker/docker/daemon/graphdriver/overlay" +) diff --git a/daemon/daemon_test.go b/daemon/daemon_test.go new file mode 100644 index 00000000..5e0233da --- /dev/null +++ b/daemon/daemon_test.go @@ -0,0 +1,516 @@ +package daemon + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "testing" + + "github.com/docker/docker/pkg/graphdb" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/truncindex" + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" +) + +// +// https://github.com/docker/docker/issues/8069 +// + +func TestGet(t *testing.T) { + c1 := &Container{ + CommonContainer: CommonContainer{ + ID: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", + Name: "tender_bardeen", + }, + } + + c2 := &Container{ + CommonContainer: CommonContainer{ + ID: "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de", + Name: "drunk_hawking", + }, + } + + c3 := &Container{ + CommonContainer: CommonContainer{ + ID: "3cdbd1aa394fd68559fd1441d6eff2abfafdcba06e72d2febdba229008b0bf57", + Name: "3cdbd1aa", + }, + } + + c4 := &Container{ + CommonContainer: CommonContainer{ + ID: "75fb0b800922abdbef2d27e60abcdfaf7fb0698b2a96d22d3354da361a6ff4a5", + Name: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", + }, + } + + c5 := &Container{ + CommonContainer: CommonContainer{ + ID: "d22d69a2b8960bf7fafdcba06e72d2febdba960bf7fafdcba06e72d2f9008b060b", + Name: "d22d69a2b896", + }, + } + + store := &contStore{ + s: map[string]*Container{ + c1.ID: c1, + c2.ID: c2, + c3.ID: c3, + c4.ID: c4, + c5.ID: c5, + }, + } + + index := truncindex.NewTruncIndex([]string{}) + index.Add(c1.ID) + index.Add(c2.ID) + index.Add(c3.ID) + index.Add(c4.ID) + index.Add(c5.ID) + + daemonTestDbPath := path.Join(os.TempDir(), "daemon_test.db") + graph, err := graphdb.NewSqliteConn(daemonTestDbPath) + if err != nil { + t.Fatalf("Failed to create daemon test sqlite database at %s", daemonTestDbPath) 
+	}
+	graph.Set(c1.Name, c1.ID)
+	graph.Set(c2.Name, c2.ID)
+	graph.Set(c3.Name, c3.ID)
+	graph.Set(c4.Name, c4.ID)
+	graph.Set(c5.Name, c5.ID)
+
+	daemon := &Daemon{
+		containers:     store,
+		idIndex:        index,
+		containerGraph: graph,
+	}
+
+	if container, _ := daemon.Get("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 {
+		t.Fatal("Should explicitly match full container IDs")
+	}
+
+	if container, _ := daemon.Get("75fb0b8009"); container != c4 {
+		t.Fatal("Should match a partial ID")
+	}
+
+	if container, _ := daemon.Get("drunk_hawking"); container != c2 {
+		t.Fatal("Should match a full name")
+	}
+
+	// c3.Name is a partial match for both c3.ID and c2.ID
+	if c, _ := daemon.Get("3cdbd1aa"); c != c3 {
+		t.Fatal("Should match a full name even though it collides with another container's ID")
+	}
+
+	if container, _ := daemon.Get("d22d69a2b896"); container != c5 {
+		t.Fatal("Should match a container where the provided prefix is an exact match to its name and is also a prefix for its ID")
+	}
+
+	if _, err := daemon.Get("3cdbd1"); err == nil {
+		t.Fatal("Should return an error when provided a prefix that partially matches multiple container IDs")
+	}
+
+	if _, err := daemon.Get("nothing"); err == nil {
+		t.Fatal("Should return an error when provided a prefix that is neither a name nor a partial match to an ID")
+	}
+
+	os.Remove(daemonTestDbPath)
+}
+
+func TestLoadWithVolume(t *testing.T) {
+	tmp, err := ioutil.TempDir("", "docker-daemon-test-")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmp)
+
+	containerId := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
+	containerPath := filepath.Join(tmp, containerId)
+	if err := os.MkdirAll(containerPath, 0755); err != nil {
+		t.Fatal(err)
+	}
+
+	hostVolumeId := stringid.GenerateRandomID()
+	vfsPath := filepath.Join(tmp, "vfs", "dir", hostVolumeId)
+	volumePath := filepath.Join(tmp, "volumes", hostVolumeId)
+
+	if err := os.MkdirAll(vfsPath, 0755); err != nil {
+		t.Fatal(err)
+	}
+	if err := os.MkdirAll(volumePath, 0755); err != nil {
+		t.Fatal(err)
+	}
+
+	content := filepath.Join(vfsPath, "helo")
+	if err := ioutil.WriteFile(content, []byte("HELO"), 0644); err != nil {
+		t.Fatal(err)
+	}
+
+	config := `{"State":{"Running":true,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":2464,"ExitCode":0,
+"Error":"","StartedAt":"2015-05-26T16:48:53.869308965Z","FinishedAt":"0001-01-01T00:00:00Z"},
+"ID":"d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e","Created":"2015-05-26T16:48:53.7987917Z","Path":"top",
+"Args":[],"Config":{"Hostname":"d59df5276e7b","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"",
+"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":true,"OpenStdin":true,
+"StdinOnce":false,"Env":null,"Cmd":["top"],"Image":"ubuntu:latest","Volumes":null,"WorkingDir":"","Entrypoint":null,
+"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":{}},"Image":"07f8e8c5e66084bef8f848877857537ffe1c47edd01a93af27e7161672ad0e95",
+"NetworkSettings":{"IPAddress":"172.17.0.1","IPPrefixLen":16,"MacAddress":"02:42:ac:11:00:01","LinkLocalIPv6Address":"fe80::42:acff:fe11:1",
+"LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","PortMapping":null,"Ports":{}},
+"ResolvConfPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/resolv.conf",
+"HostnamePath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hostname", +"HostsPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hosts", +"LogPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e-json.log", +"Name":"/ubuntu","Driver":"aufs","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0, +"UpdateDns":false,"Volumes":{"/vol1":"%s"},"VolumesRW":{"/vol1":true},"AppliedVolumesFrom":null}` + + cfg := fmt.Sprintf(config, vfsPath) + if err = ioutil.WriteFile(filepath.Join(containerPath, "config.json"), []byte(cfg), 0644); err != nil { + t.Fatal(err) + } + + hostConfig := `{"Binds":[],"ContainerIDFile":"","LxcConf":[],"Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"", +"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null, +"Devices":[],"NetworkMode":"bridge","IpcMode":"","PidMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"no","MaximumRetryCount":0}, +"SecurityOpt":null,"ReadonlyRootfs":false,"Ulimits":null,"LogConfig":{"Type":"","Config":null},"CgroupParent":""}` + if err = ioutil.WriteFile(filepath.Join(containerPath, "hostconfig.json"), []byte(hostConfig), 0644); err != nil { + t.Fatal(err) + } + + daemon, err := initDaemonForVolumesTest(tmp) + if err != nil { + t.Fatal(err) + } + defer volumedrivers.Unregister(volume.DefaultDriverName) + + c, err := daemon.load(containerId) + if err != nil { + t.Fatal(err) + } + + err = daemon.verifyVolumesInfo(c) + if err != nil { + t.Fatal(err) + } + + if len(c.MountPoints) != 1 { + t.Fatalf("Expected 1 volume mounted, was 0\n") + } + + m := c.MountPoints["/vol1"] + if m.Name != hostVolumeId { + t.Fatalf("Expected mount name to be %s, was %s\n", hostVolumeId, m.Name) + } + + if m.Destination != "/vol1" { + t.Fatalf("Expected mount destination /vol1, was %s\n", m.Destination) + } + + if !m.RW { + t.Fatalf("Expected mount point to be RW but it was not\n") + } + + if m.Driver != volume.DefaultDriverName { + t.Fatalf("Expected mount driver local, was %s\n", m.Driver) + } + + newVolumeContent := filepath.Join(volumePath, local.VolumeDataPathName, "helo") + b, err := ioutil.ReadFile(newVolumeContent) + if err != nil { + t.Fatal(err) + } + if string(b) != "HELO" { + t.Fatalf("Expected HELO, was %s\n", string(b)) + } +} + +func TestLoadWithBindMount(t *testing.T) { + tmp, err := ioutil.TempDir("", "docker-daemon-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + containerId := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e" + containerPath := filepath.Join(tmp, containerId) + if err = os.MkdirAll(containerPath, 0755); err != nil { + t.Fatal(err) + } + + config := `{"State":{"Running":true,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":2464,"ExitCode":0, +"Error":"","StartedAt":"2015-05-26T16:48:53.869308965Z","FinishedAt":"0001-01-01T00:00:00Z"}, +"ID":"d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e","Created":"2015-05-26T16:48:53.7987917Z","Path":"top", +"Args":[],"Config":{"Hostname":"d59df5276e7b","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"", +"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":true,"OpenStdin":true, 
+"StdinOnce":false,"Env":null,"Cmd":["top"],"Image":"ubuntu:latest","Volumes":null,"WorkingDir":"","Entrypoint":null, +"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":{}},"Image":"07f8e8c5e66084bef8f848877857537ffe1c47edd01a93af27e7161672ad0e95", +"NetworkSettings":{"IPAddress":"172.17.0.1","IPPrefixLen":16,"MacAddress":"02:42:ac:11:00:01","LinkLocalIPv6Address":"fe80::42:acff:fe11:1", +"LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","PortMapping":null,"Ports":{}}, +"ResolvConfPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/resolv.conf", +"HostnamePath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hostname", +"HostsPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hosts", +"LogPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e-json.log", +"Name":"/ubuntu","Driver":"aufs","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0, +"UpdateDns":false,"Volumes":{"/vol1": "/vol1"},"VolumesRW":{"/vol1":true},"AppliedVolumesFrom":null}` + + if err = ioutil.WriteFile(filepath.Join(containerPath, "config.json"), []byte(config), 0644); err != nil { + t.Fatal(err) + } + + hostConfig := `{"Binds":["/vol1:/vol1"],"ContainerIDFile":"","LxcConf":[],"Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"", +"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null, +"Devices":[],"NetworkMode":"bridge","IpcMode":"","PidMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"no","MaximumRetryCount":0}, +"SecurityOpt":null,"ReadonlyRootfs":false,"Ulimits":null,"LogConfig":{"Type":"","Config":null},"CgroupParent":""}` + if err = ioutil.WriteFile(filepath.Join(containerPath, "hostconfig.json"), []byte(hostConfig), 0644); err != nil { + t.Fatal(err) + } + + daemon, err := initDaemonForVolumesTest(tmp) + if err != nil { + t.Fatal(err) + } + defer volumedrivers.Unregister(volume.DefaultDriverName) + + c, err := daemon.load(containerId) + if err != nil { + t.Fatal(err) + } + + err = daemon.verifyVolumesInfo(c) + if err != nil { + t.Fatal(err) + } + + if len(c.MountPoints) != 1 { + t.Fatalf("Expected 1 volume mounted, was 0\n") + } + + m := c.MountPoints["/vol1"] + if m.Name != "" { + t.Fatalf("Expected empty mount name, was %s\n", m.Name) + } + + if m.Source != "/vol1" { + t.Fatalf("Expected mount source /vol1, was %s\n", m.Source) + } + + if m.Destination != "/vol1" { + t.Fatalf("Expected mount destination /vol1, was %s\n", m.Destination) + } + + if !m.RW { + t.Fatalf("Expected mount point to be RW but it was not\n") + } +} + +func TestLoadWithVolume17RC(t *testing.T) { + tmp, err := ioutil.TempDir("", "docker-daemon-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + containerId := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e" + containerPath := filepath.Join(tmp, containerId) + if err := os.MkdirAll(containerPath, 0755); err != nil { + t.Fatal(err) + } + + hostVolumeId := "6a3c03fc4a4e588561a543cc3bdd50089e27bd11bbb0e551e19bf735e2514101" + volumePath := filepath.Join(tmp, "volumes", hostVolumeId) + + if err := os.MkdirAll(volumePath, 0755); err != nil { + t.Fatal(err) + } + + content := 
filepath.Join(volumePath, "helo") + if err := ioutil.WriteFile(content, []byte("HELO"), 0644); err != nil { + t.Fatal(err) + } + + config := `{"State":{"Running":true,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":2464,"ExitCode":0, +"Error":"","StartedAt":"2015-05-26T16:48:53.869308965Z","FinishedAt":"0001-01-01T00:00:00Z"}, +"ID":"d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e","Created":"2015-05-26T16:48:53.7987917Z","Path":"top", +"Args":[],"Config":{"Hostname":"d59df5276e7b","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"", +"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":true,"OpenStdin":true, +"StdinOnce":false,"Env":null,"Cmd":["top"],"Image":"ubuntu:latest","Volumes":null,"WorkingDir":"","Entrypoint":null, +"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":{}},"Image":"07f8e8c5e66084bef8f848877857537ffe1c47edd01a93af27e7161672ad0e95", +"NetworkSettings":{"IPAddress":"172.17.0.1","IPPrefixLen":16,"MacAddress":"02:42:ac:11:00:01","LinkLocalIPv6Address":"fe80::42:acff:fe11:1", +"LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","PortMapping":null,"Ports":{}}, +"ResolvConfPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/resolv.conf", +"HostnamePath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hostname", +"HostsPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hosts", +"LogPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e-json.log", +"Name":"/ubuntu","Driver":"aufs","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0, +"UpdateDns":false,"MountPoints":{"/vol1":{"Name":"6a3c03fc4a4e588561a543cc3bdd50089e27bd11bbb0e551e19bf735e2514101","Destination":"/vol1","Driver":"local","RW":true,"Source":"","Relabel":""}},"AppliedVolumesFrom":null}` + + if err = ioutil.WriteFile(filepath.Join(containerPath, "config.json"), []byte(config), 0644); err != nil { + t.Fatal(err) + } + + hostConfig := `{"Binds":[],"ContainerIDFile":"","LxcConf":[],"Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"", +"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null, +"Devices":[],"NetworkMode":"bridge","IpcMode":"","PidMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"no","MaximumRetryCount":0}, +"SecurityOpt":null,"ReadonlyRootfs":false,"Ulimits":null,"LogConfig":{"Type":"","Config":null},"CgroupParent":""}` + if err = ioutil.WriteFile(filepath.Join(containerPath, "hostconfig.json"), []byte(hostConfig), 0644); err != nil { + t.Fatal(err) + } + + daemon, err := initDaemonForVolumesTest(tmp) + if err != nil { + t.Fatal(err) + } + defer volumedrivers.Unregister(volume.DefaultDriverName) + + c, err := daemon.load(containerId) + if err != nil { + t.Fatal(err) + } + + err = daemon.verifyVolumesInfo(c) + if err != nil { + t.Fatal(err) + } + + if len(c.MountPoints) != 1 { + t.Fatalf("Expected 1 volume mounted, was 0\n") + } + + m := c.MountPoints["/vol1"] + if m.Name != hostVolumeId { + t.Fatalf("Expected mount name to be %s, was %s\n", hostVolumeId, m.Name) + } + + if m.Destination != "/vol1" { + 
t.Fatalf("Expected mount destination /vol1, was %s\n", m.Destination) + } + + if !m.RW { + t.Fatalf("Expected mount point to be RW but it was not\n") + } + + if m.Driver != volume.DefaultDriverName { + t.Fatalf("Expected mount driver local, was %s\n", m.Driver) + } + + newVolumeContent := filepath.Join(volumePath, local.VolumeDataPathName, "helo") + b, err := ioutil.ReadFile(newVolumeContent) + if err != nil { + t.Fatal(err) + } + if string(b) != "HELO" { + t.Fatalf("Expected HELO, was %s\n", string(b)) + } +} + +func TestRemoveLocalVolumesFollowingSymlinks(t *testing.T) { + tmp, err := ioutil.TempDir("", "docker-daemon-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + containerId := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e" + containerPath := filepath.Join(tmp, containerId) + if err := os.MkdirAll(containerPath, 0755); err != nil { + t.Fatal(err) + } + + hostVolumeId := stringid.GenerateRandomID() + vfsPath := filepath.Join(tmp, "vfs", "dir", hostVolumeId) + volumePath := filepath.Join(tmp, "volumes", hostVolumeId) + + if err := os.MkdirAll(vfsPath, 0755); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(volumePath, 0755); err != nil { + t.Fatal(err) + } + + content := filepath.Join(vfsPath, "helo") + if err := ioutil.WriteFile(content, []byte("HELO"), 0644); err != nil { + t.Fatal(err) + } + + config := `{"State":{"Running":true,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":2464,"ExitCode":0, +"Error":"","StartedAt":"2015-05-26T16:48:53.869308965Z","FinishedAt":"0001-01-01T00:00:00Z"}, +"ID":"d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e","Created":"2015-05-26T16:48:53.7987917Z","Path":"top", +"Args":[],"Config":{"Hostname":"d59df5276e7b","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"", +"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":true,"OpenStdin":true, +"StdinOnce":false,"Env":null,"Cmd":["top"],"Image":"ubuntu:latest","Volumes":null,"WorkingDir":"","Entrypoint":null, +"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":{}},"Image":"07f8e8c5e66084bef8f848877857537ffe1c47edd01a93af27e7161672ad0e95", +"NetworkSettings":{"IPAddress":"172.17.0.1","IPPrefixLen":16,"MacAddress":"02:42:ac:11:00:01","LinkLocalIPv6Address":"fe80::42:acff:fe11:1", +"LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","PortMapping":null,"Ports":{}}, +"ResolvConfPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/resolv.conf", +"HostnamePath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hostname", +"HostsPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hosts", +"LogPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e-json.log", +"Name":"/ubuntu","Driver":"aufs","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0, +"UpdateDns":false,"Volumes":{"/vol1":"%s"},"VolumesRW":{"/vol1":true},"AppliedVolumesFrom":null}` + + cfg := fmt.Sprintf(config, vfsPath) + if err = ioutil.WriteFile(filepath.Join(containerPath, "config.json"), []byte(cfg), 0644); err != nil { + t.Fatal(err) + } + + hostConfig := 
`{"Binds":[],"ContainerIDFile":"","LxcConf":[],"Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"", +"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null, +"Devices":[],"NetworkMode":"bridge","IpcMode":"","PidMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"no","MaximumRetryCount":0}, +"SecurityOpt":null,"ReadonlyRootfs":false,"Ulimits":null,"LogConfig":{"Type":"","Config":null},"CgroupParent":""}` + if err = ioutil.WriteFile(filepath.Join(containerPath, "hostconfig.json"), []byte(hostConfig), 0644); err != nil { + t.Fatal(err) + } + + daemon, err := initDaemonForVolumesTest(tmp) + if err != nil { + t.Fatal(err) + } + defer volumedrivers.Unregister(volume.DefaultDriverName) + + c, err := daemon.load(containerId) + if err != nil { + t.Fatal(err) + } + + err = daemon.verifyVolumesInfo(c) + if err != nil { + t.Fatal(err) + } + + if len(c.MountPoints) != 1 { + t.Fatalf("Expected 1 volume mounted, was 0\n") + } + + m := c.MountPoints["/vol1"] + v, err := createVolume(m.Name, m.Driver) + if err != nil { + t.Fatal(err) + } + + if err := removeVolume(v); err != nil { + t.Fatal(err) + } + + fi, err := os.Stat(vfsPath) + if err == nil || !os.IsNotExist(err) { + t.Fatalf("Expected vfs path to not exist: %v - %v\n", fi, err) + } +} + +func initDaemonForVolumesTest(tmp string) (*Daemon, error) { + daemon := &Daemon{ + repository: tmp, + root: tmp, + } + + volumesDriver, err := local.New(tmp) + if err != nil { + return nil, err + } + volumedrivers.Register(volumesDriver, volumesDriver.Name()) + + return daemon, nil +} diff --git a/daemon/daemon_unit_test.go b/daemon/daemon_unit_test.go new file mode 100644 index 00000000..fbc3302a --- /dev/null +++ b/daemon/daemon_unit_test.go @@ -0,0 +1,39 @@ +package daemon + +import ( + "testing" + + "github.com/docker/docker/runconfig" +) + +func TestParseSecurityOpt(t *testing.T) { + container := &Container{} + config := &runconfig.HostConfig{} + + // test apparmor + config.SecurityOpt = []string{"apparmor:test_profile"} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + if container.AppArmorProfile != "test_profile" { + t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", container.AppArmorProfile) + } + + // test valid label + config.SecurityOpt = []string{"label:user:USER"} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + + // test invalid label + config.SecurityOpt = []string{"label"} + if err := parseSecurityOpt(container, config); err == nil { + t.Fatal("Expected parseSecurityOpt error, got nil") + } + + // test invalid opt + config.SecurityOpt = []string{"test"} + if err := parseSecurityOpt(container, config); err == nil { + t.Fatal("Expected parseSecurityOpt error, got nil") + } +} diff --git a/daemon/daemon_unix.go b/daemon/daemon_unix.go new file mode 100644 index 00000000..1bc394c4 --- /dev/null +++ b/daemon/daemon_unix.go @@ -0,0 +1,534 @@ +// +build !windows + +package daemon + +import ( + "fmt" + "net" + "net/http" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/autogen/dockerversion" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/nat" + "github.com/docker/docker/pkg/parsers" + 
"github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" + volumedrivers "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" + "github.com/docker/libnetwork" + nwapi "github.com/docker/libnetwork/api" + nwconfig "github.com/docker/libnetwork/config" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/options" + "github.com/opencontainers/runc/libcontainer/label" +) + +func (daemon *Daemon) Changes(container *Container) ([]archive.Change, error) { + initID := fmt.Sprintf("%s-init", container.ID) + return daemon.driver.Changes(container.ID, initID) +} + +func (daemon *Daemon) Diff(container *Container) (archive.Archive, error) { + initID := fmt.Sprintf("%s-init", container.ID) + return daemon.driver.Diff(container.ID, initID) +} + +func parseSecurityOpt(container *Container, config *runconfig.HostConfig) error { + var ( + labelOpts []string + err error + ) + + for _, opt := range config.SecurityOpt { + con := strings.SplitN(opt, ":", 2) + if len(con) == 1 { + return fmt.Errorf("Invalid --security-opt: %q", opt) + } + switch con[0] { + case "label": + labelOpts = append(labelOpts, con[1]) + case "apparmor": + container.AppArmorProfile = con[1] + default: + return fmt.Errorf("Invalid --security-opt: %q", opt) + } + } + + container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts) + return err +} + +func (daemon *Daemon) createRootfs(container *Container) error { + // Step 1: create the container directory. + // This doubles as a barrier to avoid race conditions. + if err := os.Mkdir(container.root, 0700); err != nil { + return err + } + initID := fmt.Sprintf("%s-init", container.ID) + if err := daemon.driver.Create(initID, container.ImageID); err != nil { + return err + } + initPath, err := daemon.driver.Get(initID, "") + if err != nil { + return err + } + + if err := setupInitLayer(initPath); err != nil { + daemon.driver.Put(initID) + return err + } + + // We want to unmount init layer before we take snapshot of it + // for the actual container. + daemon.driver.Put(initID) + + if err := daemon.driver.Create(container.ID, initID); err != nil { + return err + } + return nil +} + +func checkKernel() error { + // Check for unsupported kernel versions + // FIXME: it would be cleaner to not test for specific versions, but rather + // test for specific functionalities. + // Unfortunately we can't test for the feature "does not cause a kernel panic" + // without actually causing a kernel panic, so we need this workaround until + // the circumstances of pre-3.10 crashes are clearer. + // For details see https://github.com/docker/docker/issues/407 + if k, err := kernel.GetKernelVersion(); err != nil { + logrus.Warnf("%s", err) + } else { + if kernel.CompareKernelVersion(k, &kernel.KernelVersionInfo{Kernel: 3, Major: 10, Minor: 0}) < 0 { + if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { + logrus.Warnf("You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.10.0.", k.String()) + } + } + } + return nil +} + +func (daemon *Daemon) verifyContainerSettings(hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) { + var warnings []string + + if config != nil { + // The check for a valid workdir path is made on the server rather than in the + // client. This is because we don't know the type of path (Linux or Windows) + // to validate on the client. 
+		if config.WorkingDir != "" && !filepath.IsAbs(config.WorkingDir) {
+			return warnings, fmt.Errorf("The working directory '%s' is invalid. It needs to be an absolute path.", config.WorkingDir)
+		}
+	}
+
+	if hostConfig == nil {
+		return warnings, nil
+	}
+
+	for port := range hostConfig.PortBindings {
+		_, portStr := nat.SplitProtoPort(string(port))
+		if _, err := nat.ParsePort(portStr); err != nil {
+			return warnings, fmt.Errorf("Invalid port specification: %q", portStr)
+		}
+		for _, pb := range hostConfig.PortBindings[port] {
+			_, err := nat.NewPort(nat.SplitProtoPort(pb.HostPort))
+			if err != nil {
+				return warnings, fmt.Errorf("Invalid port specification: %q", pb.HostPort)
+			}
+		}
+	}
+	if hostConfig.LxcConf.Len() > 0 && !strings.Contains(daemon.ExecutionDriver().Name(), "lxc") {
+		return warnings, fmt.Errorf("Cannot use --lxc-conf with execdriver: %s", daemon.ExecutionDriver().Name())
+	}
+	if hostConfig.Memory != 0 && hostConfig.Memory < 4194304 {
+		return warnings, fmt.Errorf("Minimum memory limit allowed is 4MB")
+	}
+	if hostConfig.Memory > 0 && !daemon.SystemConfig().MemoryLimit {
+		warnings = append(warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.")
+		logrus.Warnf("Your kernel does not support memory limit capabilities. Limitation discarded.")
+		hostConfig.Memory = 0
+	}
+	if hostConfig.Memory > 0 && hostConfig.MemorySwap != -1 && !daemon.SystemConfig().SwapLimit {
+		warnings = append(warnings, "Your kernel does not support swap limit capabilities, memory limited without swap.")
+		logrus.Warnf("Your kernel does not support swap limit capabilities, memory limited without swap.")
+		hostConfig.MemorySwap = -1
+	}
+	if hostConfig.Memory > 0 && hostConfig.MemorySwap > 0 && hostConfig.MemorySwap < hostConfig.Memory {
+		return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.")
+	}
+	if hostConfig.Memory == 0 && hostConfig.MemorySwap > 0 {
+		return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage.")
+	}
+	if hostConfig.MemorySwappiness != nil && !daemon.SystemConfig().MemorySwappiness {
+		warnings = append(warnings, "Your kernel does not support memory swappiness capabilities, memory swappiness discarded.")
+		logrus.Warnf("Your kernel does not support memory swappiness capabilities, memory swappiness discarded.")
+		hostConfig.MemorySwappiness = nil
+	}
+	if hostConfig.MemorySwappiness != nil {
+		swappiness := *hostConfig.MemorySwappiness
+		if swappiness < -1 || swappiness > 100 {
+			return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is -1 to 100.", swappiness)
+		}
+	}
+	if hostConfig.CpuPeriod > 0 && !daemon.SystemConfig().CpuCfsPeriod {
+		warnings = append(warnings, "Your kernel does not support CPU cfs period. Period discarded.")
+		logrus.Warnf("Your kernel does not support CPU cfs period. Period discarded.")
+		hostConfig.CpuPeriod = 0
+	}
+	if hostConfig.CpuQuota > 0 && !daemon.SystemConfig().CpuCfsQuota {
+		warnings = append(warnings, "Your kernel does not support CPU cfs quota. Quota discarded.")
+		logrus.Warnf("Your kernel does not support CPU cfs quota.
Quota discarded.") + hostConfig.CpuQuota = 0 + } + if hostConfig.BlkioWeight > 0 && (hostConfig.BlkioWeight < 10 || hostConfig.BlkioWeight > 1000) { + return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000.") + } + if hostConfig.OomKillDisable && !daemon.SystemConfig().OomKillDisable { + hostConfig.OomKillDisable = false + return warnings, fmt.Errorf("Your kernel does not support oom kill disable.") + } + if daemon.SystemConfig().IPv4ForwardingDisabled { + warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.") + logrus.Warnf("IPv4 forwarding is disabled. Networking will not work") + } + return warnings, nil +} + +// checkConfigOptions checks for mutually incompatible config options +func checkConfigOptions(config *Config) error { + // Check for mutually incompatible config options + if config.Bridge.Iface != "" && config.Bridge.IP != "" { + return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one.") + } + if !config.Bridge.EnableIPTables && !config.Bridge.InterContainerCommunication { + return fmt.Errorf("You specified --iptables=false with --icc=false. ICC uses iptables to function. Please set --icc or --iptables to true.") + } + if !config.Bridge.EnableIPTables && config.Bridge.EnableIPMasq { + config.Bridge.EnableIPMasq = false + } + return nil +} + +// checkSystem validates platform-specific requirements +func checkSystem() error { + if os.Geteuid() != 0 { + return fmt.Errorf("The Docker daemon needs to be run as root") + } + if err := checkKernel(); err != nil { + return err + } + return nil +} + +// configureKernelSecuritySupport configures and validate security support for the kernel +func configureKernelSecuritySupport(config *Config, driverName string) error { + if config.EnableSelinuxSupport { + if selinuxEnabled() { + // As Docker on btrfs and SELinux are incompatible at present, error on both being enabled + if driverName == "btrfs" { + return fmt.Errorf("SELinux is not supported with the BTRFS graph driver") + } + logrus.Debug("SELinux enabled successfully") + } else { + logrus.Warn("Docker could not enable SELinux on the host system") + } + } else { + selinuxSetDisabled() + } + return nil +} + +// MigrateIfDownlevel is a wrapper for AUFS migration for downlevel +func migrateIfDownlevel(driver graphdriver.Driver, root string) error { + return migrateIfAufs(driver, root) +} + +func configureVolumes(config *Config) error { + volumesDriver, err := local.New(config.Root) + if err != nil { + return err + } + volumedrivers.Register(volumesDriver, volumesDriver.Name()) + return nil +} + +func configureSysInit(config *Config) (string, error) { + localCopy := filepath.Join(config.Root, "init", fmt.Sprintf("dockerinit-%s", dockerversion.VERSION)) + sysInitPath := utils.DockerInitPath(localCopy) + if sysInitPath == "" { + return "", fmt.Errorf("Could not locate dockerinit: This usually means docker was built incorrectly. See https://docs.docker.com/contributing/devenvironment for official build instructions.") + } + + if sysInitPath != localCopy { + // When we find a suitable dockerinit binary (even if it's our local binary), we copy it into config.Root at localCopy for future use (so that the original can go away without that being a problem, for example during a package upgrade). 
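+		// With the default root this lands at, e.g.,
+		// /var/lib/docker/init/dockerinit-1.8.3 (the path is assembled from
+		// config.Root and dockerversion.VERSION above).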
+ if err := os.Mkdir(filepath.Dir(localCopy), 0700); err != nil && !os.IsExist(err) { + return "", err + } + if _, err := fileutils.CopyFile(sysInitPath, localCopy); err != nil { + return "", err + } + if err := os.Chmod(localCopy, 0700); err != nil { + return "", err + } + sysInitPath = localCopy + } + return sysInitPath, nil +} + +func isBridgeNetworkDisabled(config *Config) bool { + return config.Bridge.Iface == disableNetworkBridge +} + +func networkOptions(dconfig *Config) ([]nwconfig.Option, error) { + options := []nwconfig.Option{} + if dconfig == nil { + return options, nil + } + if strings.TrimSpace(dconfig.DefaultNetwork) != "" { + dn := strings.Split(dconfig.DefaultNetwork, ":") + if len(dn) < 2 { + return nil, fmt.Errorf("default network daemon config must be of the form NETWORKDRIVER:NETWORKNAME") + } + options = append(options, nwconfig.OptionDefaultDriver(dn[0])) + options = append(options, nwconfig.OptionDefaultNetwork(strings.Join(dn[1:], ":"))) + } else { + dd := runconfig.DefaultDaemonNetworkMode() + dn := runconfig.DefaultDaemonNetworkMode().NetworkName() + options = append(options, nwconfig.OptionDefaultDriver(string(dd))) + options = append(options, nwconfig.OptionDefaultNetwork(dn)) + } + + if strings.TrimSpace(dconfig.NetworkKVStore) != "" { + kv := strings.Split(dconfig.NetworkKVStore, ":") + if len(kv) < 2 { + return nil, fmt.Errorf("kv store daemon config must be of the form KV-PROVIDER:KV-URL") + } + options = append(options, nwconfig.OptionKVProvider(kv[0])) + options = append(options, nwconfig.OptionKVProviderURL(strings.Join(kv[1:], ":"))) + } + + options = append(options, nwconfig.OptionLabels(dconfig.Labels)) + return options, nil +} + +func initNetworkController(config *Config) (libnetwork.NetworkController, error) { + netOptions, err := networkOptions(config) + if err != nil { + return nil, err + } + + controller, err := libnetwork.New(netOptions...) 
+ if err != nil { + return nil, fmt.Errorf("error obtaining controller instance: %v", err) + } + + // Initialize default driver "null" + + if err := controller.ConfigureNetworkDriver("null", options.Generic{}); err != nil { + return nil, fmt.Errorf("Error initializing null driver: %v", err) + } + + // Initialize default network on "null" + if _, err := controller.NewNetwork("null", "none"); err != nil { + return nil, fmt.Errorf("Error creating default \"null\" network: %v", err) + } + + // Initialize default driver "host" + if err := controller.ConfigureNetworkDriver("host", options.Generic{}); err != nil { + return nil, fmt.Errorf("Error initializing host driver: %v", err) + } + + // Initialize default network on "host" + if _, err := controller.NewNetwork("host", "host"); err != nil { + return nil, fmt.Errorf("Error creating default \"host\" network: %v", err) + } + + if !config.DisableBridge { + // Initialize default driver "bridge" + if err := initBridgeDriver(controller, config); err != nil { + return nil, err + } + } + + return controller, nil +} + +func initBridgeDriver(controller libnetwork.NetworkController, config *Config) error { + option := options.Generic{ + "EnableIPForwarding": config.Bridge.EnableIPForward} + + if err := controller.ConfigureNetworkDriver("bridge", options.Generic{netlabel.GenericData: option}); err != nil { + return fmt.Errorf("Error initializing bridge driver: %v", err) + } + + netOption := options.Generic{ + "BridgeName": config.Bridge.Iface, + "Mtu": config.Mtu, + "EnableIPTables": config.Bridge.EnableIPTables, + "EnableIPMasquerade": config.Bridge.EnableIPMasq, + "EnableICC": config.Bridge.InterContainerCommunication, + "EnableUserlandProxy": config.Bridge.EnableUserlandProxy, + } + + if config.Bridge.IP != "" { + ip, bipNet, err := net.ParseCIDR(config.Bridge.IP) + if err != nil { + return err + } + + bipNet.IP = ip + netOption["AddressIPv4"] = bipNet + } + + if config.Bridge.FixedCIDR != "" { + _, fCIDR, err := net.ParseCIDR(config.Bridge.FixedCIDR) + if err != nil { + return err + } + + netOption["FixedCIDR"] = fCIDR + } + + if config.Bridge.FixedCIDRv6 != "" { + _, fCIDRv6, err := net.ParseCIDR(config.Bridge.FixedCIDRv6) + if err != nil { + return err + } + + netOption["FixedCIDRv6"] = fCIDRv6 + } + + if config.Bridge.DefaultGatewayIPv4 != nil { + netOption["DefaultGatewayIPv4"] = config.Bridge.DefaultGatewayIPv4 + } + + if config.Bridge.DefaultGatewayIPv6 != nil { + netOption["DefaultGatewayIPv6"] = config.Bridge.DefaultGatewayIPv6 + } + + // --ip processing + if config.Bridge.DefaultIP != nil { + netOption["DefaultBindingIP"] = config.Bridge.DefaultIP + } + + // Initialize default network on "bridge" with the same name + _, err := controller.NewNetwork("bridge", "bridge", + libnetwork.NetworkOptionGeneric(options.Generic{ + netlabel.GenericData: netOption, + netlabel.EnableIPv6: config.Bridge.EnableIPv6, + })) + if err != nil { + return fmt.Errorf("Error creating default \"bridge\" network: %v", err) + } + return nil +} + +// setupInitLayer populates a directory with mountpoints suitable +// for bind-mounting dockerinit into the container. The mountpoint is simply an +// empty file at /.dockerinit +// +// This extra layer is used by all containers as the top-most ro layer. It protects +// the container from unwanted side-effects on the rw layer. 
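+//
+// For instance, after setupInitLayer(initPath) the layer holds empty
+// /etc/hosts, /etc/resolv.conf, /.dockerinit and /.dockerenv files, /proc and
+// /sys directories, and an /etc/mtab symlink pointing at /proc/mounts (see
+// the path/type map below).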
+func setupInitLayer(initLayer string) error { + for pth, typ := range map[string]string{ + "/dev/pts": "dir", + "/dev/shm": "dir", + "/proc": "dir", + "/sys": "dir", + "/.dockerinit": "file", + "/.dockerenv": "file", + "/etc/resolv.conf": "file", + "/etc/hosts": "file", + "/etc/hostname": "file", + "/dev/console": "file", + "/etc/mtab": "/proc/mounts", + } { + parts := strings.Split(pth, "/") + prev := "/" + for _, p := range parts[1:] { + prev = filepath.Join(prev, p) + syscall.Unlink(filepath.Join(initLayer, prev)) + } + + if _, err := os.Stat(filepath.Join(initLayer, pth)); err != nil { + if os.IsNotExist(err) { + if err := system.MkdirAll(filepath.Join(initLayer, filepath.Dir(pth)), 0755); err != nil { + return err + } + switch typ { + case "dir": + if err := system.MkdirAll(filepath.Join(initLayer, pth), 0755); err != nil { + return err + } + case "file": + f, err := os.OpenFile(filepath.Join(initLayer, pth), os.O_CREATE, 0755) + if err != nil { + return err + } + f.Close() + default: + if err := os.Symlink(typ, filepath.Join(initLayer, pth)); err != nil { + return err + } + } + } else { + return err + } + } + } + + // Layer is ready to use, if it wasn't before. + return nil +} + +func (daemon *Daemon) NetworkApiRouter() func(w http.ResponseWriter, req *http.Request) { + return nwapi.NewHTTPHandler(daemon.netController) +} + +func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error { + if hostConfig == nil || hostConfig.Links == nil { + return nil + } + + for _, l := range hostConfig.Links { + name, alias, err := parsers.ParseLink(l) + if err != nil { + return err + } + child, err := daemon.Get(name) + if err != nil { + //An error from daemon.Get() means this name could not be found + return fmt.Errorf("Could not get container for %s", name) + } + for child.hostConfig.NetworkMode.IsContainer() { + parts := strings.SplitN(string(child.hostConfig.NetworkMode), ":", 2) + child, err = daemon.Get(parts[1]) + if err != nil { + return fmt.Errorf("Could not get container for %s", parts[1]) + } + } + if child.hostConfig.NetworkMode.IsHost() { + return runconfig.ErrConflictHostNetworkAndLinks + } + if err := daemon.RegisterLink(container, child, alias); err != nil { + return err + } + } + + // After we load all the links into the daemon + // set them to nil on the hostconfig + hostConfig.Links = nil + if err := container.WriteHostConfig(); err != nil { + return err + } + + return nil +} diff --git a/daemon/daemon_windows.go b/daemon/daemon_windows.go new file mode 100644 index 00000000..3e160775 --- /dev/null +++ b/daemon/daemon_windows.go @@ -0,0 +1,167 @@ +package daemon + +import ( + "fmt" + "os" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/windows" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork" + "github.com/microsoft/hcsshim" +) + +const DefaultVirtualSwitch = "Virtual Switch" + +func (daemon *Daemon) Changes(container *Container) ([]archive.Change, error) { + return daemon.driver.Changes(container.ID, container.ImageID) +} + +func (daemon *Daemon) Diff(container *Container) (archive.Archive, error) { + return daemon.driver.Diff(container.ID, container.ImageID) +} + +func parseSecurityOpt(container *Container, config *runconfig.HostConfig) error { + return nil +} + +func (daemon *Daemon) createRootfs(container *Container) error { + // Step 1: create 
the container directory. + // This doubles as a barrier to avoid race conditions. + if err := os.Mkdir(container.root, 0700); err != nil { + return err + } + + if wd, ok := daemon.driver.(*windows.WindowsGraphDriver); ok { + if container.ImageID != "" { + // Get list of paths to parent layers. + logrus.Debugln("createRootfs: Container has parent image:", container.ImageID) + img, err := daemon.graph.Get(container.ImageID) + if err != nil { + return err + } + + ids, err := daemon.graph.ParentLayerIds(img) + if err != nil { + return err + } + logrus.Debugf("Got image ids: %d", len(ids)) + + if err := hcsshim.CreateSandboxLayer(wd.Info(), container.ID, container.ImageID, wd.LayerIdsToPaths(ids)); err != nil { + return err + } + } else { + if err := daemon.driver.Create(container.ID, container.ImageID); err != nil { + return err + } + } + } else { + // Fall-back code path to allow the use of the VFS driver for development + if err := daemon.driver.Create(container.ID, container.ImageID); err != nil { + return err + } + + } + return nil +} + +func checkKernel() error { + return nil +} + +func (daemon *Daemon) verifyContainerSettings(hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) { + // TODO Windows. Verifications TBC + return nil, nil +} + +// checkConfigOptions checks for mutually incompatible config options +func checkConfigOptions(config *Config) error { + return nil +} + +// checkSystem validates platform-specific requirements +func checkSystem() error { + var dwVersion uint32 + + // TODO Windows. May need at some point to ensure have elevation and + // possibly LocalSystem. + + // Validate the OS version. Note that docker.exe must be manifested for this + // call to return the correct version. + dwVersion, err := syscall.GetVersion() + if err != nil { + return fmt.Errorf("Failed to call GetVersion()") + } + if int(dwVersion&0xFF) < 10 { + return fmt.Errorf("This version of Windows does not support the docker daemon") + } + + return nil +} + +// configureKernelSecuritySupport configures and validate security support for the kernel +func configureKernelSecuritySupport(config *Config, driverName string) error { + return nil +} + +func migrateIfDownlevel(driver graphdriver.Driver, root string) error { + return nil +} + +func configureVolumes(config *Config) error { + // Windows does not support volumes at this time + return nil +} + +func configureSysInit(config *Config) (string, error) { + // TODO Windows. + return os.Getenv("TEMP"), nil +} + +func isBridgeNetworkDisabled(config *Config) bool { + return false +} + +func initNetworkController(config *Config) (libnetwork.NetworkController, error) { + // Set the name of the virtual switch if not specified by -b on daemon start + if config.Bridge.VirtualSwitchName == "" { + config.Bridge.VirtualSwitchName = DefaultVirtualSwitch + } + return nil, nil +} + +func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error { + // TODO Windows. Factored out for network modes. There may be more + // refactoring required here. 
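+	// hostConfig.Links entries are "name:alias" strings; parsers.ParseLink
+	// splits them, e.g. "db:database" -> name "db", alias "database" (a sketch
+	// of the format, mirroring the Unix code path).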
+ + if hostConfig == nil || hostConfig.Links == nil { + return nil + } + + for _, l := range hostConfig.Links { + name, alias, err := parsers.ParseLink(l) + if err != nil { + return err + } + child, err := daemon.Get(name) + if err != nil { + //An error from daemon.Get() means this name could not be found + return fmt.Errorf("Could not get container for %s", name) + } + if err := daemon.RegisterLink(container, child, alias); err != nil { + return err + } + } + + // After we load all the links into the daemon + // set them to nil on the hostconfig + hostConfig.Links = nil + if err := container.WriteHostConfig(); err != nil { + return err + } + return nil +} diff --git a/daemon/daemon_zfs.go b/daemon/daemon_zfs.go new file mode 100644 index 00000000..2fc1d870 --- /dev/null +++ b/daemon/daemon_zfs.go @@ -0,0 +1,7 @@ +// +build !exclude_graphdriver_zfs,linux + +package daemon + +import ( + _ "github.com/docker/docker/daemon/graphdriver/zfs" +) diff --git a/daemon/debugtrap_unix.go b/daemon/debugtrap_unix.go new file mode 100644 index 00000000..c4a11b07 --- /dev/null +++ b/daemon/debugtrap_unix.go @@ -0,0 +1,21 @@ +// +build !windows + +package daemon + +import ( + "os" + "os/signal" + "syscall" + + psignal "github.com/docker/docker/pkg/signal" +) + +func setupDumpStackTrap() { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGUSR1) + go func() { + for range c { + psignal.DumpStacks() + } + }() +} diff --git a/daemon/debugtrap_unsupported.go b/daemon/debugtrap_unsupported.go new file mode 100644 index 00000000..fef1bd77 --- /dev/null +++ b/daemon/debugtrap_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux,!darwin,!freebsd,!windows + +package daemon + +func setupDumpStackTrap() { + return +} diff --git a/daemon/debugtrap_windows.go b/daemon/debugtrap_windows.go new file mode 100644 index 00000000..4c47947b --- /dev/null +++ b/daemon/debugtrap_windows.go @@ -0,0 +1,30 @@ +package daemon + +import ( + "fmt" + "os" + "syscall" + + "github.com/Sirupsen/logrus" + psignal "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" +) + +func setupDumpStackTrap() { + // Windows does not support signals like *nix systems. So instead of + // trapping on SIGUSR1 to dump stacks, we wait on a Win32 event to be + // signalled. 
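+	// The event is named "Global\docker-daemon-<pid>" (constructed below);
+	// signalling it from another process is the Windows analogue of
+	// `kill -USR1 <pid>` on *nix.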
+ go func() { + sa := syscall.SecurityAttributes{ + Length: 0, + } + ev := "Global\\docker-daemon-" + fmt.Sprint(os.Getpid()) + if h, _ := system.CreateEvent(&sa, false, false, ev); h != 0 { + logrus.Debugf("Stackdump - waiting signal at %s", ev) + for { + syscall.WaitForSingleObject(h, syscall.INFINITE) + psignal.DumpStacks() + } + } + }() +} diff --git a/daemon/delete.go b/daemon/delete.go new file mode 100644 index 00000000..9990019b --- /dev/null +++ b/daemon/delete.go @@ -0,0 +1,144 @@ +package daemon + +import ( + "fmt" + "os" + "path" + "runtime" + + "github.com/Sirupsen/logrus" +) + +type ContainerRmConfig struct { + ForceRemove, RemoveVolume, RemoveLink bool +} + +func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error { + container, err := daemon.Get(name) + if err != nil { + return err + } + + if config.RemoveLink { + name, err := GetFullContainerName(name) + if err != nil { + return err + } + parent, n := path.Split(name) + if parent == "/" { + return fmt.Errorf("Conflict, cannot remove the default name of the container") + } + pe := daemon.ContainerGraph().Get(parent) + if pe == nil { + return fmt.Errorf("Cannot get parent %s for name %s", parent, name) + } + parentContainer, _ := daemon.Get(pe.ID()) + + if err := daemon.ContainerGraph().Delete(name); err != nil { + return err + } + + if parentContainer != nil { + parentContainer.DisableLink(n) + } + + return nil + } + + if err := daemon.rm(container, config.ForceRemove); err != nil { + return fmt.Errorf("Cannot destroy container %s: %v", name, err) + } + + if config.RemoveVolume { + container.removeMountPoints() + } + return nil +} + +// Destroy unregisters a container from the daemon and cleanly removes its contents from the filesystem. +func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) { + if container.IsRunning() { + if !forceRemove { + return fmt.Errorf("Conflict, You cannot remove a running container. Stop the container before attempting removal or use -f") + } + if err := container.Kill(); err != nil { + return fmt.Errorf("Could not kill running container, cannot remove - %v", err) + } + } + + // stop collection of stats for the container regardless + // if stats are currently getting collected. + daemon.statsCollector.stopCollection(container) + + element := daemon.containers.Get(container.ID) + if element == nil { + return fmt.Errorf("Container %v not found - maybe it was already destroyed?", container.ID) + } + + // Container state RemovalInProgress should be used to avoid races. + if err = container.SetRemovalInProgress(); err != nil { + return fmt.Errorf("Failed to set container state to RemovalInProgress: %s", err) + } + + defer container.ResetRemovalInProgress() + + if err = container.Stop(3); err != nil { + return err + } + + // Mark container dead. We don't want anybody to be restarting it. + container.SetDead() + + // Save container state to disk. So that if error happens before + // container meta file got removed from disk, then a restart of + // docker should not make a dead container alive. + if err := container.ToDisk(); err != nil { + logrus.Errorf("Error saving dying container to disk: %v", err) + } + + // If force removal is required, delete container from various + // indexes even if removal failed. 
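+	// (implemented by the deferred cleanup below: when removal fails but
+	// forceRemove is set, the container is still dropped from the ID index,
+	// the container store and the filesystem)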
+	defer func() {
+		if err != nil && forceRemove {
+			daemon.idIndex.Delete(container.ID)
+			daemon.containers.Delete(container.ID)
+			os.RemoveAll(container.root)
+			container.LogEvent("destroy")
+		}
+	}()
+
+	if _, err := daemon.containerGraph.Purge(container.ID); err != nil {
+		logrus.Debugf("Unable to remove container from link graph: %s", err)
+	}
+
+	if err = daemon.driver.Remove(container.ID); err != nil {
+		return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.driver, container.ID, err)
+	}
+
+	// There will not be an -init on Windows, so don't fail by not attempting to delete it
+	if runtime.GOOS != "windows" {
+		initID := fmt.Sprintf("%s-init", container.ID)
+		if err := daemon.driver.Remove(initID); err != nil {
+			return fmt.Errorf("Driver %s failed to remove init filesystem %s: %s", daemon.driver, initID, err)
+		}
+	}
+
+	if err = os.RemoveAll(container.root); err != nil {
+		return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err)
+	}
+
+	if err = daemon.execDriver.Clean(container.ID); err != nil {
+		return fmt.Errorf("Unable to remove execdriver data for %s: %s", container.ID, err)
+	}
+
+	selinuxFreeLxcContexts(container.ProcessLabel)
+	daemon.idIndex.Delete(container.ID)
+	daemon.containers.Delete(container.ID)
+
+	container.LogEvent("destroy")
+	return nil
+}
+
+func (daemon *Daemon) DeleteVolumes(c *Container) error {
+	return c.removeMountPoints()
+}
diff --git a/daemon/events/events.go b/daemon/events/events.go
new file mode 100644
index 00000000..07ee29a3
--- /dev/null
+++ b/daemon/events/events.go
@@ -0,0 +1,66 @@
+package events
+
+import (
+	"sync"
+	"time"
+
+	"github.com/docker/docker/pkg/jsonmessage"
+	"github.com/docker/docker/pkg/pubsub"
+)
+
+const eventsLimit = 64
+
+// Events is a pubsub channel for *jsonmessage.JSONMessage.
+type Events struct {
+	mu     sync.Mutex
+	events []*jsonmessage.JSONMessage
+	pub    *pubsub.Publisher
+}
+
+// New returns a new *Events instance.
+func New() *Events {
+	return &Events{
+		events: make([]*jsonmessage.JSONMessage, 0, eventsLimit),
+		pub:    pubsub.NewPublisher(100*time.Millisecond, 1024),
+	}
+}
+
+// Subscribe adds a new listener to events. It returns a slice of up to the
+// last eventsLimit (64) stored events and a channel on which new events
+// arrive as interface{} values, so callers need a type assertion.
+func (e *Events) Subscribe() ([]*jsonmessage.JSONMessage, chan interface{}) {
+	e.mu.Lock()
+	current := make([]*jsonmessage.JSONMessage, len(e.events))
+	copy(current, e.events)
+	l := e.pub.Subscribe()
+	e.mu.Unlock()
+	return current, l
+}
+
+// Evict evicts a listener from the pubsub.
+func (e *Events) Evict(l chan interface{}) {
+	e.pub.Evict(l)
+}
+
+// Log broadcasts an event to all listeners. Each listener has 100
+// milliseconds to receive the event before it is skipped.
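+//
+// A minimal usage sketch (field values are illustrative):
+//
+//	e := events.New()
+//	_, ch := e.Subscribe()
+//	e.Log("create", "container-id", "ubuntu:latest")
+//	msg := (<-ch).(*jsonmessage.JSONMessage) // Status "create", ID "container-id"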
+func (e *Events) Log(action, id, from string) { + go func() { + e.mu.Lock() + jm := &jsonmessage.JSONMessage{Status: action, ID: id, From: from, Time: time.Now().UTC().Unix()} + if len(e.events) == cap(e.events) { + // discard oldest event + copy(e.events, e.events[1:]) + e.events[len(e.events)-1] = jm + } else { + e.events = append(e.events, jm) + } + e.mu.Unlock() + e.pub.Publish(jm) + }() +} + +// SubscribersCount returns number of event listeners +func (e *Events) SubscribersCount() int { + return e.pub.Len() +} diff --git a/daemon/events/events_test.go b/daemon/events/events_test.go new file mode 100644 index 00000000..7aa8d9fa --- /dev/null +++ b/daemon/events/events_test.go @@ -0,0 +1,135 @@ +package events + +import ( + "fmt" + "testing" + "time" + + "github.com/docker/docker/pkg/jsonmessage" +) + +func TestEventsLog(t *testing.T) { + e := New() + _, l1 := e.Subscribe() + _, l2 := e.Subscribe() + defer e.Evict(l1) + defer e.Evict(l2) + count := e.SubscribersCount() + if count != 2 { + t.Fatalf("Must be 2 subscribers, got %d", count) + } + e.Log("test", "cont", "image") + select { + case msg := <-l1: + jmsg, ok := msg.(*jsonmessage.JSONMessage) + if !ok { + t.Fatalf("Unexpected type %T", msg) + } + if len(e.events) != 1 { + t.Fatalf("Must be only one event, got %d", len(e.events)) + } + if jmsg.Status != "test" { + t.Fatalf("Status should be test, got %s", jmsg.Status) + } + if jmsg.ID != "cont" { + t.Fatalf("ID should be cont, got %s", jmsg.ID) + } + if jmsg.From != "image" { + t.Fatalf("From should be image, got %s", jmsg.From) + } + case <-time.After(1 * time.Second): + t.Fatal("Timeout waiting for broadcasted message") + } + select { + case msg := <-l2: + jmsg, ok := msg.(*jsonmessage.JSONMessage) + if !ok { + t.Fatalf("Unexpected type %T", msg) + } + if len(e.events) != 1 { + t.Fatalf("Must be only one event, got %d", len(e.events)) + } + if jmsg.Status != "test" { + t.Fatalf("Status should be test, got %s", jmsg.Status) + } + if jmsg.ID != "cont" { + t.Fatalf("ID should be cont, got %s", jmsg.ID) + } + if jmsg.From != "image" { + t.Fatalf("From should be image, got %s", jmsg.From) + } + case <-time.After(1 * time.Second): + t.Fatal("Timeout waiting for broadcasted message") + } +} + +func TestEventsLogTimeout(t *testing.T) { + e := New() + _, l := e.Subscribe() + defer e.Evict(l) + + c := make(chan struct{}) + go func() { + e.Log("test", "cont", "image") + close(c) + }() + + select { + case <-c: + case <-time.After(time.Second): + t.Fatal("Timeout publishing message") + } +} + +func TestLogEvents(t *testing.T) { + e := New() + + for i := 0; i < eventsLimit+16; i++ { + action := fmt.Sprintf("action_%d", i) + id := fmt.Sprintf("cont_%d", i) + from := fmt.Sprintf("image_%d", i) + e.Log(action, id, from) + } + time.Sleep(50 * time.Millisecond) + current, l := e.Subscribe() + for i := 0; i < 10; i++ { + num := i + eventsLimit + 16 + action := fmt.Sprintf("action_%d", num) + id := fmt.Sprintf("cont_%d", num) + from := fmt.Sprintf("image_%d", num) + e.Log(action, id, from) + } + if len(e.events) != eventsLimit { + t.Fatalf("Must be %d events, got %d", eventsLimit, len(e.events)) + } + + var msgs []*jsonmessage.JSONMessage + for len(msgs) < 10 { + m := <-l + jm, ok := (m).(*jsonmessage.JSONMessage) + if !ok { + t.Fatalf("Unexpected type %T", m) + } + msgs = append(msgs, jm) + } + if len(current) != eventsLimit { + t.Fatalf("Must be %d events, got %d", eventsLimit, len(current)) + } + first := current[0] + if first.Status != "action_16" { + t.Fatalf("First action is %s, must be 
action_16", first.Status) + } + last := current[len(current)-1] + if last.Status != "action_79" { + t.Fatalf("Last action is %s, must be action_79", last.Status) + } + + firstC := msgs[0] + if firstC.Status != "action_80" { + t.Fatalf("First action is %s, must be action_80", firstC.Status) + } + lastC := msgs[len(msgs)-1] + if lastC.Status != "action_89" { + t.Fatalf("Last action is %s, must be action_89", lastC.Status) + } +} diff --git a/daemon/exec.go b/daemon/exec.go new file mode 100644 index 00000000..790fad5d --- /dev/null +++ b/daemon/exec.go @@ -0,0 +1,287 @@ +package daemon + +import ( + "fmt" + "io" + "io/ioutil" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/pkg/broadcastwriter" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" +) + +type execConfig struct { + sync.Mutex + ID string + Running bool + ExitCode int + ProcessConfig *execdriver.ProcessConfig + StreamConfig + OpenStdin bool + OpenStderr bool + OpenStdout bool + Container *Container + canRemove bool +} + +type execStore struct { + s map[string]*execConfig + sync.RWMutex +} + +func newExecStore() *execStore { + return &execStore{s: make(map[string]*execConfig, 0)} +} + +func (e *execStore) Add(id string, execConfig *execConfig) { + e.Lock() + e.s[id] = execConfig + e.Unlock() +} + +func (e *execStore) Get(id string) *execConfig { + e.RLock() + res := e.s[id] + e.RUnlock() + return res +} + +func (e *execStore) Delete(id string) { + e.Lock() + delete(e.s, id) + e.Unlock() +} + +func (e *execStore) List() []string { + var IDs []string + e.RLock() + for id := range e.s { + IDs = append(IDs, id) + } + e.RUnlock() + return IDs +} + +func (execConfig *execConfig) Resize(h, w int) error { + return execConfig.ProcessConfig.Terminal.Resize(h, w) +} + +func (d *Daemon) registerExecCommand(execConfig *execConfig) { + // Storing execs in container in order to kill them gracefully whenever the container is stopped or removed. + execConfig.Container.execCommands.Add(execConfig.ID, execConfig) + // Storing execs in daemon for easy access via remote API. 
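+	// Both stores must stay in sync: getExecConfig resolves IDs through the
+	// daemon-level store, and execCommandGC later reaps daemon entries whose
+	// container no longer lists them (see containerExecIds below).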
+ d.execCommands.Add(execConfig.ID, execConfig) +} + +func (d *Daemon) getExecConfig(name string) (*execConfig, error) { + if execConfig := d.execCommands.Get(name); execConfig != nil { + if !execConfig.Container.IsRunning() { + return nil, fmt.Errorf("Container %s is not running", execConfig.Container.ID) + } + return execConfig, nil + } + + return nil, fmt.Errorf("No such exec instance '%s' found in daemon", name) +} + +func (d *Daemon) unregisterExecCommand(execConfig *execConfig) { + execConfig.Container.execCommands.Delete(execConfig.ID) + d.execCommands.Delete(execConfig.ID) +} + +func (d *Daemon) getActiveContainer(name string) (*Container, error) { + container, err := d.Get(name) + if err != nil { + return nil, err + } + + if !container.IsRunning() { + return nil, fmt.Errorf("Container %s is not running", name) + } + if container.IsPaused() { + return nil, fmt.Errorf("Container %s is paused, unpause the container before exec", name) + } + return container, nil +} + +func (d *Daemon) ContainerExecCreate(config *runconfig.ExecConfig) (string, error) { + // Not all drivers support Exec (LXC for example) + if err := checkExecSupport(d.execDriver.Name()); err != nil { + return "", err + } + + container, err := d.getActiveContainer(config.Container) + if err != nil { + return "", err + } + + cmd := runconfig.NewCommand(config.Cmd...) + entrypoint, args := d.getEntrypointAndArgs(runconfig.NewEntrypoint(), cmd) + + user := config.User + if len(user) == 0 { + user = container.Config.User + } + + processConfig := &execdriver.ProcessConfig{ + Tty: config.Tty, + Entrypoint: entrypoint, + Arguments: args, + User: user, + } + + execConfig := &execConfig{ + ID: stringid.GenerateRandomID(), + OpenStdin: config.AttachStdin, + OpenStdout: config.AttachStdout, + OpenStderr: config.AttachStderr, + StreamConfig: StreamConfig{}, + ProcessConfig: processConfig, + Container: container, + Running: false, + } + + d.registerExecCommand(execConfig) + + container.LogEvent("exec_create: " + execConfig.ProcessConfig.Entrypoint + " " + strings.Join(execConfig.ProcessConfig.Arguments, " ")) + + return execConfig.ID, nil + +} + +func (d *Daemon) ContainerExecStart(execName string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error { + + var ( + cStdin io.ReadCloser + cStdout, cStderr io.Writer + ) + + execConfig, err := d.getExecConfig(execName) + if err != nil { + return err + } + + func() { + execConfig.Lock() + defer execConfig.Unlock() + if execConfig.Running { + err = fmt.Errorf("Error: Exec command %s is already running", execName) + } + execConfig.Running = true + }() + if err != nil { + return err + } + + logrus.Debugf("starting exec command %s in container %s", execConfig.ID, execConfig.Container.ID) + container := execConfig.Container + + container.LogEvent("exec_start: " + execConfig.ProcessConfig.Entrypoint + " " + strings.Join(execConfig.ProcessConfig.Arguments, " ")) + + if execConfig.OpenStdin { + r, w := io.Pipe() + go func() { + defer w.Close() + defer logrus.Debugf("Closing buffered stdin pipe") + pools.Copy(w, stdin) + }() + cStdin = r + } + if execConfig.OpenStdout { + cStdout = stdout + } + if execConfig.OpenStderr { + cStderr = stderr + } + + execConfig.StreamConfig.stderr = broadcastwriter.New() + execConfig.StreamConfig.stdout = broadcastwriter.New() + // Attach to stdin + if execConfig.OpenStdin { + execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdinPipe = io.Pipe() + } else { + execConfig.StreamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop 
stdin + } + + attachErr := attach(&execConfig.StreamConfig, execConfig.OpenStdin, true, execConfig.ProcessConfig.Tty, cStdin, cStdout, cStderr) + + execErr := make(chan error) + + // Note, the execConfig data will be removed when the container + // itself is deleted. This allows us to query it (for things like + // the exitStatus) even after the cmd is done running. + + go func() { + if err := container.Exec(execConfig); err != nil { + execErr <- fmt.Errorf("Cannot run exec command %s in container %s: %s", execName, container.ID, err) + } + }() + select { + case err := <-attachErr: + if err != nil { + return fmt.Errorf("attach failed with error: %s", err) + } + break + case err := <-execErr: + return err + } + + return nil +} + +func (d *Daemon) Exec(c *Container, execConfig *execConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { + exitStatus, err := d.execDriver.Exec(c.command, execConfig.ProcessConfig, pipes, startCallback) + + // On err, make sure we don't leave ExitCode at zero + if err != nil && exitStatus == 0 { + exitStatus = 128 + } + + execConfig.ExitCode = exitStatus + execConfig.Running = false + + return exitStatus, err +} + +// execCommandGC runs a ticker to clean up the daemon references +// of exec configs that are no longer part of the container. +func (d *Daemon) execCommandGC() { + for range time.Tick(5 * time.Minute) { + var ( + cleaned int + liveExecCommands = d.containerExecIds() + ) + for id, config := range d.execCommands.s { + if config.canRemove { + cleaned++ + d.execCommands.Delete(id) + } else { + if _, exists := liveExecCommands[id]; !exists { + config.canRemove = true + } + } + } + if cleaned > 0 { + logrus.Debugf("clean %d unused exec commands", cleaned) + } + } +} + +// containerExecIds returns a list of all the current exec ids that are in use +// and running inside a container. +func (d *Daemon) containerExecIds() map[string]struct{} { + ids := map[string]struct{}{} + for _, c := range d.containers.List() { + for _, id := range c.execCommands.List() { + ids[id] = struct{}{} + } + } + return ids +} diff --git a/daemon/exec_linux.go b/daemon/exec_linux.go new file mode 100644 index 00000000..a3603263 --- /dev/null +++ b/daemon/exec_linux.go @@ -0,0 +1,18 @@ +// +build linux + +package daemon + +import ( + "strings" + + "github.com/docker/docker/daemon/execdriver/lxc" +) + +// checkExecSupport returns an error if the exec driver does not support exec, +// or nil if it is supported. +func checkExecSupport(drivername string) error { + if strings.HasPrefix(drivername, lxc.DriverName) { + return lxc.ErrExec + } + return nil +} diff --git a/daemon/exec_windows.go b/daemon/exec_windows.go new file mode 100644 index 00000000..d6f244e6 --- /dev/null +++ b/daemon/exec_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package daemon + +// checkExecSupport returns an error if the exec driver does not support exec, +// or nil if it is supported. 
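+// Every exec driver available on Windows supports exec, so this is a no-op.
+// The call site mirrors the Linux path (sketch based on ContainerExecCreate
+// in exec.go):
+//
+//	if err := checkExecSupport(d.execDriver.Name()); err != nil {
+//		return "", err
+//	}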
+func checkExecSupport(DriverName string) error { + return nil +} diff --git a/daemon/execdriver/driver.go b/daemon/execdriver/driver.go new file mode 100644 index 00000000..bd2e24cd --- /dev/null +++ b/daemon/execdriver/driver.go @@ -0,0 +1,180 @@ +package execdriver + +import ( + "errors" + "io" + "os/exec" + "time" + + // TODO Windows: Factor out ulimit + "github.com/docker/docker/pkg/ulimit" + "github.com/opencontainers/runc/libcontainer" + "github.com/opencontainers/runc/libcontainer/configs" +) + +// Context is a generic key value pair that allows +// arbatrary data to be sent +type Context map[string]string + +var ( + ErrNotRunning = errors.New("Container is not running") + ErrWaitTimeoutReached = errors.New("Wait timeout reached") + ErrDriverAlreadyRegistered = errors.New("A driver already registered this docker init function") + ErrDriverNotFound = errors.New("The requested docker init has not been found") +) + +type StartCallback func(*ProcessConfig, int) + +// Driver specific information based on +// processes registered with the driver +type Info interface { + IsRunning() bool +} + +// Terminal in an interface for drivers to implement +// if they want to support Close and Resize calls from +// the core +type Terminal interface { + io.Closer + Resize(height, width int) error +} + +// ExitStatus provides exit reasons for a container. +type ExitStatus struct { + // The exit code with which the container exited. + ExitCode int + + // Whether the container encountered an OOM. + OOMKilled bool +} + +type Driver interface { + Run(c *Command, pipes *Pipes, startCallback StartCallback) (ExitStatus, error) // Run executes the process and blocks until the process exits and returns the exit code + // Exec executes the process in an existing container, blocks until the process exits and returns the exit code + Exec(c *Command, processConfig *ProcessConfig, pipes *Pipes, startCallback StartCallback) (int, error) + Kill(c *Command, sig int) error + Pause(c *Command) error + Unpause(c *Command) error + Name() string // Driver name + Info(id string) Info // "temporary" hack (until we move state from core to plugins) + GetPidsForContainer(id string) ([]int, error) // Returns a list of pids for the given container. + Terminate(c *Command) error // kill it with fire + Clean(id string) error // clean all traces of container exec + Stats(id string) (*ResourceStats, error) // Get resource stats for a running container +} + +// Network settings of the container +type Network struct { + Interface *NetworkInterface `json:"interface"` // if interface is nil then networking is disabled + Mtu int `json:"mtu"` + ContainerID string `json:"container_id"` // id of the container to join network. + NamespacePath string `json:"namespace_path"` + HostNetworking bool `json:"host_networking"` +} + +// IPC settings of the container +type Ipc struct { + ContainerID string `json:"container_id"` // id of the container to join ipc. 
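+	// When both fields are set, ContainerID takes precedence: the lxc
+	// driver's Run checks it before HostIpc when emitting --share-ipc.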
+ HostIpc bool `json:"host_ipc"` +} + +// PID settings of the container +type Pid struct { + HostPid bool `json:"host_pid"` +} + +// UTS settings of the container +type UTS struct { + HostUTS bool `json:"host_uts"` +} + +type NetworkInterface struct { + Gateway string `json:"gateway"` + IPAddress string `json:"ip"` + IPPrefixLen int `json:"ip_prefix_len"` + MacAddress string `json:"mac"` + Bridge string `json:"bridge"` + GlobalIPv6Address string `json:"global_ipv6"` + LinkLocalIPv6Address string `json:"link_local_ipv6"` + GlobalIPv6PrefixLen int `json:"global_ipv6_prefix_len"` + IPv6Gateway string `json:"ipv6_gateway"` + HairpinMode bool `json:"hairpin_mode"` +} + +// TODO Windows: Factor out ulimit.Rlimit +type Resources struct { + Memory int64 `json:"memory"` + MemorySwap int64 `json:"memory_swap"` + CpuShares int64 `json:"cpu_shares"` + CpusetCpus string `json:"cpuset_cpus"` + CpusetMems string `json:"cpuset_mems"` + CpuPeriod int64 `json:"cpu_period"` + CpuQuota int64 `json:"cpu_quota"` + BlkioWeight int64 `json:"blkio_weight"` + Rlimits []*ulimit.Rlimit `json:"rlimits"` + OomKillDisable bool `json:"oom_kill_disable"` + MemorySwappiness int64 `json:"memory_swappiness"` +} + +type ResourceStats struct { + *libcontainer.Stats + Read time.Time `json:"read"` + MemoryLimit int64 `json:"memory_limit"` + SystemUsage uint64 `json:"system_usage"` +} + +type Mount struct { + Source string `json:"source"` + Destination string `json:"destination"` + Writable bool `json:"writable"` + Private bool `json:"private"` + Slave bool `json:"slave"` +} + +// Describes a process that will be run inside a container. +type ProcessConfig struct { + exec.Cmd `json:"-"` + + Privileged bool `json:"privileged"` + User string `json:"user"` + Tty bool `json:"tty"` + Entrypoint string `json:"entrypoint"` + Arguments []string `json:"arguments"` + Terminal Terminal `json:"-"` // standard or tty terminal + Console string `json:"-"` // dev/console path + ConsoleSize [2]int `json:"-"` // h,w of initial console size +} + +// TODO Windows: Factor out unused fields such as LxcConfig, AppArmorProfile, +// and CgroupParent. +// +// Process wrapps an os/exec.Cmd to add more metadata +type Command struct { + ID string `json:"id"` + Rootfs string `json:"rootfs"` // root fs of the container + ReadonlyRootfs bool `json:"readonly_rootfs"` + InitPath string `json:"initpath"` // dockerinit + WorkingDir string `json:"working_dir"` + ConfigPath string `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver + Network *Network `json:"network"` + Ipc *Ipc `json:"ipc"` + Pid *Pid `json:"pid"` + UTS *UTS `json:"uts"` + Resources *Resources `json:"resources"` + Mounts []Mount `json:"mounts"` + AllowedDevices []*configs.Device `json:"allowed_devices"` + AutoCreatedDevices []*configs.Device `json:"autocreated_devices"` + CapAdd []string `json:"cap_add"` + CapDrop []string `json:"cap_drop"` + GroupAdd []string `json:"group_add"` + ContainerPid int `json:"container_pid"` // the pid for the process inside a container + ProcessConfig ProcessConfig `json:"process_config"` // Describes the init process of the container. + ProcessLabel string `json:"process_label"` + MountLabel string `json:"mount_label"` + LxcConfig []string `json:"lxc_config"` + AppArmorProfile string `json:"apparmor_profile"` + CgroupParent string `json:"cgroup_parent"` // The parent cgroup for this command. 
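+	// An illustrative construction, mirroring the lxc template unit tests
+	// (field values are examples only):
+	//
+	//	cmd := &Command{
+	//		ID:        "1",
+	//		Resources: &Resources{Memory: 33554432, CpuShares: 100},
+	//		Network:   &Network{Mtu: 1500},
+	//	}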
+ FirstStart bool `json:"first_start"` + LayerPaths []string `json:"layer_paths"` // Windows needs to know the layer paths and folder for a command + LayerFolder string `json:"layer_folder"` +} diff --git a/daemon/execdriver/driver_linux.go b/daemon/execdriver/driver_linux.go new file mode 100644 index 00000000..484c5f5e --- /dev/null +++ b/daemon/execdriver/driver_linux.go @@ -0,0 +1,161 @@ +package execdriver + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/docker/docker/daemon/execdriver/native/template" + "github.com/opencontainers/runc/libcontainer" + "github.com/opencontainers/runc/libcontainer/cgroups/fs" + "github.com/opencontainers/runc/libcontainer/configs" +) + +func InitContainer(c *Command) *configs.Config { + container := template.New() + + container.Hostname = getEnv("HOSTNAME", c.ProcessConfig.Env) + container.Cgroups.Name = c.ID + container.Cgroups.AllowedDevices = c.AllowedDevices + container.Devices = c.AutoCreatedDevices + container.Rootfs = c.Rootfs + container.Readonlyfs = c.ReadonlyRootfs + container.Privatefs = true + + // check to see if we are running in ramdisk to disable pivot root + container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != "" + + // Default parent cgroup is "docker". Override if required. + if c.CgroupParent != "" { + container.Cgroups.Parent = c.CgroupParent + } + return container +} + +func getEnv(key string, env []string) string { + for _, pair := range env { + parts := strings.SplitN(pair, "=", 2) + if parts[0] == key { + return parts[1] + } + } + return "" +} + +func SetupCgroups(container *configs.Config, c *Command) error { + if c.Resources != nil { + container.Cgroups.CpuShares = c.Resources.CpuShares + container.Cgroups.Memory = c.Resources.Memory + container.Cgroups.MemoryReservation = c.Resources.Memory + container.Cgroups.MemorySwap = c.Resources.MemorySwap + container.Cgroups.CpusetCpus = c.Resources.CpusetCpus + container.Cgroups.CpusetMems = c.Resources.CpusetMems + container.Cgroups.CpuPeriod = c.Resources.CpuPeriod + container.Cgroups.CpuQuota = c.Resources.CpuQuota + container.Cgroups.BlkioWeight = c.Resources.BlkioWeight + container.Cgroups.OomKillDisable = c.Resources.OomKillDisable + container.Cgroups.MemorySwappiness = c.Resources.MemorySwappiness + } + + return nil +} + +// Returns the network statistics for the network interfaces represented by the NetworkRuntimeInfo. +func getNetworkInterfaceStats(interfaceName string) (*libcontainer.NetworkInterface, error) { + out := &libcontainer.NetworkInterface{Name: interfaceName} + // This can happen if the network runtime information is missing - possible if the + // container was created by an old version of libcontainer. + if interfaceName == "" { + return out, nil + } + type netStatsPair struct { + // Where to write the output. + Out *uint64 + // The network stats file to read. + File string + } + // Ingress for host veth is from the container. Hence tx_bytes stat on the host veth is actually number of bytes received by the container. 
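+	// For example, bytes the container receives exit through the host-side
+	// veth, so /sys/class/net/<ifname>/statistics/tx_bytes feeds out.RxBytes
+	// below, and the rx_* files feed the Tx fields.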
+ netStats := []netStatsPair{ + {Out: &out.RxBytes, File: "tx_bytes"}, + {Out: &out.RxPackets, File: "tx_packets"}, + {Out: &out.RxErrors, File: "tx_errors"}, + {Out: &out.RxDropped, File: "tx_dropped"}, + + {Out: &out.TxBytes, File: "rx_bytes"}, + {Out: &out.TxPackets, File: "rx_packets"}, + {Out: &out.TxErrors, File: "rx_errors"}, + {Out: &out.TxDropped, File: "rx_dropped"}, + } + for _, netStat := range netStats { + data, err := readSysfsNetworkStats(interfaceName, netStat.File) + if err != nil { + return nil, err + } + *(netStat.Out) = data + } + return out, nil +} + +// Reads the specified statistics available under /sys/class/net//statistics +func readSysfsNetworkStats(ethInterface, statsFile string) (uint64, error) { + data, err := ioutil.ReadFile(filepath.Join("/sys/class/net", ethInterface, "statistics", statsFile)) + if err != nil { + return 0, err + } + return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) +} + +func Stats(containerDir string, containerMemoryLimit int64, machineMemory int64) (*ResourceStats, error) { + f, err := os.Open(filepath.Join(containerDir, "state.json")) + if err != nil { + return nil, err + } + defer f.Close() + + type network struct { + Type string + HostInterfaceName string + } + + state := struct { + CgroupPaths map[string]string `json:"cgroup_paths"` + Networks []network + }{} + + if err := json.NewDecoder(f).Decode(&state); err != nil { + return nil, err + } + now := time.Now() + + mgr := fs.Manager{Paths: state.CgroupPaths} + cstats, err := mgr.GetStats() + if err != nil { + return nil, err + } + stats := &libcontainer.Stats{CgroupStats: cstats} + // if the container does not have any memory limit specified set the + // limit to the machines memory + memoryLimit := containerMemoryLimit + if memoryLimit == 0 { + memoryLimit = machineMemory + } + for _, iface := range state.Networks { + switch iface.Type { + case "veth": + istats, err := getNetworkInterfaceStats(iface.HostInterfaceName) + if err != nil { + return nil, err + } + stats.Interfaces = append(stats.Interfaces, istats) + } + } + return &ResourceStats{ + Stats: stats, + Read: now, + MemoryLimit: memoryLimit, + }, nil +} diff --git a/daemon/execdriver/execdrivers/execdrivers_linux.go b/daemon/execdriver/execdrivers/execdrivers_linux.go new file mode 100644 index 00000000..bbad3048 --- /dev/null +++ b/daemon/execdriver/execdrivers/execdrivers_linux.go @@ -0,0 +1,28 @@ +// +build linux + +package execdrivers + +import ( + "fmt" + "path" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/daemon/execdriver/lxc" + "github.com/docker/docker/daemon/execdriver/native" + "github.com/docker/docker/pkg/sysinfo" +) + +func NewDriver(name string, options []string, root, libPath, initPath string, sysInfo *sysinfo.SysInfo) (execdriver.Driver, error) { + switch name { + case "lxc": + // we want to give the lxc driver the full docker root because it needs + // to access and write config and template files in /var/lib/docker/containers/* + // to be backwards compatible + logrus.Warn("LXC built-in support is deprecated.") + return lxc.NewDriver(root, libPath, initPath, sysInfo.AppArmor) + case "native": + return native.NewDriver(path.Join(root, "execdriver", "native"), initPath, options) + } + return nil, fmt.Errorf("unknown exec driver %s", name) +} diff --git a/daemon/execdriver/execdrivers/execdrivers_windows.go b/daemon/execdriver/execdrivers/execdrivers_windows.go new file mode 100644 index 00000000..d169b900 --- /dev/null +++ 
b/daemon/execdriver/execdrivers/execdrivers_windows.go @@ -0,0 +1,19 @@ +// +build windows + +package execdrivers + +import ( + "fmt" + + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/daemon/execdriver/windows" + "github.com/docker/docker/pkg/sysinfo" +) + +func NewDriver(name string, options []string, root, libPath, initPath string, sysInfo *sysinfo.SysInfo) (execdriver.Driver, error) { + switch name { + case "windows": + return windows.NewDriver(root, initPath, options) + } + return nil, fmt.Errorf("unknown exec driver %s", name) +} diff --git a/daemon/execdriver/lxc/driver.go b/daemon/execdriver/lxc/driver.go new file mode 100644 index 00000000..a3a9e0c6 --- /dev/null +++ b/daemon/execdriver/lxc/driver.go @@ -0,0 +1,856 @@ +// +build linux + +package lxc + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/pkg/stringutils" + sysinfo "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/pkg/version" + "github.com/kr/pty" + "github.com/opencontainers/runc/libcontainer" + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/system" + "github.com/opencontainers/runc/libcontainer/user" + "github.com/vishvananda/netns" +) + +const DriverName = "lxc" + +var ErrExec = errors.New("Unsupported: Exec is not supported by the lxc driver") + +type driver struct { + root string // root path for the driver to use + libPath string + initPath string + apparmor bool + sharedRoot bool + activeContainers map[string]*activeContainer + machineMemory int64 + sync.Mutex +} + +type activeContainer struct { + container *configs.Config + cmd *exec.Cmd +} + +func NewDriver(root, libPath, initPath string, apparmor bool) (*driver, error) { + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + // setup unconfined symlink + if err := linkLxcStart(root); err != nil { + return nil, err + } + meminfo, err := sysinfo.ReadMemInfo() + if err != nil { + return nil, err + } + return &driver{ + apparmor: apparmor, + root: root, + libPath: libPath, + initPath: initPath, + sharedRoot: rootIsShared(), + activeContainers: make(map[string]*activeContainer), + machineMemory: meminfo.MemTotal, + }, nil +} + +func (d *driver) Name() string { + version := d.version() + return fmt.Sprintf("%s-%s", DriverName, version) +} + +func setupNetNs(nsPath string) (*os.Process, error) { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + origns, err := netns.Get() + if err != nil { + return nil, err + } + defer origns.Close() + + f, err := os.OpenFile(nsPath, os.O_RDONLY, 0) + if err != nil { + return nil, fmt.Errorf("failed to get network namespace %q: %v", nsPath, err) + } + defer f.Close() + + nsFD := f.Fd() + if err := netns.Set(netns.NsHandle(nsFD)); err != nil { + return nil, fmt.Errorf("failed to set network namespace %q: %v", nsPath, err) + } + defer netns.Set(origns) + + cmd := exec.Command("/bin/sh", "-c", "while true; do sleep 1; done") + if err := cmd.Start(); err != nil { + return nil, fmt.Errorf("failed to start netns process: %v", err) + } + + return cmd.Process, nil +} + +func killNetNsProc(proc *os.Process) { + proc.Kill() + proc.Wait() +} + +func (d *driver) Run(c *execdriver.Command, pipes 
*execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) { + var ( + term execdriver.Terminal + err error + dataPath = d.containerDir(c.ID) + ) + + if c.Network == nil || (c.Network.NamespacePath == "" && c.Network.ContainerID == "") { + return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("empty namespace path for non-container network") + } + + container, err := d.createContainer(c) + if err != nil { + return execdriver.ExitStatus{ExitCode: -1}, err + } + + if c.ProcessConfig.Tty { + term, err = NewTtyConsole(&c.ProcessConfig, pipes) + } else { + term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes) + } + if err != nil { + return execdriver.ExitStatus{ExitCode: -1}, err + } + c.ProcessConfig.Terminal = term + + d.Lock() + d.activeContainers[c.ID] = &activeContainer{ + container: container, + cmd: &c.ProcessConfig.Cmd, + } + d.Unlock() + + c.Mounts = append(c.Mounts, execdriver.Mount{ + Source: d.initPath, + Destination: c.InitPath, + Writable: false, + Private: true, + }) + + if err := d.generateEnvConfig(c); err != nil { + return execdriver.ExitStatus{ExitCode: -1}, err + } + configPath, err := d.generateLXCConfig(c) + if err != nil { + return execdriver.ExitStatus{ExitCode: -1}, err + } + params := []string{ + "lxc-start", + "-n", c.ID, + "-f", configPath, + "-q", + } + + // From lxc>=1.1 the default behavior is to daemonize containers after start + lxcVersion := version.Version(d.version()) + if lxcVersion.GreaterThanOrEqualTo(version.Version("1.1")) { + params = append(params, "-F") + } + + proc := &os.Process{} + if c.Network.ContainerID != "" { + params = append(params, + "--share-net", c.Network.ContainerID, + ) + } else { + proc, err = setupNetNs(c.Network.NamespacePath) + if err != nil { + return execdriver.ExitStatus{ExitCode: -1}, err + } + + pidStr := fmt.Sprintf("%d", proc.Pid) + params = append(params, + "--share-net", pidStr) + } + if c.Ipc != nil { + if c.Ipc.ContainerID != "" { + params = append(params, + "--share-ipc", c.Ipc.ContainerID, + ) + } else if c.Ipc.HostIpc { + params = append(params, + "--share-ipc", "1", + ) + } + } + + params = append(params, + "--", + c.InitPath, + ) + + if c.ProcessConfig.User != "" { + params = append(params, "-u", c.ProcessConfig.User) + } + + if c.ProcessConfig.Privileged { + if d.apparmor { + params[0] = path.Join(d.root, "lxc-start-unconfined") + + } + params = append(params, "-privileged") + } + + if c.WorkingDir != "" { + params = append(params, "-w", c.WorkingDir) + } + + params = append(params, "--", c.ProcessConfig.Entrypoint) + params = append(params, c.ProcessConfig.Arguments...) + + if d.sharedRoot { + // lxc-start really needs / to be non-shared, or all kinds of stuff break + // when lxc-start unmount things and those unmounts propagate to the main + // mount namespace. + // What we really want is to clone into a new namespace and then + // mount / MS_REC|MS_SLAVE, but since we can't really clone or fork + // without exec in go we have to do this horrible shell hack... + shellString := + "mount --make-rslave /; exec " + + stringutils.ShellQuoteArguments(params) + + params = []string{ + "unshare", "-m", "--", "/bin/sh", "-c", shellString, + } + } + logrus.Debugf("lxc params %s", params) + var ( + name = params[0] + arg = params[1:] + ) + aname, err := exec.LookPath(name) + if err != nil { + aname = name + } + c.ProcessConfig.Path = aname + c.ProcessConfig.Args = append([]string{name}, arg...) 
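+	// At this point the invocation resembles (illustrative; flags vary with
+	// the container, and a shared / wraps it all in `unshare -m` as above):
+	//
+	//	lxc-start -n <id> -f <config> -q [-F] --share-net <pid|id> \
+	//		-- <dockerinit> [-u <user>] [-privileged] [-w <dir>] -- <entrypoint> <args...>
+	//
+	// dockerinit parses the flags after the first "--" via getArgs (init.go);
+	// whatever follows the second "--" becomes the exec'd command.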
+ + if err := createDeviceNodes(c.Rootfs, c.AutoCreatedDevices); err != nil { + killNetNsProc(proc) + return execdriver.ExitStatus{ExitCode: -1}, err + } + + if err := c.ProcessConfig.Start(); err != nil { + killNetNsProc(proc) + return execdriver.ExitStatus{ExitCode: -1}, err + } + + var ( + waitErr error + waitLock = make(chan struct{}) + ) + + go func() { + if err := c.ProcessConfig.Wait(); err != nil { + if _, ok := err.(*exec.ExitError); !ok { // Do not propagate the error if it's simply a status code != 0 + waitErr = err + } + } + close(waitLock) + }() + + terminate := func(terr error) (execdriver.ExitStatus, error) { + if c.ProcessConfig.Process != nil { + c.ProcessConfig.Process.Kill() + c.ProcessConfig.Wait() + } + return execdriver.ExitStatus{ExitCode: -1}, terr + } + // Poll lxc for RUNNING status + pid, err := d.waitForStart(c, waitLock) + if err != nil { + killNetNsProc(proc) + return terminate(err) + } + killNetNsProc(proc) + + cgroupPaths, err := cgroupPaths(c.ID) + if err != nil { + return terminate(err) + } + + state := &libcontainer.State{ + InitProcessPid: pid, + CgroupPaths: cgroupPaths, + } + + f, err := os.Create(filepath.Join(dataPath, "state.json")) + if err != nil { + return terminate(err) + } + defer f.Close() + + if err := json.NewEncoder(f).Encode(state); err != nil { + return terminate(err) + } + + c.ContainerPid = pid + + if startCallback != nil { + logrus.Debugf("Invoking startCallback") + startCallback(&c.ProcessConfig, pid) + } + + oomKill := false + oomKillNotification, err := notifyOnOOM(cgroupPaths) + + <-waitLock + exitCode := getExitCode(c) + + if err == nil { + _, oomKill = <-oomKillNotification + logrus.Debugf("oomKill error: %v, waitErr: %v", oomKill, waitErr) + } else { + logrus.Warnf("Your kernel does not support OOM notifications: %s", err) + } + + // check oom error + if oomKill { + exitCode = 137 + } + + return execdriver.ExitStatus{ExitCode: exitCode, OOMKilled: oomKill}, waitErr +} + +// copy from libcontainer +func notifyOnOOM(paths map[string]string) (<-chan struct{}, error) { + dir := paths["memory"] + if dir == "" { + return nil, fmt.Errorf("There is no path for %q in state", "memory") + } + oomControl, err := os.Open(filepath.Join(dir, "memory.oom_control")) + if err != nil { + return nil, err + } + fd, _, syserr := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, syscall.FD_CLOEXEC, 0) + if syserr != 0 { + oomControl.Close() + return nil, syserr + } + + eventfd := os.NewFile(fd, "eventfd") + + eventControlPath := filepath.Join(dir, "cgroup.event_control") + data := fmt.Sprintf("%d %d", eventfd.Fd(), oomControl.Fd()) + if err := ioutil.WriteFile(eventControlPath, []byte(data), 0700); err != nil { + eventfd.Close() + oomControl.Close() + return nil, err + } + ch := make(chan struct{}) + go func() { + defer func() { + close(ch) + eventfd.Close() + oomControl.Close() + }() + buf := make([]byte, 8) + for { + if _, err := eventfd.Read(buf); err != nil { + return + } + // When a cgroup is destroyed, an event is sent to eventfd. + // So if the control path is gone, return instead of notifying. 
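+			// The 8-byte read above is the eventfd counter; the Lstat below
+			// distinguishes a real OOM notification from cgroup teardown.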
+ if _, err := os.Lstat(eventControlPath); os.IsNotExist(err) { + return + } + ch <- struct{}{} + } + }() + return ch, nil +} + +// createContainer populates and configures the container type with the +// data provided by the execdriver.Command +func (d *driver) createContainer(c *execdriver.Command) (*configs.Config, error) { + container := execdriver.InitContainer(c) + if err := execdriver.SetupCgroups(container, c); err != nil { + return nil, err + } + return container, nil +} + +// Return an map of susbystem -> container cgroup +func cgroupPaths(containerId string) (map[string]string, error) { + subsystems, err := cgroups.GetAllSubsystems() + if err != nil { + return nil, err + } + logrus.Debugf("subsystems: %s", subsystems) + paths := make(map[string]string) + for _, subsystem := range subsystems { + cgroupRoot, cgroupDir, err := findCgroupRootAndDir(subsystem) + logrus.Debugf("cgroup path %s %s", cgroupRoot, cgroupDir) + if err != nil { + //unsupported subystem + continue + } + path := filepath.Join(cgroupRoot, cgroupDir, "lxc", containerId) + paths[subsystem] = path + } + + return paths, nil +} + +// this is copy from old libcontainer nodes.go +func createDeviceNodes(rootfs string, nodesToCreate []*configs.Device) error { + oldMask := syscall.Umask(0000) + defer syscall.Umask(oldMask) + + for _, node := range nodesToCreate { + if err := createDeviceNode(rootfs, node); err != nil { + return err + } + } + return nil +} + +// Creates the device node in the rootfs of the container. +func createDeviceNode(rootfs string, node *configs.Device) error { + var ( + dest = filepath.Join(rootfs, node.Path) + parent = filepath.Dir(dest) + ) + + if err := os.MkdirAll(parent, 0755); err != nil { + return err + } + + fileMode := node.FileMode + switch node.Type { + case 'c': + fileMode |= syscall.S_IFCHR + case 'b': + fileMode |= syscall.S_IFBLK + default: + return fmt.Errorf("%c is not a valid device type for device %s", node.Type, node.Path) + } + + if err := syscall.Mknod(dest, uint32(fileMode), node.Mkdev()); err != nil && !os.IsExist(err) { + return fmt.Errorf("mknod %s %s", node.Path, err) + } + + if err := syscall.Chown(dest, int(node.Uid), int(node.Gid)); err != nil { + return fmt.Errorf("chown %s to %d:%d", node.Path, node.Uid, node.Gid) + } + + return nil +} + +// setupUser changes the groups, gid, and uid for the user inside the container +// copy from libcontainer, cause not it's private +func setupUser(userSpec string) error { + // Set up defaults. 
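+	// userSpec follows the usual "user[:group]" forms, e.g. "0", "1000:1000",
+	// or "nobody" (examples only); unresolvable specs make GetExecUserPath
+	// below return an error.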
+ defaultExecUser := user.ExecUser{ + Uid: syscall.Getuid(), + Gid: syscall.Getgid(), + Home: "/", + } + passwdPath, err := user.GetPasswdPath() + if err != nil { + return err + } + groupPath, err := user.GetGroupPath() + if err != nil { + return err + } + execUser, err := user.GetExecUserPath(userSpec, &defaultExecUser, passwdPath, groupPath) + if err != nil { + return err + } + if err := syscall.Setgroups(execUser.Sgids); err != nil { + return err + } + if err := system.Setgid(execUser.Gid); err != nil { + return err + } + if err := system.Setuid(execUser.Uid); err != nil { + return err + } + // if we didn't get HOME already, set it based on the user's HOME + if envHome := os.Getenv("HOME"); envHome == "" { + if err := os.Setenv("HOME", execUser.Home); err != nil { + return err + } + } + return nil +} + +/// Return the exit code of the process +// if the process has not exited -1 will be returned +func getExitCode(c *execdriver.Command) int { + if c.ProcessConfig.ProcessState == nil { + return -1 + } + return c.ProcessConfig.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() +} + +func (d *driver) Kill(c *execdriver.Command, sig int) error { + if sig == 9 || c.ProcessConfig.Process == nil { + return KillLxc(c.ID, sig) + } + + return c.ProcessConfig.Process.Signal(syscall.Signal(sig)) +} + +func (d *driver) Pause(c *execdriver.Command) error { + _, err := exec.LookPath("lxc-freeze") + if err == nil { + output, errExec := exec.Command("lxc-freeze", "-n", c.ID).CombinedOutput() + if errExec != nil { + return fmt.Errorf("Err: %s Output: %s", errExec, output) + } + } + + return err +} + +func (d *driver) Unpause(c *execdriver.Command) error { + _, err := exec.LookPath("lxc-unfreeze") + if err == nil { + output, errExec := exec.Command("lxc-unfreeze", "-n", c.ID).CombinedOutput() + if errExec != nil { + return fmt.Errorf("Err: %s Output: %s", errExec, output) + } + } + + return err +} + +func (d *driver) Terminate(c *execdriver.Command) error { + return KillLxc(c.ID, 9) +} + +func (d *driver) version() string { + var ( + version string + output []byte + err error + ) + if _, errPath := exec.LookPath("lxc-version"); errPath == nil { + output, err = exec.Command("lxc-version").CombinedOutput() + } else { + output, err = exec.Command("lxc-start", "--version").CombinedOutput() + } + if err == nil { + version = strings.TrimSpace(string(output)) + if parts := strings.SplitN(version, ":", 2); len(parts) == 2 { + version = strings.TrimSpace(parts[1]) + } + } + return version +} + +func KillLxc(id string, sig int) error { + var ( + err error + output []byte + ) + _, err = exec.LookPath("lxc-kill") + if err == nil { + output, err = exec.Command("lxc-kill", "-n", id, strconv.Itoa(sig)).CombinedOutput() + } else { + // lxc-stop does not take arbitrary signals like lxc-kill does + output, err = exec.Command("lxc-stop", "-k", "-n", id).CombinedOutput() + } + if err != nil { + return fmt.Errorf("Err: %s Output: %s", err, output) + } + return nil +} + +// wait for the process to start and return the pid for the process +func (d *driver) waitForStart(c *execdriver.Command, waitLock chan struct{}) (int, error) { + var ( + err error + output []byte + ) + // We wait for the container to be fully running. + // Timeout after 5 seconds. In case of broken pipe, just retry. 
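+	// Each poll shells out to `lxc-info -n <id>` and parses output of the
+	// form (sample taken from info_test.go):
+	//
+	//	state: RUNNING
+	//	pid: 50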
+ // Note: The container can run and finish correctly before + // the end of this loop + for now := time.Now(); time.Since(now) < 5*time.Second; { + select { + case <-waitLock: + // If the process dies while waiting for it, just return + return -1, nil + default: + } + + output, err = d.getInfo(c.ID) + if err == nil { + info, err := parseLxcInfo(string(output)) + if err != nil { + return -1, err + } + if info.Running { + return info.Pid, nil + } + } + time.Sleep(50 * time.Millisecond) + } + return -1, execdriver.ErrNotRunning +} + +func (d *driver) getInfo(id string) ([]byte, error) { + return exec.Command("lxc-info", "-n", id).CombinedOutput() +} + +type info struct { + ID string + driver *driver +} + +func (i *info) IsRunning() bool { + var running bool + + output, err := i.driver.getInfo(i.ID) + if err != nil { + logrus.Errorf("Error getting info for lxc container %s: %s (%s)", i.ID, err, output) + return false + } + if strings.Contains(string(output), "RUNNING") { + running = true + } + return running +} + +func (d *driver) Info(id string) execdriver.Info { + return &info{ + ID: id, + driver: d, + } +} + +func findCgroupRootAndDir(subsystem string) (string, string, error) { + cgroupRoot, err := cgroups.FindCgroupMountpoint(subsystem) + if err != nil { + return "", "", err + } + + cgroupDir, err := cgroups.GetThisCgroupDir(subsystem) + if err != nil { + return "", "", err + } + return cgroupRoot, cgroupDir, nil +} + +func (d *driver) GetPidsForContainer(id string) ([]int, error) { + pids := []int{} + + // cpu is chosen because it is the only non optional subsystem in cgroups + subsystem := "cpu" + cgroupRoot, cgroupDir, err := findCgroupRootAndDir(subsystem) + if err != nil { + return pids, err + } + + filename := filepath.Join(cgroupRoot, cgroupDir, id, "tasks") + if _, err := os.Stat(filename); os.IsNotExist(err) { + // With more recent lxc versions use, cgroup will be in lxc/ + filename = filepath.Join(cgroupRoot, cgroupDir, "lxc", id, "tasks") + } + + output, err := ioutil.ReadFile(filename) + if err != nil { + return pids, err + } + for _, p := range strings.Split(string(output), "\n") { + if len(p) == 0 { + continue + } + pid, err := strconv.Atoi(p) + if err != nil { + return pids, fmt.Errorf("Invalid pid '%s': %s", p, err) + } + pids = append(pids, pid) + } + return pids, nil +} + +func linkLxcStart(root string) error { + sourcePath, err := exec.LookPath("lxc-start") + if err != nil { + return err + } + targetPath := path.Join(root, "lxc-start-unconfined") + + if _, err := os.Lstat(targetPath); err != nil && !os.IsNotExist(err) { + return err + } else if err == nil { + if err := os.Remove(targetPath); err != nil { + return err + } + } + return os.Symlink(sourcePath, targetPath) +} + +// TODO: This can be moved to the mountinfo reader in the mount pkg +func rootIsShared() bool { + if data, err := ioutil.ReadFile("/proc/self/mountinfo"); err == nil { + for _, line := range strings.Split(string(data), "\n") { + cols := strings.Split(line, " ") + if len(cols) >= 6 && cols[4] == "/" { + return strings.HasPrefix(cols[6], "shared") + } + } + } + + // No idea, probably safe to assume so + return true +} + +func (d *driver) containerDir(containerId string) string { + return path.Join(d.libPath, "containers", containerId) +} + +func (d *driver) generateLXCConfig(c *execdriver.Command) (string, error) { + root := path.Join(d.containerDir(c.ID), "config.lxc") + + fo, err := os.Create(root) + if err != nil { + return "", err + } + defer fo.Close() + + if err := LxcTemplateCompiled.Execute(fo, 
struct { + *execdriver.Command + AppArmor bool + }{ + Command: c, + AppArmor: d.apparmor, + }); err != nil { + return "", err + } + + return root, nil +} + +func (d *driver) generateEnvConfig(c *execdriver.Command) error { + data, err := json.Marshal(c.ProcessConfig.Env) + if err != nil { + return err + } + p := path.Join(d.libPath, "containers", c.ID, "config.env") + c.Mounts = append(c.Mounts, execdriver.Mount{ + Source: p, + Destination: "/.dockerenv", + Writable: false, + Private: true, + }) + + return ioutil.WriteFile(p, data, 0600) +} + +// Clean not implemented for lxc +func (d *driver) Clean(id string) error { + return nil +} + +type TtyConsole struct { + MasterPty *os.File + SlavePty *os.File +} + +func NewTtyConsole(processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes) (*TtyConsole, error) { + // lxc is special in that we cannot create the master outside of the container without + // opening the slave because we have nothing to provide to the cmd. We have to open both then do + // the crazy setup on command right now instead of passing the console path to lxc and telling it + // to open up that console. we save a couple of openfiles in the native driver because we can do + // this. + ptyMaster, ptySlave, err := pty.Open() + if err != nil { + return nil, err + } + + tty := &TtyConsole{ + MasterPty: ptyMaster, + SlavePty: ptySlave, + } + + if err := tty.AttachPipes(&processConfig.Cmd, pipes); err != nil { + tty.Close() + return nil, err + } + + processConfig.Console = tty.SlavePty.Name() + + return tty, nil +} + +func (t *TtyConsole) Resize(h, w int) error { + return term.SetWinsize(t.MasterPty.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)}) +} + +func (t *TtyConsole) AttachPipes(command *exec.Cmd, pipes *execdriver.Pipes) error { + command.Stdout = t.SlavePty + command.Stderr = t.SlavePty + + go func() { + if wb, ok := pipes.Stdout.(interface { + CloseWriters() error + }); ok { + defer wb.CloseWriters() + } + + io.Copy(pipes.Stdout, t.MasterPty) + }() + + if pipes.Stdin != nil { + command.Stdin = t.SlavePty + command.SysProcAttr.Setctty = true + + go func() { + io.Copy(t.MasterPty, pipes.Stdin) + + pipes.Stdin.Close() + }() + } + return nil +} + +func (t *TtyConsole) Close() error { + t.SlavePty.Close() + return t.MasterPty.Close() +} + +func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { + return -1, ErrExec +} + +func (d *driver) Stats(id string) (*execdriver.ResourceStats, error) { + if _, ok := d.activeContainers[id]; !ok { + return nil, fmt.Errorf("%s is not a key in active containers", id) + } + return execdriver.Stats(d.containerDir(id), d.activeContainers[id].container.Cgroups.Memory, d.machineMemory) +} diff --git a/daemon/execdriver/lxc/info.go b/daemon/execdriver/lxc/info.go new file mode 100644 index 00000000..279211f3 --- /dev/null +++ b/daemon/execdriver/lxc/info.go @@ -0,0 +1,52 @@ +// +build linux + +package lxc + +import ( + "bufio" + "errors" + "strconv" + "strings" +) + +var ( + ErrCannotParse = errors.New("cannot parse raw input") +) + +type lxcInfo struct { + Running bool + Pid int +} + +func parseLxcInfo(raw string) (*lxcInfo, error) { + if raw == "" { + return nil, ErrCannotParse + } + var ( + err error + s = bufio.NewScanner(strings.NewReader(raw)) + info = &lxcInfo{} + ) + for s.Scan() { + text := s.Text() + + if s.Err() != nil { + return nil, s.Err() + } + + parts := strings.Split(text, ":") + if len(parts) < 2 { + 
continue + } + switch strings.ToLower(strings.TrimSpace(parts[0])) { + case "state": + info.Running = strings.TrimSpace(parts[1]) == "RUNNING" + case "pid": + info.Pid, err = strconv.Atoi(strings.TrimSpace(parts[1])) + if err != nil { + return nil, err + } + } + } + return info, nil +} diff --git a/daemon/execdriver/lxc/info_test.go b/daemon/execdriver/lxc/info_test.go new file mode 100644 index 00000000..996d56b2 --- /dev/null +++ b/daemon/execdriver/lxc/info_test.go @@ -0,0 +1,38 @@ +// +build linux + +package lxc + +import ( + "testing" +) + +func TestParseRunningInfo(t *testing.T) { + raw := ` + state: RUNNING + pid: 50` + + info, err := parseLxcInfo(raw) + if err != nil { + t.Fatal(err) + } + if !info.Running { + t.Fatal("info should return a running state") + } + if info.Pid != 50 { + t.Fatalf("info should have pid 50 got %d", info.Pid) + } +} + +func TestEmptyInfo(t *testing.T) { + _, err := parseLxcInfo("") + if err == nil { + t.Fatal("error should not be nil") + } +} + +func TestBadInfo(t *testing.T) { + _, err := parseLxcInfo("state") + if err != nil { + t.Fatal(err) + } +} diff --git a/daemon/execdriver/lxc/init.go b/daemon/execdriver/lxc/init.go new file mode 100644 index 00000000..aba1c74c --- /dev/null +++ b/daemon/execdriver/lxc/init.go @@ -0,0 +1,145 @@ +// +build linux + +package lxc + +import ( + "encoding/json" + "flag" + "fmt" + "os" + "os/exec" + "runtime" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/reexec" +) + +// Args provided to the init function for a driver +type InitArgs struct { + User string + Gateway string + Ip string + WorkDir string + Privileged bool + Env []string + Args []string + Mtu int + Console string + Pipe int + Root string + CapAdd string + CapDrop string +} + +func init() { + // like always lxc requires a hack to get this to work + reexec.Register("/.dockerinit", dockerInititalizer) +} + +func dockerInititalizer() { + initializer() +} + +// initializer is the lxc driver's init function that is run inside the namespace to setup +// additional configurations +func initializer() { + runtime.LockOSThread() + + args := getArgs() + + if err := setupNamespace(args); err != nil { + logrus.Fatal(err) + } +} + +func setupNamespace(args *InitArgs) error { + if err := setupEnv(args); err != nil { + return err + } + + if err := finalizeNamespace(args); err != nil { + return err + } + + path, err := exec.LookPath(args.Args[0]) + if err != nil { + logrus.Infof("Unable to locate %v", args.Args[0]) + os.Exit(127) + } + + if err := syscall.Exec(path, args.Args, os.Environ()); err != nil { + return fmt.Errorf("dockerinit unable to execute %s - %s", path, err) + } + + return nil +} + +func getArgs() *InitArgs { + var ( + // Get cmdline arguments + user = flag.String("u", "", "username or uid") + gateway = flag.String("g", "", "gateway address") + ip = flag.String("i", "", "ip address") + workDir = flag.String("w", "", "workdir") + privileged = flag.Bool("privileged", false, "privileged mode") + mtu = flag.Int("mtu", 1500, "interface mtu") + capAdd = flag.String("cap-add", "", "capabilities to add") + capDrop = flag.String("cap-drop", "", "capabilities to drop") + ) + + flag.Parse() + + return &InitArgs{ + User: *user, + Gateway: *gateway, + Ip: *ip, + WorkDir: *workDir, + Privileged: *privileged, + Args: flag.Args(), + Mtu: *mtu, + CapAdd: *capAdd, + CapDrop: *capDrop, + } +} + +// Clear environment pollution introduced by lxc-start +func setupEnv(args *InitArgs) error { + // Get env + var env []string + dockerenv, err := 
os.Open(".dockerenv") + if err != nil { + return fmt.Errorf("Unable to load environment variables: %v", err) + } + defer dockerenv.Close() + if err := json.NewDecoder(dockerenv).Decode(&env); err != nil { + return fmt.Errorf("Unable to decode environment variables: %v", err) + } + // Propagate the plugin-specific container env variable + env = append(env, "container="+os.Getenv("container")) + + args.Env = env + + os.Clearenv() + for _, kv := range args.Env { + parts := strings.SplitN(kv, "=", 2) + if len(parts) == 1 { + parts = append(parts, "") + } + os.Setenv(parts[0], parts[1]) + } + + return nil +} + +// Setup working directory +func setupWorkingDirectory(args *InitArgs) error { + if args.WorkDir == "" { + return nil + } + if err := syscall.Chdir(args.WorkDir); err != nil { + return fmt.Errorf("Unable to change dir to %v: %v", args.WorkDir, err) + } + return nil +} diff --git a/daemon/execdriver/lxc/lxc_init_linux.go b/daemon/execdriver/lxc/lxc_init_linux.go new file mode 100644 index 00000000..c63a0cbb --- /dev/null +++ b/daemon/execdriver/lxc/lxc_init_linux.go @@ -0,0 +1,22 @@ +// +build linux + +package lxc + +import ( + "fmt" + + "github.com/opencontainers/runc/libcontainer/utils" +) + +func finalizeNamespace(args *InitArgs) error { + if err := utils.CloseExecFrom(3); err != nil { + return err + } + if err := setupUser(args.User); err != nil { + return fmt.Errorf("setup user %s", err) + } + if err := setupWorkingDirectory(args); err != nil { + return err + } + return nil +} diff --git a/daemon/execdriver/lxc/lxc_init_unsupported.go b/daemon/execdriver/lxc/lxc_init_unsupported.go new file mode 100644 index 00000000..3b7be139 --- /dev/null +++ b/daemon/execdriver/lxc/lxc_init_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux + +package lxc + +func finalizeNamespace(args *InitArgs) error { + panic("Not supported on this platform") +} diff --git a/daemon/execdriver/lxc/lxc_template.go b/daemon/execdriver/lxc/lxc_template.go new file mode 100644 index 00000000..a4ac51ea --- /dev/null +++ b/daemon/execdriver/lxc/lxc_template.go @@ -0,0 +1,261 @@ +// +build linux + +package lxc + +import ( + "fmt" + "os" + "strings" + "text/template" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/execdriver" + nativeTemplate "github.com/docker/docker/daemon/execdriver/native/template" + "github.com/docker/docker/pkg/stringutils" + "github.com/opencontainers/runc/libcontainer/label" +) + +const LxcTemplate = ` +lxc.network.type = none +# root filesystem +{{$ROOTFS := .Rootfs}} +lxc.rootfs = {{$ROOTFS}} + +# use a dedicated pts for the container (and limit the number of pseudo terminal +# available) +lxc.pts = 1024 + +# disable the main console +lxc.console = none + +# no controlling tty at all +lxc.tty = 1 + +{{if .ProcessConfig.Privileged}} +lxc.cgroup.devices.allow = a +{{else}} +# no implicit access to devices +lxc.cgroup.devices.deny = a +#Allow the devices passed to us in the AllowedDevices list. +{{range $allowedDevice := .AllowedDevices}} +lxc.cgroup.devices.allow = {{$allowedDevice.CgroupString}} +{{end}} +{{end}} + +# standard mount point +# Use mnt.putold as per https://bugs.launchpad.net/ubuntu/+source/lxc/+bug/986385 +lxc.pivotdir = lxc_putold + +# lxc.autodev is not compatible with lxc --device switch +lxc.autodev = 0 + +# NOTICE: These mounts must be applied within the namespace +{{if .ProcessConfig.Privileged}} +# WARNING: mounting procfs and/or sysfs read-write is a known attack vector. +# See e.g. 
http://blog.zx2c4.com/749 and https://bit.ly/T9CkqJ +# We mount them read-write here, but later, dockerinit will call the Restrict() function to remount them read-only. +# We cannot mount them directly read-only, because that would prevent loading AppArmor profiles. +lxc.mount.entry = proc {{escapeFstabSpaces $ROOTFS}}/proc proc nosuid,nodev,noexec 0 0 +lxc.mount.entry = sysfs {{escapeFstabSpaces $ROOTFS}}/sys sysfs nosuid,nodev,noexec 0 0 + {{if .AppArmor}} +lxc.aa_profile = unconfined + {{end}} +{{else}} +# In non-privileged mode, lxc will automatically mount /proc and /sys in readonly mode +# for security. See: http://man7.org/linux/man-pages/man5/lxc.container.conf.5.html +lxc.mount.auto = proc sys + {{if .AppArmorProfile}} +lxc.aa_profile = {{.AppArmorProfile}} + {{end}} +{{end}} + +{{if .ProcessConfig.Tty}} +lxc.mount.entry = {{.ProcessConfig.Console}} {{escapeFstabSpaces $ROOTFS}}/dev/console none bind,rw,create=file 0 0 +{{end}} + +lxc.mount.entry = devpts {{escapeFstabSpaces $ROOTFS}}/dev/pts devpts {{formatMountLabel "newinstance,ptmxmode=0666,nosuid,noexec,create=dir" ""}} 0 0 +lxc.mount.entry = shm {{escapeFstabSpaces $ROOTFS}}/dev/shm tmpfs {{formatMountLabel "size=65536k,nosuid,nodev,noexec,create=dir" ""}} 0 0 + +{{range $value := .Mounts}} +{{$createVal := isDirectory $value.Source}} +{{if $value.Writable}} +lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none rbind,rw,create={{$createVal}} 0 0 +{{else}} +lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none rbind,ro,create={{$createVal}} 0 0 +{{end}} +{{end}} + +# limits +{{if .Resources}} +{{if .Resources.Memory}} +lxc.cgroup.memory.limit_in_bytes = {{.Resources.Memory}} +lxc.cgroup.memory.soft_limit_in_bytes = {{.Resources.Memory}} +{{with $memSwap := getMemorySwap .Resources}} +lxc.cgroup.memory.memsw.limit_in_bytes = {{$memSwap}} +{{end}} +{{end}} +{{if .Resources.CpuShares}} +lxc.cgroup.cpu.shares = {{.Resources.CpuShares}} +{{end}} +{{if .Resources.CpuPeriod}} +lxc.cgroup.cpu.cfs_period_us = {{.Resources.CpuPeriod}} +{{end}} +{{if .Resources.CpusetCpus}} +lxc.cgroup.cpuset.cpus = {{.Resources.CpusetCpus}} +{{end}} +{{if .Resources.CpusetMems}} +lxc.cgroup.cpuset.mems = {{.Resources.CpusetMems}} +{{end}} +{{if .Resources.CpuQuota}} +lxc.cgroup.cpu.cfs_quota_us = {{.Resources.CpuQuota}} +{{end}} +{{if .Resources.BlkioWeight}} +lxc.cgroup.blkio.weight = {{.Resources.BlkioWeight}} +{{end}} +{{if .Resources.OomKillDisable}} +lxc.cgroup.memory.oom_control = {{.Resources.OomKillDisable}} +{{end}} +{{if gt .Resources.MemorySwappiness 0}} +lxc.cgroup.memory.swappiness = {{.Resources.MemorySwappiness}} +{{end}} +{{end}} + +{{if .LxcConfig}} +{{range $value := .LxcConfig}} +lxc.{{$value}} +{{end}} +{{end}} + +{{if .Network.Interface}} +{{if .Network.Interface.IPAddress}} +lxc.network.ipv4 = {{.Network.Interface.IPAddress}}/{{.Network.Interface.IPPrefixLen}} +{{end}} +{{if .Network.Interface.Gateway}} +lxc.network.ipv4.gateway = {{.Network.Interface.Gateway}} +{{end}} +{{if .Network.Interface.MacAddress}} +lxc.network.hwaddr = {{.Network.Interface.MacAddress}} +{{end}} +{{end}} +{{if .ProcessConfig.Env}} +lxc.utsname = {{getHostname .ProcessConfig.Env}} +{{end}} + +{{if .ProcessConfig.Privileged}} +# No cap values are needed, as lxc is starting in privileged mode +{{else}} + {{ with keepCapabilities .CapAdd .CapDrop }} + {{range .}} +lxc.cap.keep = {{.}} + {{end}} + {{else}} + {{ with dropList .CapDrop }} + 
{{range .}} +lxc.cap.drop = {{.}} + {{end}} + {{end}} + {{end}} +{{end}} +` + +var LxcTemplateCompiled *template.Template + +// Escape spaces in strings according to the fstab documentation, which is the +// format for "lxc.mount.entry" lines in lxc.conf. See also "man 5 fstab". +func escapeFstabSpaces(field string) string { + return strings.Replace(field, " ", "\\040", -1) +} + +func keepCapabilities(adds []string, drops []string) ([]string, error) { + container := nativeTemplate.New() + logrus.Debugf("adds %s drops %s\n", adds, drops) + caps, err := execdriver.TweakCapabilities(container.Capabilities, adds, drops) + if err != nil { + return nil, err + } + var newCaps []string + for _, cap := range caps { + logrus.Debugf("cap %s\n", cap) + realCap := execdriver.GetCapability(cap) + numCap := fmt.Sprintf("%d", realCap.Value) + newCaps = append(newCaps, numCap) + } + + return newCaps, nil +} + +func dropList(drops []string) ([]string, error) { + if stringutils.InSlice(drops, "all") { + var newCaps []string + for _, capName := range execdriver.GetAllCapabilities() { + cap := execdriver.GetCapability(capName) + logrus.Debugf("drop cap %s\n", cap.Key) + numCap := fmt.Sprintf("%d", cap.Value) + newCaps = append(newCaps, numCap) + } + return newCaps, nil + } + return []string{}, nil +} + +func isDirectory(source string) string { + f, err := os.Stat(source) + logrus.Debugf("dir: %s\n", source) + if err != nil { + if os.IsNotExist(err) { + return "dir" + } + return "" + } + if f.IsDir() { + return "dir" + } + return "file" +} + +func getMemorySwap(v *execdriver.Resources) int64 { + // By default, MemorySwap is set to twice the size of RAM. + // If you want to omit MemorySwap, set it to `-1'. + if v.MemorySwap < 0 { + return 0 + } + return v.Memory * 2 +} + +func getLabel(c map[string][]string, name string) string { + label := c["label"] + for _, l := range label { + parts := strings.SplitN(l, "=", 2) + if strings.TrimSpace(parts[0]) == name { + return strings.TrimSpace(parts[1]) + } + } + return "" +} + +func getHostname(env []string) string { + for _, kv := range env { + parts := strings.SplitN(kv, "=", 2) + if parts[0] == "HOSTNAME" && len(parts) == 2 { + return parts[1] + } + } + return "" +} + +func init() { + var err error + funcMap := template.FuncMap{ + "getMemorySwap": getMemorySwap, + "escapeFstabSpaces": escapeFstabSpaces, + "formatMountLabel": label.FormatMountLabel, + "isDirectory": isDirectory, + "keepCapabilities": keepCapabilities, + "dropList": dropList, + "getHostname": getHostname, + } + LxcTemplateCompiled, err = template.New("lxc").Funcs(funcMap).Parse(LxcTemplate) + if err != nil { + panic(err) + } +} diff --git a/daemon/execdriver/lxc/lxc_template_unit_test.go b/daemon/execdriver/lxc/lxc_template_unit_test.go new file mode 100644 index 00000000..2d4dba5c --- /dev/null +++ b/daemon/execdriver/lxc/lxc_template_unit_test.go @@ -0,0 +1,346 @@ +// +build linux + +package lxc + +import ( + "bufio" + "fmt" + "io/ioutil" + "math/rand" + "os" + "path" + "strings" + "testing" + "time" + + "github.com/docker/docker/daemon/execdriver" + nativeTemplate "github.com/docker/docker/daemon/execdriver/native/template" + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/syndtr/gocapability/capability" +) + +func TestLXCConfig(t *testing.T) { + root, err := ioutil.TempDir("", "TestLXCConfig") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(root) + + os.MkdirAll(path.Join(root, "containers", "1"), 0777) + + // Memory is allocated randomly for testing + r := 
rand.New(rand.NewSource(time.Now().UTC().UnixNano())) + var ( + memMin = 33554432 + memMax = 536870912 + mem = memMin + r.Intn(memMax-memMin) + cpuMin = 100 + cpuMax = 10000 + cpu = cpuMin + r.Intn(cpuMax-cpuMin) + ) + + driver, err := NewDriver(root, root, "", false) + if err != nil { + t.Fatal(err) + } + command := &execdriver.Command{ + ID: "1", + Resources: &execdriver.Resources{ + Memory: int64(mem), + CpuShares: int64(cpu), + }, + Network: &execdriver.Network{ + Mtu: 1500, + Interface: nil, + }, + AllowedDevices: make([]*configs.Device, 0), + ProcessConfig: execdriver.ProcessConfig{}, + } + p, err := driver.generateLXCConfig(command) + if err != nil { + t.Fatal(err) + } + grepFile(t, p, + fmt.Sprintf("lxc.cgroup.memory.limit_in_bytes = %d", mem)) + + grepFile(t, p, + fmt.Sprintf("lxc.cgroup.memory.memsw.limit_in_bytes = %d", mem*2)) +} + +func TestCustomLxcConfig(t *testing.T) { + root, err := ioutil.TempDir("", "TestCustomLxcConfig") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(root) + + os.MkdirAll(path.Join(root, "containers", "1"), 0777) + + driver, err := NewDriver(root, root, "", false) + if err != nil { + t.Fatal(err) + } + processConfig := execdriver.ProcessConfig{ + Privileged: false, + } + command := &execdriver.Command{ + ID: "1", + LxcConfig: []string{ + "lxc.utsname = docker", + "lxc.cgroup.cpuset.cpus = 0,1", + }, + Network: &execdriver.Network{ + Mtu: 1500, + Interface: nil, + }, + ProcessConfig: processConfig, + } + + p, err := driver.generateLXCConfig(command) + if err != nil { + t.Fatal(err) + } + + grepFile(t, p, "lxc.utsname = docker") + grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1") +} + +func grepFile(t *testing.T, path string, pattern string) { + grepFileWithReverse(t, path, pattern, false) +} + +func grepFileWithReverse(t *testing.T, path string, pattern string, inverseGrep bool) { + f, err := os.Open(path) + if err != nil { + t.Fatal(err) + } + defer f.Close() + r := bufio.NewReader(f) + var ( + line string + ) + err = nil + for err == nil { + line, err = r.ReadString('\n') + if strings.Contains(line, pattern) == true { + if inverseGrep { + t.Fatalf("grepFile: pattern \"%s\" found in \"%s\"", pattern, path) + } + return + } + } + if inverseGrep { + return + } + t.Fatalf("grepFile: pattern \"%s\" not found in \"%s\"", pattern, path) +} + +func TestEscapeFstabSpaces(t *testing.T) { + var testInputs = map[string]string{ + " ": "\\040", + "": "", + "/double space": "/double\\040\\040space", + "/some long test string": "/some\\040long\\040test\\040string", + "/var/lib/docker": "/var/lib/docker", + " leading": "\\040leading", + "trailing ": "trailing\\040", + } + for in, exp := range testInputs { + if out := escapeFstabSpaces(in); exp != out { + t.Logf("Expected %s got %s", exp, out) + t.Fail() + } + } +} + +func TestIsDirectory(t *testing.T) { + tempDir, err := ioutil.TempDir("", "TestIsDir") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempDir) + + tempFile, err := ioutil.TempFile(tempDir, "TestIsDirFile") + if err != nil { + t.Fatal(err) + } + + if isDirectory(tempDir) != "dir" { + t.Logf("Could not identify %s as a directory", tempDir) + t.Fail() + } + + if isDirectory(tempFile.Name()) != "file" { + t.Logf("Could not identify %s as a file", tempFile.Name()) + t.Fail() + } +} + +func TestCustomLxcConfigMounts(t *testing.T) { + root, err := ioutil.TempDir("", "TestCustomLxcConfig") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(root) + tempDir, err := ioutil.TempDir("", "TestIsDir") + if err != nil { + t.Fatal(err) + } + defer 
os.RemoveAll(tempDir) + + tempFile, err := ioutil.TempFile(tempDir, "TestIsDirFile") + if err != nil { + t.Fatal(err) + } + os.MkdirAll(path.Join(root, "containers", "1"), 0777) + + driver, err := NewDriver(root, root, "", false) + if err != nil { + t.Fatal(err) + } + processConfig := execdriver.ProcessConfig{ + Privileged: false, + } + mounts := []execdriver.Mount{ + { + Source: tempDir, + Destination: tempDir, + Writable: false, + Private: true, + }, + { + Source: tempFile.Name(), + Destination: tempFile.Name(), + Writable: true, + Private: true, + }, + } + command := &execdriver.Command{ + ID: "1", + LxcConfig: []string{ + "lxc.utsname = docker", + "lxc.cgroup.cpuset.cpus = 0,1", + }, + Network: &execdriver.Network{ + Mtu: 1500, + Interface: nil, + }, + Mounts: mounts, + ProcessConfig: processConfig, + } + + p, err := driver.generateLXCConfig(command) + if err != nil { + t.Fatal(err) + } + + grepFile(t, p, "lxc.utsname = docker") + grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1") + + grepFile(t, p, fmt.Sprintf("lxc.mount.entry = %s %s none rbind,ro,create=%s 0 0", tempDir, "/"+tempDir, "dir")) + grepFile(t, p, fmt.Sprintf("lxc.mount.entry = %s %s none rbind,rw,create=%s 0 0", tempFile.Name(), "/"+tempFile.Name(), "file")) +} + +func TestCustomLxcConfigMisc(t *testing.T) { + root, err := ioutil.TempDir("", "TestCustomLxcConfig") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(root) + os.MkdirAll(path.Join(root, "containers", "1"), 0777) + driver, err := NewDriver(root, root, "", true) + + if err != nil { + t.Fatal(err) + } + processConfig := execdriver.ProcessConfig{ + Privileged: false, + } + + processConfig.Env = []string{"HOSTNAME=testhost"} + command := &execdriver.Command{ + ID: "1", + LxcConfig: []string{ + "lxc.cgroup.cpuset.cpus = 0,1", + }, + Network: &execdriver.Network{ + Mtu: 1500, + Interface: nil, + }, + ProcessConfig: processConfig, + CapAdd: []string{"net_admin", "syslog"}, + CapDrop: []string{"kill", "mknod"}, + AppArmorProfile: "lxc-container-default-with-nesting", + } + + p, err := driver.generateLXCConfig(command) + if err != nil { + t.Fatal(err) + } + grepFile(t, p, "lxc.aa_profile = lxc-container-default-with-nesting") + // hostname + grepFile(t, p, "lxc.utsname = testhost") + grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1") + container := nativeTemplate.New() + for _, cap := range container.Capabilities { + realCap := execdriver.GetCapability(cap) + numCap := fmt.Sprintf("%d", realCap.Value) + if cap != "MKNOD" && cap != "KILL" { + grepFile(t, p, fmt.Sprintf("lxc.cap.keep = %s", numCap)) + } + } + + grepFileWithReverse(t, p, fmt.Sprintf("lxc.cap.keep = %d", capability.CAP_KILL), true) + grepFileWithReverse(t, p, fmt.Sprintf("lxc.cap.keep = %d", capability.CAP_MKNOD), true) +} + +func TestCustomLxcConfigMiscOverride(t *testing.T) { + root, err := ioutil.TempDir("", "TestCustomLxcConfig") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(root) + os.MkdirAll(path.Join(root, "containers", "1"), 0777) + driver, err := NewDriver(root, root, "", false) + if err != nil { + t.Fatal(err) + } + processConfig := execdriver.ProcessConfig{ + Privileged: false, + } + + processConfig.Env = []string{"HOSTNAME=testhost"} + command := &execdriver.Command{ + ID: "1", + LxcConfig: []string{ + "lxc.cgroup.cpuset.cpus = 0,1", + "lxc.network.ipv4 = 172.0.0.1", + }, + Network: &execdriver.Network{ + Mtu: 1500, + Interface: nil, + }, + ProcessConfig: processConfig, + CapAdd: []string{"NET_ADMIN", "SYSLOG"}, + CapDrop: []string{"KILL", "MKNOD"}, + } + + p, err := 
driver.generateLXCConfig(command)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// hostname
+	grepFile(t, p, "lxc.utsname = testhost")
+	grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1")
+	container := nativeTemplate.New()
+	for _, cap := range container.Capabilities {
+		realCap := execdriver.GetCapability(cap)
+		numCap := fmt.Sprintf("%d", realCap.Value)
+		if cap != "MKNOD" && cap != "KILL" {
+			grepFile(t, p, fmt.Sprintf("lxc.cap.keep = %s", numCap))
+		}
+	}
+	grepFileWithReverse(t, p, fmt.Sprintf("lxc.cap.keep = %d", capability.CAP_KILL), true)
+	grepFileWithReverse(t, p, fmt.Sprintf("lxc.cap.keep = %d", capability.CAP_MKNOD), true)
+}
diff --git a/daemon/execdriver/native/apparmor.go b/daemon/execdriver/native/apparmor.go
new file mode 100644
index 00000000..30d49b37
--- /dev/null
+++ b/daemon/execdriver/native/apparmor.go
@@ -0,0 +1,145 @@
+// +build linux
+
+package native
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"path"
+	"strings"
+	"text/template"
+
+	"github.com/opencontainers/runc/libcontainer/apparmor"
+)
+
+const (
+	apparmorProfilePath = "/etc/apparmor.d/docker"
+)
+
+type data struct {
+	Name         string
+	Imports      []string
+	InnerImports []string
+}
+
+const baseTemplate = `
+{{range $value := .Imports}}
+{{$value}}
+{{end}}
+
+profile {{.Name}} flags=(attach_disconnected,mediate_deleted) {
+{{range $value := .InnerImports}}
+  {{$value}}
+{{end}}
+
+  network,
+  capability,
+  file,
+  umount,
+
+  deny @{PROC}/sys/fs/** wklx,
+  deny @{PROC}/fs/** wklx,
+  deny @{PROC}/sysrq-trigger rwklx,
+  deny @{PROC}/mem rwklx,
+  deny @{PROC}/kmem rwklx,
+  deny @{PROC}/kcore rwklx,
+  deny @{PROC}/sys/kernel/[^s][^h][^m]* wklx,
+  deny @{PROC}/sys/kernel/*/** wklx,
+
+  deny mount,
+
+  deny /sys/[^f]*/** wklx,
+  deny /sys/f[^s]*/** wklx,
+  deny /sys/fs/[^c]*/** wklx,
+  deny /sys/fs/c[^g]*/** wklx,
+  deny /sys/fs/cg[^r]*/** wklx,
+  deny /sys/firmware/efi/efivars/** rwklx,
+  deny /sys/kernel/security/** rwklx,
+}
+`
+
+func generateProfile(out io.Writer) error {
+	compiled, err := template.New("apparmor_profile").Parse(baseTemplate)
+	if err != nil {
+		return err
+	}
+	data := &data{
+		Name: "docker-default",
+	}
+	if tunablesExists() {
+		data.Imports = append(data.Imports, "#include <tunables/global>")
+	} else {
+		data.Imports = append(data.Imports, "@{PROC}=/proc/")
+	}
+	if abstractionsExists() {
+		data.InnerImports = append(data.InnerImports, "#include <abstractions/base>")
+	}
+	if err := compiled.Execute(out, data); err != nil {
+		return err
+	}
+	return nil
+}
+
+// check if the tunables/global file exists
+func tunablesExists() bool {
+	_, err := os.Stat("/etc/apparmor.d/tunables/global")
+	return err == nil
+}
+
+// check if the abstractions/base file exists
+func abstractionsExists() bool {
+	_, err := os.Stat("/etc/apparmor.d/abstractions/base")
+	return err == nil
+}
+
+func installAppArmorProfile() error {
+	if !apparmor.IsEnabled() {
+		return nil
+	}
+
+	// Make sure /etc/apparmor.d exists
+	if err := os.MkdirAll(path.Dir(apparmorProfilePath), 0755); err != nil {
+		return err
+	}
+
+	f, err := os.OpenFile(apparmorProfilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+	if err != nil {
+		return err
+	}
+	if err := generateProfile(f); err != nil {
+		f.Close()
+		return err
+	}
+	f.Close()
+
+	cmd := exec.Command("/sbin/apparmor_parser", "-r", "-W", "docker")
+	// to use the parser directly we have to make sure we are in the correct
+	// dir with the profile
+	cmd.Dir = "/etc/apparmor.d"
+
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("Error loading docker apparmor profile: %s (%s)", err, output)
+	}
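+
+	// At this point the parser has accepted the profile, so the
+	// docker-default profile generated above should now be active in the
+	// kernel and listed in /sys/kernel/security/apparmor/profiles, which
+	// is exactly what hasAppArmorProfileLoaded below reads.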
+	return nil
+}
+
+func hasAppArmorProfileLoaded(profile string) error {
+	file, err := os.Open("/sys/kernel/security/apparmor/profiles")
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+	r := bufio.NewReader(file)
+	for {
+		p, err := r.ReadString('\n')
+		if err != nil {
+			return err
+		}
+		if strings.HasPrefix(p, profile+" ") {
+			return nil
+		}
+	}
+}
diff --git a/daemon/execdriver/native/create.go b/daemon/execdriver/native/create.go
new file mode 100644
index 00000000..bbc83fed
--- /dev/null
+++ b/daemon/execdriver/native/create.go
@@ -0,0 +1,265 @@
+// +build linux,cgo
+
+package native
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"strings"
+	"syscall"
+
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/opencontainers/runc/libcontainer/apparmor"
+	"github.com/opencontainers/runc/libcontainer/configs"
+	"github.com/opencontainers/runc/libcontainer/devices"
+	"github.com/opencontainers/runc/libcontainer/utils"
+)
+
+// createContainer populates and configures the container type with the
+// data provided by the execdriver.Command
+func (d *driver) createContainer(c *execdriver.Command) (*configs.Config, error) {
+	container := execdriver.InitContainer(c)
+
+	if err := d.createIpc(container, c); err != nil {
+		return nil, err
+	}
+
+	if err := d.createPid(container, c); err != nil {
+		return nil, err
+	}
+
+	if err := d.createUTS(container, c); err != nil {
+		return nil, err
+	}
+
+	if err := d.createNetwork(container, c); err != nil {
+		return nil, err
+	}
+
+	if c.ProcessConfig.Privileged {
+		if !container.Readonlyfs {
+			// clear readonly for /sys
+			for i := range container.Mounts {
+				if container.Mounts[i].Destination == "/sys" {
+					container.Mounts[i].Flags &= ^syscall.MS_RDONLY
+				}
+			}
+			container.ReadonlyPaths = nil
+		}
+
+		// clear readonly for cgroup
+		for i := range container.Mounts {
+			if container.Mounts[i].Device == "cgroup" {
+				container.Mounts[i].Flags &= ^syscall.MS_RDONLY
+			}
+		}
+
+		container.MaskPaths = nil
+		if err := d.setPrivileged(container); err != nil {
+			return nil, err
+		}
+	} else {
+		if err := d.setCapabilities(container, c); err != nil {
+			return nil, err
+		}
+	}
+
+	container.AdditionalGroups = c.GroupAdd
+
+	if c.AppArmorProfile != "" {
+		container.AppArmorProfile = c.AppArmorProfile
+	}
+
+	if err := execdriver.SetupCgroups(container, c); err != nil {
+		return nil, err
+	}
+
+	if container.Readonlyfs {
+		for i := range container.Mounts {
+			switch container.Mounts[i].Destination {
+			case "/proc", "/dev", "/dev/pts":
+				continue
+			}
+			container.Mounts[i].Flags |= syscall.MS_RDONLY
+		}
+
+		/* These paths must be remounted as r/o */
+		container.ReadonlyPaths = append(container.ReadonlyPaths, "/dev")
+	}
+
+	if err := d.setupMounts(container, c); err != nil {
+		return nil, err
+	}
+
+	d.setupLabels(container, c)
+	d.setupRlimits(container, c)
+	return container, nil
+}
+
+func generateIfaceName() (string, error) {
+	for i := 0; i < 10; i++ {
+		name, err := utils.GenerateRandomName("veth", 7)
+		if err != nil {
+			continue
+		}
+		if _, err := net.InterfaceByName(name); err != nil {
+			if strings.Contains(err.Error(), "no such") {
+				return name, nil
+			}
+			return "", err
+		}
+	}
+	return "", errors.New("Failed to find name for new interface")
+}
+
+func (d *driver) createNetwork(container *configs.Config, c *execdriver.Command) error {
+	if c.Network == nil {
+		return nil
+	}
+	if c.Network.ContainerID != "" {
+		d.Lock()
+		active := d.activeContainers[c.Network.ContainerID]
+		d.Unlock()
+
+		if active == nil {
+			return fmt.Errorf("%s is not a valid running container to join", c.Network.ContainerID)
+		}
+
+		state, err := active.State()
+		if err != nil {
+			return err
+		}
+
+		container.Namespaces.Add(configs.NEWNET, state.NamespacePaths[configs.NEWNET])
+		return nil
+	}
+
+	if c.Network.NamespacePath == "" {
+		return fmt.Errorf("network namespace path is empty")
+	}
+
+	container.Namespaces.Add(configs.NEWNET, c.Network.NamespacePath)
+	return nil
+}
+
+func (d *driver) createIpc(container *configs.Config, c *execdriver.Command) error {
+	if c.Ipc.HostIpc {
+		container.Namespaces.Remove(configs.NEWIPC)
+		return nil
+	}
+
+	if c.Ipc.ContainerID != "" {
+		d.Lock()
+		active := d.activeContainers[c.Ipc.ContainerID]
+		d.Unlock()
+
+		if active == nil {
+			return fmt.Errorf("%s is not a valid running container to join", c.Ipc.ContainerID)
+		}
+
+		state, err := active.State()
+		if err != nil {
+			return err
+		}
+		container.Namespaces.Add(configs.NEWIPC, state.NamespacePaths[configs.NEWIPC])
+	}
+
+	return nil
+}
+
+func (d *driver) createPid(container *configs.Config, c *execdriver.Command) error {
+	if c.Pid.HostPid {
+		container.Namespaces.Remove(configs.NEWPID)
+		return nil
+	}
+
+	return nil
+}
+
+func (d *driver) createUTS(container *configs.Config, c *execdriver.Command) error {
+	if c.UTS.HostUTS {
+		container.Namespaces.Remove(configs.NEWUTS)
+		container.Hostname = ""
+		return nil
+	}
+
+	return nil
+}
+
+func (d *driver) setPrivileged(container *configs.Config) (err error) {
+	container.Capabilities = execdriver.GetAllCapabilities()
+	container.Cgroups.AllowAllDevices = true
+
+	hostDevices, err := devices.HostDevices()
+	if err != nil {
+		return err
+	}
+	container.Devices = hostDevices
+
+	if apparmor.IsEnabled() {
+		container.AppArmorProfile = "unconfined"
+	}
+	return nil
+}
+
+func (d *driver) setCapabilities(container *configs.Config, c *execdriver.Command) (err error) {
+	container.Capabilities, err = execdriver.TweakCapabilities(container.Capabilities, c.CapAdd, c.CapDrop)
+	return err
+}
+
+func (d *driver) setupRlimits(container *configs.Config, c *execdriver.Command) {
+	if c.Resources == nil {
+		return
+	}
+
+	for _, rlimit := range c.Resources.Rlimits {
+		container.Rlimits = append(container.Rlimits, configs.Rlimit{
+			Type: rlimit.Type,
+			Hard: rlimit.Hard,
+			Soft: rlimit.Soft,
+		})
+	}
+}
+
+func (d *driver) setupMounts(container *configs.Config, c *execdriver.Command) error {
+	userMounts := make(map[string]struct{})
+	for _, m := range c.Mounts {
+		userMounts[m.Destination] = struct{}{}
+	}
+
+	// Filter out mounts that are overridden by user-supplied mounts
+	var defaultMounts []*configs.Mount
+	_, mountDev := userMounts["/dev"]
+	for _, m := range container.Mounts {
+		if _, ok := userMounts[m.Destination]; !ok {
+			if mountDev && strings.HasPrefix(m.Destination, "/dev/") {
+				continue
+			}
+			defaultMounts = append(defaultMounts, m)
+		}
+	}
+	container.Mounts = defaultMounts
+
+	for _, m := range c.Mounts {
+		flags := syscall.MS_BIND | syscall.MS_REC
+		if !m.Writable {
+			flags |= syscall.MS_RDONLY
+		}
+		if m.Slave {
+			flags |= syscall.MS_SLAVE
+		}
+		container.Mounts = append(container.Mounts, &configs.Mount{
+			Source:      m.Source,
+			Destination: m.Destination,
+			Device:      "bind",
+			Flags:       flags,
+		})
+	}
+	return nil
+}
+
+func (d *driver) setupLabels(container *configs.Config, c *execdriver.Command) {
+	container.ProcessLabel = c.ProcessLabel
+	container.MountLabel = c.MountLabel
+}
diff --git a/daemon/execdriver/native/driver.go b/daemon/execdriver/native/driver.go
new file mode 100644
index 00000000..c5d4d964
--- /dev/null
+++ b/daemon/execdriver/native/driver.go
@@ -0,0 +1,457 @@
+// +build linux,cgo
+
+package native
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/pools"
+	"github.com/docker/docker/pkg/reexec"
+	sysinfo "github.com/docker/docker/pkg/system"
+	"github.com/docker/docker/pkg/term"
+	"github.com/opencontainers/runc/libcontainer"
+	"github.com/opencontainers/runc/libcontainer/apparmor"
+	"github.com/opencontainers/runc/libcontainer/cgroups/systemd"
+	"github.com/opencontainers/runc/libcontainer/configs"
+	"github.com/opencontainers/runc/libcontainer/system"
+	"github.com/opencontainers/runc/libcontainer/utils"
+)
+
+const (
+	DriverName = "native"
+	Version    = "0.2"
+)
+
+type driver struct {
+	root             string
+	initPath         string
+	activeContainers map[string]libcontainer.Container
+	machineMemory    int64
+	factory          libcontainer.Factory
+	sync.Mutex
+}
+
+func NewDriver(root, initPath string, options []string) (*driver, error) {
+	meminfo, err := sysinfo.ReadMemInfo()
+	if err != nil {
+		return nil, err
+	}
+
+	if err := sysinfo.MkdirAll(root, 0700); err != nil {
+		return nil, err
+	}
+
+	if apparmor.IsEnabled() {
+		if err := installAppArmorProfile(); err != nil {
+			apparmorProfiles := []string{"docker-default"}
+
+			// Allow the daemon to run even though profile loading failed,
+			// as long as the required profiles are already active (possibly
+			// loaded by another run, manually, or at system startup).
+			for _, policy := range apparmorProfiles {
+				if err := hasAppArmorProfileLoaded(policy); err != nil {
+					return nil, fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded.", policy)
+				}
+			}
+		}
+	}
+
+	// choose cgroup manager
+	// this makes sure there are no breaking changes to people
+	// who upgrade from versions without native.cgroupdriver opt
+	cgm := libcontainer.Cgroupfs
+	if systemd.UseSystemd() {
+		cgm = libcontainer.SystemdCgroups
+	}
+
+	// parse the options
+	for _, option := range options {
+		key, val, err := parsers.ParseKeyValueOpt(option)
+		if err != nil {
+			return nil, err
+		}
+		key = strings.ToLower(key)
+		switch key {
+		case "native.cgroupdriver":
+			// override the default if they set options
+			switch val {
+			case "systemd":
+				if systemd.UseSystemd() {
+					cgm = libcontainer.SystemdCgroups
+				} else {
+					// warn them that they chose the wrong driver
+					logrus.Warn("You cannot use systemd as native.cgroupdriver, using cgroupfs instead")
+				}
+			case "cgroupfs":
+				cgm = libcontainer.Cgroupfs
+			default:
+				return nil, fmt.Errorf("Unknown native.cgroupdriver given %q. Try cgroupfs or systemd", val)
+			}
+		default:
+			return nil, fmt.Errorf("Unknown option %s", key)
+		}
+	}
+
+	f, err := libcontainer.New(
+		root,
+		cgm,
+		libcontainer.InitPath(reexec.Self(), DriverName),
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	return &driver{
+		root:             root,
+		initPath:         initPath,
+		activeContainers: make(map[string]libcontainer.Container),
+		machineMemory:    meminfo.MemTotal,
+		factory:          f,
+	}, nil
+}
+
+type execOutput struct {
+	exitCode int
+	err      error
+}
+
+func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
+	// take the Command and populate the libcontainer.Config from it
+	container, err := d.createContainer(c)
+	if err != nil {
+		return execdriver.ExitStatus{ExitCode: -1}, err
+	}
+
+	p := &libcontainer.Process{
+		Args: append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...),
+		Env:  c.ProcessConfig.Env,
+		Cwd:  c.WorkingDir,
+		User: c.ProcessConfig.User,
+	}
+
+	if err := setupPipes(container, &c.ProcessConfig, p, pipes); err != nil {
+		return execdriver.ExitStatus{ExitCode: -1}, err
+	}
+
+	cont, err := d.factory.Create(c.ID, container)
+	if err != nil {
+		return execdriver.ExitStatus{ExitCode: -1}, err
+	}
+	d.Lock()
+	d.activeContainers[c.ID] = cont
+	d.Unlock()
+	defer func() {
+		cont.Destroy()
+		d.cleanContainer(c.ID)
+	}()
+
+	if err := cont.Start(p); err != nil {
+		return execdriver.ExitStatus{ExitCode: -1}, err
+	}
+
+	if startCallback != nil {
+		pid, err := p.Pid()
+		if err != nil {
+			p.Signal(os.Kill)
+			p.Wait()
+			return execdriver.ExitStatus{ExitCode: -1}, err
+		}
+		startCallback(&c.ProcessConfig, pid)
+	}
+
+	oom := notifyOnOOM(cont)
+	waitF := p.Wait
+	if nss := cont.Config().Namespaces; !nss.Contains(configs.NEWPID) {
+		// we need this hack to track processes with inherited fds,
+		// because cmd.Wait() waits for all streams to be copied
+		waitF = waitInPIDHost(p, cont)
+	}
+	ps, err := waitF()
+	if err != nil {
+		execErr, ok := err.(*exec.ExitError)
+		if !ok {
+			return execdriver.ExitStatus{ExitCode: -1}, err
+		}
+		ps = execErr.ProcessState
+	}
+	cont.Destroy()
+	_, oomKill := <-oom
+	return execdriver.ExitStatus{ExitCode: utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), OOMKilled: oomKill}, nil
+}
+
+// notifyOnOOM returns a channel that signals if the container received an OOM notification
+// for any process. If it is unable to subscribe to OOM notifications then a closed
+// channel is returned as it will be non-blocking and return the correct result when read.
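+//
+// A sketch of the intended usage, mirroring Run() above (subscribe before
+// waiting, then drain the channel once the container has exited):
+//
+//	oom := notifyOnOOM(cont)
+//	ps, err := waitF()
+//	...
+//	_, oomKill := <-oom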
+func notifyOnOOM(container libcontainer.Container) <-chan struct{} {
+	oom, err := container.NotifyOOM()
+	if err != nil {
+		logrus.Warnf("Your kernel does not support OOM notifications: %s", err)
+		c := make(chan struct{})
+		close(c)
+		return c
+	}
+	return oom
+}
+
+func killCgroupProcs(c libcontainer.Container) {
+	var procs []*os.Process
+	if err := c.Pause(); err != nil {
+		logrus.Warn(err)
+	}
+	pids, err := c.Processes()
+	if err != nil {
+		// don't care about children if we can't get them; this is mostly
+		// because the cgroup has already been deleted
+		logrus.Warnf("Failed to get processes from container %s: %v", c.ID(), err)
+	}
+	for _, pid := range pids {
+		if p, err := os.FindProcess(pid); err == nil {
+			procs = append(procs, p)
+			if err := p.Kill(); err != nil {
+				logrus.Warn(err)
+			}
+		}
+	}
+	if err := c.Resume(); err != nil {
+		logrus.Warn(err)
+	}
+	for _, p := range procs {
+		if _, err := p.Wait(); err != nil {
+			logrus.Warn(err)
+		}
+	}
+}
+
+func waitInPIDHost(p *libcontainer.Process, c libcontainer.Container) func() (*os.ProcessState, error) {
+	return func() (*os.ProcessState, error) {
+		pid, err := p.Pid()
+		if err != nil {
+			return nil, err
+		}
+
+		process, err := os.FindProcess(pid)
+		if err != nil {
+			return nil, err
+		}
+		s, err := process.Wait()
+		if err != nil {
+			execErr, ok := err.(*exec.ExitError)
+			if !ok {
+				return s, err
+			}
+			s = execErr.ProcessState
+		}
+		killCgroupProcs(c)
+		p.Wait()
+		return s, err
+	}
+}
+
+func (d *driver) Kill(c *execdriver.Command, sig int) error {
+	d.Lock()
+	active := d.activeContainers[c.ID]
+	d.Unlock()
+	if active == nil {
+		return fmt.Errorf("active container for %s does not exist", c.ID)
+	}
+	state, err := active.State()
+	if err != nil {
+		return err
+	}
+	return syscall.Kill(state.InitProcessPid, syscall.Signal(sig))
+}
+
+func (d *driver) Pause(c *execdriver.Command) error {
+	d.Lock()
+	active := d.activeContainers[c.ID]
+	d.Unlock()
+	if active == nil {
+		return fmt.Errorf("active container for %s does not exist", c.ID)
+	}
+	return active.Pause()
+}
+
+func (d *driver) Unpause(c *execdriver.Command) error {
+	d.Lock()
+	active := d.activeContainers[c.ID]
+	d.Unlock()
+	if active == nil {
+		return fmt.Errorf("active container for %s does not exist", c.ID)
+	}
+	return active.Resume()
+}
+
+func (d *driver) Terminate(c *execdriver.Command) error {
+	defer d.cleanContainer(c.ID)
+	container, err := d.factory.Load(c.ID)
+	if err != nil {
+		return err
+	}
+	defer container.Destroy()
+	state, err := container.State()
+	if err != nil {
+		return err
+	}
+	pid := state.InitProcessPid
+	currentStartTime, err := system.GetProcessStartTime(pid)
+	if err != nil {
+		return err
+	}
+	if state.InitProcessStartTime == currentStartTime {
+		err = syscall.Kill(pid, 9)
+		syscall.Wait4(pid, nil, 0, nil)
+	}
+	return err
+}
+
+func (d *driver) Info(id string) execdriver.Info {
+	return &info{
+		ID:     id,
+		driver: d,
+	}
+}
+
+func (d *driver) Name() string {
+	return fmt.Sprintf("%s-%s", DriverName, Version)
+}
+
+func (d *driver) GetPidsForContainer(id string) ([]int, error) {
+	d.Lock()
+	active := d.activeContainers[id]
+	d.Unlock()
+
+	if active == nil {
+		return nil, fmt.Errorf("active container for %s does not exist", id)
+	}
+	return active.Processes()
+}
+
+func (d *driver) cleanContainer(id string) error {
+	d.Lock()
+	delete(d.activeContainers, id)
+	d.Unlock()
+	return os.RemoveAll(filepath.Join(d.root, id))
+}
+
+func (d *driver) createContainerRoot(id string) error {
+	return os.MkdirAll(filepath.Join(d.root, id), 0655)
+}
+
+func (d *driver) Clean(id string) error {
+	return os.RemoveAll(filepath.Join(d.root, id))
+}
+
+func (d *driver) Stats(id string) (*execdriver.ResourceStats, error) {
+	d.Lock()
+	c := d.activeContainers[id]
+	d.Unlock()
+	if c == nil {
+		return nil, execdriver.ErrNotRunning
+	}
+	now := time.Now()
+	stats, err := c.Stats()
+	if err != nil {
+		return nil, err
+	}
+	memoryLimit := c.Config().Cgroups.Memory
+	// if the container does not have any memory limit specified set the
+	// limit to the machine's memory
+	if memoryLimit == 0 {
+		memoryLimit = d.machineMemory
+	}
+	return &execdriver.ResourceStats{
+		Stats:       stats,
+		Read:        now,
+		MemoryLimit: memoryLimit,
+	}, nil
+}
+
+type TtyConsole struct {
+	console libcontainer.Console
+}
+
+func NewTtyConsole(console libcontainer.Console, pipes *execdriver.Pipes) (*TtyConsole, error) {
+	tty := &TtyConsole{
+		console: console,
+	}
+
+	if err := tty.AttachPipes(pipes); err != nil {
+		tty.Close()
+		return nil, err
+	}
+
+	return tty, nil
+}
+
+func (t *TtyConsole) Resize(h, w int) error {
+	return term.SetWinsize(t.console.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)})
+}
+
+func (t *TtyConsole) AttachPipes(pipes *execdriver.Pipes) error {
+	go func() {
+		if wb, ok := pipes.Stdout.(interface {
+			CloseWriters() error
+		}); ok {
+			defer wb.CloseWriters()
+		}
+
+		pools.Copy(pipes.Stdout, t.console)
+	}()
+
+	if pipes.Stdin != nil {
+		go func() {
+			pools.Copy(t.console, pipes.Stdin)
+
+			pipes.Stdin.Close()
+		}()
+	}
+
+	return nil
+}
+
+func (t *TtyConsole) Close() error {
+	return t.console.Close()
+}
+
+func setupPipes(container *configs.Config, processConfig *execdriver.ProcessConfig, p *libcontainer.Process, pipes *execdriver.Pipes) error {
+	var term execdriver.Terminal
+	var err error
+
+	if processConfig.Tty {
+		rootuid, err := container.HostUID()
+		if err != nil {
+			return err
+		}
+		cons, err := p.NewConsole(rootuid)
+		if err != nil {
+			return err
+		}
+		term, err = NewTtyConsole(cons, pipes)
+		if err != nil {
+			// err is shadowed inside this block, so it must be checked here;
+			// the check after the if/else only sees the outer err
+			return err
+		}
+	} else {
+		p.Stdout = pipes.Stdout
+		p.Stderr = pipes.Stderr
+		r, w, err := os.Pipe()
+		if err != nil {
+			return err
+		}
+		if pipes.Stdin != nil {
+			go func() {
+				io.Copy(w, pipes.Stdin)
+				w.Close()
+			}()
+			p.Stdin = r
+		}
+		term = &execdriver.StdConsole{}
+	}
+	if err != nil {
+		return err
+	}
+	processConfig.Terminal = term
+	return nil
+}
diff --git a/daemon/execdriver/native/driver_unsupported.go b/daemon/execdriver/native/driver_unsupported.go
new file mode 100644
index 00000000..97839cf3
--- /dev/null
+++ b/daemon/execdriver/native/driver_unsupported.go
@@ -0,0 +1,13 @@
+// +build !linux
+
+package native
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/daemon/execdriver"
+)
+
+func NewDriver(root, initPath string) (execdriver.Driver, error) {
+	return nil, fmt.Errorf("native driver not supported on non-linux")
+}
diff --git a/daemon/execdriver/native/driver_unsupported_nocgo.go b/daemon/execdriver/native/driver_unsupported_nocgo.go
new file mode 100644
index 00000000..2b8e9f81
--- /dev/null
+++ b/daemon/execdriver/native/driver_unsupported_nocgo.go
@@ -0,0 +1,13 @@
+// +build linux,!cgo
+
+package native
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/daemon/execdriver"
+)
+
+func NewDriver(root, initPath string) (execdriver.Driver, error) {
+	return nil, fmt.Errorf("native driver not supported when compiled without cgo")
+}
diff --git a/daemon/execdriver/native/exec.go b/daemon/execdriver/native/exec.go
new file mode 100644
index 00000000..47f812ed
--- /dev/null
+++ b/daemon/execdriver/native/exec.go
@@ -0,0 +1,59 @@
+// +build linux
+
+package native
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+
"syscall" + + "github.com/docker/docker/daemon/execdriver" + "github.com/opencontainers/runc/libcontainer" + _ "github.com/opencontainers/runc/libcontainer/nsenter" + "github.com/opencontainers/runc/libcontainer/utils" +) + +// TODO(vishh): Add support for running in privileged mode. +func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { + active := d.activeContainers[c.ID] + if active == nil { + return -1, fmt.Errorf("No active container exists with ID %s", c.ID) + } + + p := &libcontainer.Process{ + Args: append([]string{processConfig.Entrypoint}, processConfig.Arguments...), + Env: c.ProcessConfig.Env, + Cwd: c.WorkingDir, + User: processConfig.User, + } + + config := active.Config() + if err := setupPipes(&config, processConfig, p, pipes); err != nil { + return -1, err + } + + if err := active.Start(p); err != nil { + return -1, err + } + + if startCallback != nil { + pid, err := p.Pid() + if err != nil { + p.Signal(os.Kill) + p.Wait() + return -1, err + } + startCallback(&c.ProcessConfig, pid) + } + + ps, err := p.Wait() + if err != nil { + exitErr, ok := err.(*exec.ExitError) + if !ok { + return -1, err + } + ps = exitErr.ProcessState + } + return utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), nil +} diff --git a/daemon/execdriver/native/info.go b/daemon/execdriver/native/info.go new file mode 100644 index 00000000..9d7342da --- /dev/null +++ b/daemon/execdriver/native/info.go @@ -0,0 +1,16 @@ +// +build linux,cgo + +package native + +type info struct { + ID string + driver *driver +} + +// IsRunning is determined by looking for the +// pid file for a container. If the file exists then the +// container is currently running +func (i *info) IsRunning() bool { + _, ok := i.driver.activeContainers[i.ID] + return ok +} diff --git a/daemon/execdriver/native/init.go b/daemon/execdriver/native/init.go new file mode 100644 index 00000000..307b5b1b --- /dev/null +++ b/daemon/execdriver/native/init.go @@ -0,0 +1,45 @@ +// +build linux + +package native + +import ( + "fmt" + "os" + "runtime" + + "github.com/docker/docker/pkg/reexec" + "github.com/opencontainers/runc/libcontainer" +) + +func init() { + reexec.Register(DriverName, initializer) +} + +func fatal(err error) { + if lerr, ok := err.(libcontainer.Error); ok { + lerr.Detail(os.Stderr) + os.Exit(1) + } + + fmt.Fprintln(os.Stderr, err) + os.Exit(1) +} + +func initializer() { + runtime.GOMAXPROCS(1) + runtime.LockOSThread() + factory, err := libcontainer.New("") + if err != nil { + fatal(err) + } + if err := factory.StartInitialization(); err != nil { + fatal(err) + } + + panic("unreachable") +} + +func writeError(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} diff --git a/daemon/execdriver/native/template/default_template.go b/daemon/execdriver/native/template/default_template.go new file mode 100644 index 00000000..7352101d --- /dev/null +++ b/daemon/execdriver/native/template/default_template.go @@ -0,0 +1,110 @@ +package template + +import ( + "syscall" + + "github.com/opencontainers/runc/libcontainer/apparmor" + "github.com/opencontainers/runc/libcontainer/configs" +) + +const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV + +// New returns the docker default configuration for libcontainer +func New() *configs.Config { + container := &configs.Config{ + Capabilities: []string{ + "CHOWN", + "DAC_OVERRIDE", + "FSETID", + "FOWNER", + "MKNOD", + "NET_RAW", + "SETGID", + "SETUID", + 
"SETFCAP", + "SETPCAP", + "NET_BIND_SERVICE", + "SYS_CHROOT", + "KILL", + "AUDIT_WRITE", + }, + Namespaces: configs.Namespaces([]configs.Namespace{ + {Type: "NEWNS"}, + {Type: "NEWUTS"}, + {Type: "NEWIPC"}, + {Type: "NEWPID"}, + {Type: "NEWNET"}, + }), + Cgroups: &configs.Cgroup{ + Parent: "docker", + AllowAllDevices: false, + MemorySwappiness: -1, + }, + Mounts: []*configs.Mount{ + { + Source: "proc", + Destination: "/proc", + Device: "proc", + Flags: defaultMountFlags, + }, + { + Source: "tmpfs", + Destination: "/dev", + Device: "tmpfs", + Flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME, + Data: "mode=755", + }, + { + Source: "devpts", + Destination: "/dev/pts", + Device: "devpts", + Flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, + Data: "newinstance,ptmxmode=0666,mode=0620,gid=5", + }, + { + Device: "tmpfs", + Source: "shm", + Destination: "/dev/shm", + Data: "mode=1777,size=65536k", + Flags: defaultMountFlags, + }, + { + Source: "mqueue", + Destination: "/dev/mqueue", + Device: "mqueue", + Flags: defaultMountFlags, + }, + { + Source: "sysfs", + Destination: "/sys", + Device: "sysfs", + Flags: defaultMountFlags | syscall.MS_RDONLY, + }, + { + Source: "cgroup", + Destination: "/sys/fs/cgroup", + Device: "cgroup", + Flags: defaultMountFlags | syscall.MS_RDONLY, + }, + }, + MaskPaths: []string{ + "/proc/kcore", + "/proc/latency_stats", + "/proc/timer_stats", + }, + ReadonlyPaths: []string{ + "/proc/asound", + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger", + }, + } + + if apparmor.IsEnabled() { + container.AppArmorProfile = "docker-default" + } + + return container +} diff --git a/daemon/execdriver/pipes.go b/daemon/execdriver/pipes.go new file mode 100644 index 00000000..158219f0 --- /dev/null +++ b/daemon/execdriver/pipes.go @@ -0,0 +1,23 @@ +package execdriver + +import ( + "io" +) + +// Pipes is a wrapper around a containers output for +// stdin, stdout, stderr +type Pipes struct { + Stdin io.ReadCloser + Stdout, Stderr io.Writer +} + +func NewPipes(stdin io.ReadCloser, stdout, stderr io.Writer, useStdin bool) *Pipes { + p := &Pipes{ + Stdout: stdout, + Stderr: stderr, + } + if useStdin { + p.Stdin = stdin + } + return p +} diff --git a/daemon/execdriver/termconsole.go b/daemon/execdriver/termconsole.go new file mode 100644 index 00000000..4dc18e57 --- /dev/null +++ b/daemon/execdriver/termconsole.go @@ -0,0 +1,46 @@ +package execdriver + +import ( + "io" + "os/exec" +) + +type StdConsole struct { +} + +func NewStdConsole(processConfig *ProcessConfig, pipes *Pipes) (*StdConsole, error) { + std := &StdConsole{} + + if err := std.AttachPipes(&processConfig.Cmd, pipes); err != nil { + return nil, err + } + return std, nil +} + +func (s *StdConsole) AttachPipes(command *exec.Cmd, pipes *Pipes) error { + command.Stdout = pipes.Stdout + command.Stderr = pipes.Stderr + + if pipes.Stdin != nil { + stdin, err := command.StdinPipe() + if err != nil { + return err + } + + go func() { + defer stdin.Close() + io.Copy(stdin, pipes.Stdin) + }() + } + return nil +} + +func (s *StdConsole) Resize(h, w int) error { + // we do not need to reside a non tty + return nil +} + +func (s *StdConsole) Close() error { + // nothing to close here + return nil +} diff --git a/daemon/execdriver/utils.go b/daemon/execdriver/utils.go new file mode 100644 index 00000000..fd5a2705 --- /dev/null +++ b/daemon/execdriver/utils.go @@ -0,0 +1,114 @@ +package execdriver + +import ( + "fmt" + "strings" + + "github.com/docker/docker/pkg/stringutils" + 
"github.com/syndtr/gocapability/capability" +) + +var capabilityList Capabilities + +func init() { + last := capability.CAP_LAST_CAP + // hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap + if last == capability.Cap(63) { + last = capability.CAP_BLOCK_SUSPEND + } + for _, cap := range capability.List() { + if cap > last { + continue + } + capabilityList = append(capabilityList, + &CapabilityMapping{ + Key: strings.ToUpper(cap.String()), + Value: cap, + }, + ) + } +} + +type ( + CapabilityMapping struct { + Key string `json:"key,omitempty"` + Value capability.Cap `json:"value,omitempty"` + } + Capabilities []*CapabilityMapping +) + +func (c *CapabilityMapping) String() string { + return c.Key +} + +func GetCapability(key string) *CapabilityMapping { + for _, capp := range capabilityList { + if capp.Key == key { + cpy := *capp + return &cpy + } + } + return nil +} + +func GetAllCapabilities() []string { + output := make([]string, len(capabilityList)) + for i, capability := range capabilityList { + output[i] = capability.String() + } + return output +} + +func TweakCapabilities(basics, adds, drops []string) ([]string, error) { + var ( + newCaps []string + allCaps = GetAllCapabilities() + ) + + // look for invalid cap in the drop list + for _, cap := range drops { + if strings.ToLower(cap) == "all" { + continue + } + if !stringutils.InSlice(allCaps, cap) { + return nil, fmt.Errorf("Unknown capability drop: %q", cap) + } + } + + // handle --cap-add=all + if stringutils.InSlice(adds, "all") { + basics = allCaps + } + + if !stringutils.InSlice(drops, "all") { + for _, cap := range basics { + // skip `all` aready handled above + if strings.ToLower(cap) == "all" { + continue + } + + // if we don't drop `all`, add back all the non-dropped caps + if !stringutils.InSlice(drops, cap) { + newCaps = append(newCaps, strings.ToUpper(cap)) + } + } + } + + for _, cap := range adds { + // skip `all` aready handled above + if strings.ToLower(cap) == "all" { + continue + } + + if !stringutils.InSlice(allCaps, cap) { + return nil, fmt.Errorf("Unknown capability to add: %q", cap) + } + + // add cap if not already in the list + if !stringutils.InSlice(newCaps, cap) { + newCaps = append(newCaps, strings.ToUpper(cap)) + } + } + + return newCaps, nil +} diff --git a/daemon/execdriver/windows/checkoptions.go b/daemon/execdriver/windows/checkoptions.go new file mode 100644 index 00000000..cb67e8a2 --- /dev/null +++ b/daemon/execdriver/windows/checkoptions.go @@ -0,0 +1,36 @@ +// +build windows + +package windows + +import ( + "errors" + + "github.com/docker/docker/daemon/execdriver" +) + +func checkSupportedOptions(c *execdriver.Command) error { + // Windows doesn't support read-only root filesystem + if c.ReadonlyRootfs { + return errors.New("Windows does not support the read-only root filesystem option") + } + + // Windows doesn't support username + if c.ProcessConfig.User != "" { + return errors.New("Windows does not support the username option") + } + + // Windows doesn't support custom lxc options + if c.LxcConfig != nil { + return errors.New("Windows does not support lxc options") + } + + // Windows doesn't support ulimit + if c.Resources.Rlimits != nil { + return errors.New("Windows does not support ulimit options") + } + + // TODO Windows: Validate other fields which Windows doesn't support, factor + // out where applicable per platform. 
+ + return nil +} diff --git a/daemon/execdriver/windows/clean.go b/daemon/execdriver/windows/clean.go new file mode 100644 index 00000000..29ae6f14 --- /dev/null +++ b/daemon/execdriver/windows/clean.go @@ -0,0 +1,7 @@ +// +build windows + +package windows + +func (d *driver) Clean(id string) error { + return nil +} diff --git a/daemon/execdriver/windows/exec.go b/daemon/execdriver/windows/exec.go new file mode 100644 index 00000000..1de1c246 --- /dev/null +++ b/daemon/execdriver/windows/exec.go @@ -0,0 +1,144 @@ +// +build windows + +package windows + +import ( + "errors" + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/pkg/stringid" + "github.com/microsoft/hcsshim" + "github.com/natefinch/npipe" +) + +func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { + + var ( + inListen, outListen, errListen *npipe.PipeListener + term execdriver.Terminal + err error + randomID string = stringid.GenerateRandomID() + serverPipeFormat, clientPipeFormat string + pid uint32 + exitCode int32 + ) + + active := d.activeContainers[c.ID] + if active == nil { + return -1, fmt.Errorf("Exec - No active container exists with ID %s", c.ID) + } + + createProcessParms := hcsshim.CreateProcessParams{ + EmulateConsole: processConfig.Tty, // Note NOT c.ProcessConfig.Tty + WorkingDirectory: c.WorkingDir, + } + + // Configure the environment for the process // Note NOT c.ProcessConfig.Tty + createProcessParms.Environment = setupEnvironmentVariables(processConfig.Env) + + // We use another unique ID here for each exec instance otherwise it + // may conflict with the pipe name being used by RUN. + + // We use a different pipe name between real and dummy mode in the HCS + if dummyMode { + clientPipeFormat = `\\.\pipe\docker-exec-%[1]s-%[2]s-%[3]s` + serverPipeFormat = clientPipeFormat + } else { + clientPipeFormat = `\\.\pipe\docker-exec-%[2]s-%[3]s` + serverPipeFormat = `\\.\Containers\%[1]s\Device\NamedPipe\docker-exec-%[2]s-%[3]s` + } + + // Connect stdin + if pipes.Stdin != nil { + stdInPipe := fmt.Sprintf(serverPipeFormat, c.ID, randomID, "stdin") + createProcessParms.StdInPipe = fmt.Sprintf(clientPipeFormat, c.ID, randomID, "stdin") + + // Listen on the named pipe + inListen, err = npipe.Listen(stdInPipe) + if err != nil { + logrus.Errorf("stdin failed to listen on %s %s ", stdInPipe, err) + return -1, err + } + defer inListen.Close() + + // Launch a goroutine to do the accept. We do this so that we can + // cause an otherwise blocking goroutine to gracefully close when + // the caller (us) closes the listener + go stdinAccept(inListen, stdInPipe, pipes.Stdin) + } + + // Connect stdout + stdOutPipe := fmt.Sprintf(serverPipeFormat, c.ID, randomID, "stdout") + createProcessParms.StdOutPipe = fmt.Sprintf(clientPipeFormat, c.ID, randomID, "stdout") + + outListen, err = npipe.Listen(stdOutPipe) + if err != nil { + logrus.Errorf("stdout failed to listen on %s %s", stdOutPipe, err) + return -1, err + } + defer outListen.Close() + go stdouterrAccept(outListen, stdOutPipe, pipes.Stdout) + + // No stderr on TTY. 
Note NOT c.ProcessConfig.Tty + if !processConfig.Tty { + // Connect stderr + stdErrPipe := fmt.Sprintf(serverPipeFormat, c.ID, randomID, "stderr") + createProcessParms.StdErrPipe = fmt.Sprintf(clientPipeFormat, c.ID, randomID, "stderr") + + errListen, err = npipe.Listen(stdErrPipe) + if err != nil { + logrus.Errorf("Stderr failed to listen on %s %s", stdErrPipe, err) + return -1, err + } + defer errListen.Close() + go stdouterrAccept(errListen, stdErrPipe, pipes.Stderr) + } + + // While this should get caught earlier, just in case, validate that we + // have something to run. + if processConfig.Entrypoint == "" { + err = errors.New("No entrypoint specified") + logrus.Error(err) + return -1, err + } + + // Build the command line of the process + createProcessParms.CommandLine = processConfig.Entrypoint + for _, arg := range processConfig.Arguments { + logrus.Debugln("appending ", arg) + createProcessParms.CommandLine += " " + arg + } + logrus.Debugln("commandLine: ", createProcessParms.CommandLine) + + // Start the command running in the container. + pid, err = hcsshim.CreateProcessInComputeSystem(c.ID, createProcessParms) + + if err != nil { + logrus.Errorf("CreateProcessInComputeSystem() failed %s", err) + return -1, err + } + + // Note NOT c.ProcessConfig.Tty + if processConfig.Tty { + term = NewTtyConsole(c.ID, pid) + } else { + term = NewStdConsole() + } + processConfig.Terminal = term + + // Invoke the start callback + if startCallback != nil { + startCallback(&c.ProcessConfig, int(pid)) + } + + if exitCode, err = hcsshim.WaitForProcessInComputeSystem(c.ID, pid); err != nil { + logrus.Errorf("Failed to WaitForProcessInComputeSystem %s", err) + return -1, err + } + + // TODO Windows - Do something with this exit code + logrus.Debugln("Exiting Run() with ExitCode 0", c.ID) + return int(exitCode), nil +} diff --git a/daemon/execdriver/windows/getpids.go b/daemon/execdriver/windows/getpids.go new file mode 100644 index 00000000..b8f9c6f2 --- /dev/null +++ b/daemon/execdriver/windows/getpids.go @@ -0,0 +1,10 @@ +// +build windows + +package windows + +import "fmt" + +func (d *driver) GetPidsForContainer(id string) ([]int, error) { + // TODO Windows: Implementation required. + return nil, fmt.Errorf("GetPidsForContainer: GetPidsForContainer() not implemented") +} diff --git a/daemon/execdriver/windows/info.go b/daemon/execdriver/windows/info.go new file mode 100644 index 00000000..097f41e2 --- /dev/null +++ b/daemon/execdriver/windows/info.go @@ -0,0 +1,23 @@ +// +build windows + +package windows + +import "github.com/docker/docker/daemon/execdriver" + +type info struct { + ID string + driver *driver +} + +func (d *driver) Info(id string) execdriver.Info { + return &info{ + ID: id, + driver: d, + } +} + +func (i *info) IsRunning() bool { + var running bool + running = true // TODO Need an HCS API + return running +} diff --git a/daemon/execdriver/windows/namedpipes.go b/daemon/execdriver/windows/namedpipes.go new file mode 100644 index 00000000..67257e0b --- /dev/null +++ b/daemon/execdriver/windows/namedpipes.go @@ -0,0 +1,82 @@ +// +build windows + +package windows + +import ( + "io" + + "github.com/Sirupsen/logrus" + "github.com/natefinch/npipe" +) + +// stdinAccept runs as a go function. It waits for the container system +// to accept our offer of a named pipe for stdin. Once accepted, if we are +// running "attached" to the container (eg docker run -i), then we spin up +// a further thread to copy anything from the client into the container. +// +// Important design note. 
This function is run as a go function for a very +// good reason. The named pipe Accept call is blocking until one of two things +// happen. Either someone connects to it, or it is forcibly closed. Let's +// assume that no-one connects to it, the only way otherwise the Run() +// method would continue is by closing it. However, as that would be the same +// thread, it can't close it. Hence we run as another thread allowing Run() +// to close the named pipe. +func stdinAccept(inListen *npipe.PipeListener, pipeName string, copyfrom io.ReadCloser) { + + // Wait for the pipe to be connected to by the shim + logrus.Debugln("stdinAccept: Waiting on ", pipeName) + stdinConn, err := inListen.Accept() + if err != nil { + logrus.Errorf("Failed to accept on pipe %s %s", pipeName, err) + return + } + logrus.Debugln("Connected to ", stdinConn.RemoteAddr()) + + // Anything that comes from the client stdin should be copied + // across to the stdin named pipe of the container. + if copyfrom != nil { + go func() { + defer stdinConn.Close() + logrus.Debugln("Calling io.Copy on stdin") + bytes, err := io.Copy(stdinConn, copyfrom) + logrus.Debugf("Finished io.Copy on stdin bytes=%d err=%s pipe=%s", bytes, err, stdinConn.RemoteAddr()) + }() + } else { + defer stdinConn.Close() + } +} + +// stdouterrAccept runs as a go function. It waits for the container system to +// accept our offer of a named pipe - in fact two of them - one for stdout +// and one for stderr (we are called twice). Once the named pipe is accepted, +// if we are running "attached" to the container (eg docker run -i), then we +// spin up a further thread to copy anything from the containers output channels +// to the client. +func stdouterrAccept(outerrListen *npipe.PipeListener, pipeName string, copyto io.Writer) { + + // Wait for the pipe to be connected to by the shim + logrus.Debugln("out/err: Waiting on ", pipeName) + outerrConn, err := outerrListen.Accept() + if err != nil { + logrus.Errorf("Failed to accept on pipe %s %s", pipeName, err) + return + } + logrus.Debugln("Connected to ", outerrConn.RemoteAddr()) + + // Anything that comes from the container named pipe stdout/err should be copied + // across to the stdout/err of the client + if copyto != nil { + go func() { + defer outerrConn.Close() + logrus.Debugln("Calling io.Copy on ", pipeName) + bytes, err := io.Copy(copyto, outerrConn) + logrus.Debugf("Copied %d bytes from pipe=%s", bytes, outerrConn.RemoteAddr()) + if err != nil { + // Not fatal, just debug log it + logrus.Debugf("Error hit during copy %s", err) + } + }() + } else { + defer outerrConn.Close() + } +} diff --git a/daemon/execdriver/windows/pauseunpause.go b/daemon/execdriver/windows/pauseunpause.go new file mode 100644 index 00000000..35f12528 --- /dev/null +++ b/daemon/execdriver/windows/pauseunpause.go @@ -0,0 +1,17 @@ +// +build windows + +package windows + +import ( + "fmt" + + "github.com/docker/docker/daemon/execdriver" +) + +func (d *driver) Pause(c *execdriver.Command) error { + return fmt.Errorf("Windows: Containers cannot be paused") +} + +func (d *driver) Unpause(c *execdriver.Command) error { + return fmt.Errorf("Windows: Containers cannot be paused") +} diff --git a/daemon/execdriver/windows/run.go b/daemon/execdriver/windows/run.go new file mode 100644 index 00000000..88eaa7f6 --- /dev/null +++ b/daemon/execdriver/windows/run.go @@ -0,0 +1,271 @@ +// +build windows + +package windows + +// Note this is alpha code for the bring up of containers on Windows. 
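+//
+// In outline, Run() below drives the HCS through one container lifecycle:
+// CreateComputeSystem -> StartComputeSystem -> CreateProcessInComputeSystem
+// -> WaitForProcessInComputeSystem, with ShutdownComputeSystem (or, in
+// terminate mode, TerminateComputeSystem) deferred for cleanup.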
+ +import ( + "encoding/json" + "errors" + "fmt" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/execdriver" + "github.com/microsoft/hcsshim" + "github.com/natefinch/npipe" +) + +type layer struct { + Id string + Path string +} + +type defConfig struct { + DefFile string +} + +type networkConnection struct { + NetworkName string + EnableNat bool +} +type networkSettings struct { + MacAddress string +} + +type device struct { + DeviceType string + Connection interface{} + Settings interface{} +} + +type containerInit struct { + SystemType string + Name string + IsDummy bool + VolumePath string + Devices []device + IgnoreFlushesDuringBoot bool + LayerFolderPath string + Layers []layer +} + +func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) { + + var ( + term execdriver.Terminal + err error + inListen, outListen, errListen *npipe.PipeListener + ) + + // Make sure the client isn't asking for options which aren't supported + err = checkSupportedOptions(c) + if err != nil { + return execdriver.ExitStatus{ExitCode: -1}, err + } + + cu := &containerInit{ + SystemType: "Container", + Name: c.ID, + IsDummy: dummyMode, + VolumePath: c.Rootfs, + IgnoreFlushesDuringBoot: c.FirstStart, + LayerFolderPath: c.LayerFolder, + } + + for i := 0; i < len(c.LayerPaths); i++ { + cu.Layers = append(cu.Layers, layer{ + Id: hcsshim.NewGUID(c.LayerPaths[i]).ToString(), + Path: c.LayerPaths[i], + }) + } + + if c.Network.Interface != nil { + dev := device{ + DeviceType: "Network", + Connection: &networkConnection{ + NetworkName: c.Network.Interface.Bridge, + EnableNat: false, + }, + } + + if c.Network.Interface.MacAddress != "" { + windowsStyleMAC := strings.Replace( + c.Network.Interface.MacAddress, ":", "-", -1) + dev.Settings = networkSettings{ + MacAddress: windowsStyleMAC, + } + } + + logrus.Debugf("Virtual switch '%s', mac='%s'", c.Network.Interface.Bridge, c.Network.Interface.MacAddress) + + cu.Devices = append(cu.Devices, dev) + } else { + logrus.Debugln("No network interface") + } + + configurationb, err := json.Marshal(cu) + if err != nil { + return execdriver.ExitStatus{ExitCode: -1}, err + } + + configuration := string(configurationb) + + err = hcsshim.CreateComputeSystem(c.ID, configuration) + if err != nil { + logrus.Debugln("Failed to create temporary container ", err) + return execdriver.ExitStatus{ExitCode: -1}, err + } + + // Start the container + logrus.Debugln("Starting container ", c.ID) + err = hcsshim.StartComputeSystem(c.ID) + if err != nil { + logrus.Errorf("Failed to start compute system: %s", err) + return execdriver.ExitStatus{ExitCode: -1}, err + } + defer func() { + // Stop the container + + if terminateMode { + logrus.Debugf("Terminating container %s", c.ID) + if err := hcsshim.TerminateComputeSystem(c.ID); err != nil { + // IMPORTANT: Don't fail if fails to change state. It could already + // have been stopped through kill(). + // Otherwise, the docker daemon will hang in job wait() + logrus.Warnf("Ignoring error from TerminateComputeSystem %s", err) + } + } else { + logrus.Debugf("Shutting down container %s", c.ID) + if err := hcsshim.ShutdownComputeSystem(c.ID); err != nil { + // IMPORTANT: Don't fail if fails to change state. It could already + // have been stopped through kill(). 
+ // Otherwise, the docker daemon will hang in job wait() + logrus.Warnf("Ignoring error from ShutdownComputeSystem %s", err) + } + } + }() + + // We use a different pipe name between real and dummy mode in the HCS + var serverPipeFormat, clientPipeFormat string + if dummyMode { + clientPipeFormat = `\\.\pipe\docker-run-%[1]s-%[2]s` + serverPipeFormat = clientPipeFormat + } else { + clientPipeFormat = `\\.\pipe\docker-run-%[2]s` + serverPipeFormat = `\\.\Containers\%[1]s\Device\NamedPipe\docker-run-%[2]s` + } + + createProcessParms := hcsshim.CreateProcessParams{ + EmulateConsole: c.ProcessConfig.Tty, + WorkingDirectory: c.WorkingDir, + ConsoleSize: c.ProcessConfig.ConsoleSize, + } + + // Configure the environment for the process + createProcessParms.Environment = setupEnvironmentVariables(c.ProcessConfig.Env) + + // Connect stdin + if pipes.Stdin != nil { + stdInPipe := fmt.Sprintf(serverPipeFormat, c.ID, "stdin") + createProcessParms.StdInPipe = fmt.Sprintf(clientPipeFormat, c.ID, "stdin") + + // Listen on the named pipe + inListen, err = npipe.Listen(stdInPipe) + if err != nil { + logrus.Errorf("stdin failed to listen on %s err=%s", stdInPipe, err) + return execdriver.ExitStatus{ExitCode: -1}, err + } + defer inListen.Close() + + // Launch a goroutine to do the accept. We do this so that we can + // cause an otherwise blocking goroutine to gracefully close when + // the caller (us) closes the listener + go stdinAccept(inListen, stdInPipe, pipes.Stdin) + } + + // Connect stdout + stdOutPipe := fmt.Sprintf(serverPipeFormat, c.ID, "stdout") + createProcessParms.StdOutPipe = fmt.Sprintf(clientPipeFormat, c.ID, "stdout") + + outListen, err = npipe.Listen(stdOutPipe) + if err != nil { + logrus.Errorf("stdout failed to listen on %s err=%s", stdOutPipe, err) + return execdriver.ExitStatus{ExitCode: -1}, err + } + defer outListen.Close() + go stdouterrAccept(outListen, stdOutPipe, pipes.Stdout) + + // No stderr on TTY. + if !c.ProcessConfig.Tty { + // Connect stderr + stdErrPipe := fmt.Sprintf(serverPipeFormat, c.ID, "stderr") + createProcessParms.StdErrPipe = fmt.Sprintf(clientPipeFormat, c.ID, "stderr") + errListen, err = npipe.Listen(stdErrPipe) + if err != nil { + logrus.Errorf("stderr failed to listen on %s err=%s", stdErrPipe, err) + return execdriver.ExitStatus{ExitCode: -1}, err + } + defer errListen.Close() + go stdouterrAccept(errListen, stdErrPipe, pipes.Stderr) + } + + // This should get caught earlier, but just in case - validate that we + // have something to run + if c.ProcessConfig.Entrypoint == "" { + err = errors.New("No entrypoint specified") + logrus.Error(err) + return execdriver.ExitStatus{ExitCode: -1}, err + } + + // Build the command line of the process + createProcessParms.CommandLine = c.ProcessConfig.Entrypoint + for _, arg := range c.ProcessConfig.Arguments { + logrus.Debugln("appending ", arg) + createProcessParms.CommandLine += " " + arg + } + logrus.Debugf("CommandLine: %s", createProcessParms.CommandLine) + + // Start the command running in the container. 
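+	// Note that the HCS is handed a single command-line string rather than
+	// an argv array, which is why the arguments were joined with spaces
+	// above; arguments that themselves contain spaces would need quoting
+	// before being appended.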
+	var pid uint32
+	pid, err = hcsshim.CreateProcessInComputeSystem(c.ID, createProcessParms)
+
+	if err != nil {
+		logrus.Errorf("CreateProcessInComputeSystem() failed %s", err)
+		return execdriver.ExitStatus{ExitCode: -1}, err
+	}
+
+	// Save the PID as we'll need this in Kill()
+	logrus.Debugf("PID %d", pid)
+	c.ContainerPid = int(pid)
+
+	if c.ProcessConfig.Tty {
+		term = NewTtyConsole(c.ID, pid)
+	} else {
+		term = NewStdConsole()
+	}
+	c.ProcessConfig.Terminal = term
+
+	// Maintain our list of active containers. We'll need this later for exec
+	// and other commands.
+	d.Lock()
+	d.activeContainers[c.ID] = &activeContainer{
+		command: c,
+	}
+	d.Unlock()
+
+	// Invoke the start callback
+	if startCallback != nil {
+		startCallback(&c.ProcessConfig, int(pid))
+	}
+
+	var exitCode int32
+	exitCode, err = hcsshim.WaitForProcessInComputeSystem(c.ID, pid)
+	if err != nil {
+		logrus.Errorf("Failed to WaitForProcessInComputeSystem %s", err)
+		return execdriver.ExitStatus{ExitCode: -1}, err
+	}
+
+	logrus.Debugf("Exiting Run() exitCode %d id=%s", exitCode, c.ID)
+	return execdriver.ExitStatus{ExitCode: int(exitCode)}, nil
+}
diff --git a/daemon/execdriver/windows/stats.go b/daemon/execdriver/windows/stats.go
new file mode 100644
index 00000000..b73cdc8f
--- /dev/null
+++ b/daemon/execdriver/windows/stats.go
@@ -0,0 +1,13 @@
+// +build windows
+
+package windows
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/daemon/execdriver"
+)
+
+func (d *driver) Stats(id string) (*execdriver.ResourceStats, error) {
+	return nil, fmt.Errorf("Windows: Stats not implemented")
+}
diff --git a/daemon/execdriver/windows/stdconsole.go b/daemon/execdriver/windows/stdconsole.go
new file mode 100644
index 00000000..abe9e944
--- /dev/null
+++ b/daemon/execdriver/windows/stdconsole.go
@@ -0,0 +1,21 @@
+// +build windows
+
+package windows
+
+// StdConsole is for when using a container non-interactively
+type StdConsole struct {
+}
+
+func NewStdConsole() *StdConsole {
+	return &StdConsole{}
+}
+
+func (s *StdConsole) Resize(h, w int) error {
+	// we do not need to resize a non tty
+	return nil
+}
+
+func (s *StdConsole) Close() error {
+	// nothing to close here
+	return nil
+}
diff --git a/daemon/execdriver/windows/terminatekill.go b/daemon/execdriver/windows/terminatekill.go
new file mode 100644
index 00000000..f19f16fd
--- /dev/null
+++ b/daemon/execdriver/windows/terminatekill.go
@@ -0,0 +1,45 @@
+// +build windows
+
+package windows
+
+import (
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/microsoft/hcsshim"
+)
+
+func (d *driver) Terminate(p *execdriver.Command) error {
+	logrus.Debugf("WindowsExec: Terminate() id=%s", p.ID)
+	return kill(p.ID, p.ContainerPid)
+}
+
+func (d *driver) Kill(p *execdriver.Command, sig int) error {
+	logrus.Debugf("WindowsExec: Kill() id=%s sig=%d", p.ID, sig)
+	return kill(p.ID, p.ContainerPid)
+}
+
+func kill(id string, pid int) error {
+	logrus.Debugln("kill() ", id, pid)
+	var err error
+
+	// Terminate Process
+	if err = hcsshim.TerminateProcessInComputeSystem(id, uint32(pid)); err != nil {
+		logrus.Warnf("Failed to terminate pid %d in %s: %s", pid, id, err)
+		// Ignore errors
+		err = nil
+	}
+
+	if terminateMode {
+		// Terminate the compute system
+		if err = hcsshim.TerminateComputeSystem(id); err != nil {
+			logrus.Errorf("Failed to terminate %s - %s", id, err)
+		}
+
+	} else {
+		// Shutdown the compute system
+		if err = hcsshim.ShutdownComputeSystem(id); err != nil {
+			logrus.Errorf("Failed to shutdown %s - %s", id, err)
+		}
+	}
+
+	return err
+}
diff --git a/daemon/execdriver/windows/ttyconsole.go b/daemon/execdriver/windows/ttyconsole.go
new file mode 100644
index 00000000..558352d2
--- /dev/null
+++ b/daemon/execdriver/windows/ttyconsole.go
@@ -0,0 +1,29 @@
+// +build windows
+
+package windows
+
+import (
+	"github.com/microsoft/hcsshim"
+)
+
+// TtyConsole is for when using a container interactively
+type TtyConsole struct {
+	id        string
+	processid uint32
+}
+
+func NewTtyConsole(id string, processid uint32) *TtyConsole {
+	tty := &TtyConsole{
+		id:        id,
+		processid: processid,
+	}
+	return tty
+}
+
+func (t *TtyConsole) Resize(h, w int) error {
+	return hcsshim.ResizeConsoleInComputeSystem(t.id, t.processid, h, w)
+}
+
+func (t *TtyConsole) Close() error {
+	return nil
+}
diff --git a/daemon/execdriver/windows/unsupported.go b/daemon/execdriver/windows/unsupported.go
new file mode 100644
index 00000000..0a492e12
--- /dev/null
+++ b/daemon/execdriver/windows/unsupported.go
@@ -0,0 +1,13 @@
+// +build !windows
+
+package windows
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/daemon/execdriver"
+)
+
+func NewDriver(root, initPath string) (execdriver.Driver, error) {
+	return nil, fmt.Errorf("Windows driver not supported on non-Windows")
+}
diff --git a/daemon/execdriver/windows/windows.go b/daemon/execdriver/windows/windows.go
new file mode 100644
index 00000000..5e0e304f
--- /dev/null
+++ b/daemon/execdriver/windows/windows.go
@@ -0,0 +1,90 @@
+// +build windows
+
+package windows
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/autogen/dockerversion"
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/pkg/parsers"
+)
+
+// This is a daemon development variable only and should not be
+// used for running production containers on Windows.
+var dummyMode bool
+
+// This allows the daemon to terminate containers rather than shut them down
+var terminateMode bool
+
+var (
+	DriverName = "Windows 1854"
+	Version    = dockerversion.VERSION + " " + dockerversion.GITCOMMIT
+)
+
+type activeContainer struct {
+	command *execdriver.Command
+}
+
+type driver struct {
+	root             string
+	initPath         string
+	activeContainers map[string]*activeContainer
+	sync.Mutex
+}
+
+func (d *driver) Name() string {
+	return fmt.Sprintf("%s %s", DriverName, Version)
+}
+
+func NewDriver(root, initPath string, options []string) (*driver, error) {
+
+	for _, option := range options {
+		key, val, err := parsers.ParseKeyValueOpt(option)
+		if err != nil {
+			return nil, err
+		}
+		key = strings.ToLower(key)
+		switch key {
+
+		case "dummy":
+			switch val {
+			case "1":
+				dummyMode = true
+				logrus.Warn("Using dummy mode in Windows exec driver. This is for development use only!")
+			}
+
+		case "terminate":
+			switch val {
+			case "1":
+				terminateMode = true
+				logrus.Warn("Using terminate mode in Windows exec driver. This is for testing purposes only.")
+			}
+
+		default:
+			return nil, fmt.Errorf("Unrecognised exec driver option %s", key)
+		}
+	}
+
+	return &driver{
+		root:             root,
+		initPath:         initPath,
+		activeContainers: make(map[string]*activeContainer),
+	}, nil
+}
+
+// setupEnvironmentVariables converts a string array of environment variables
+// into a map as required by the HCS. The source array is in the format [k1=v1] [k2=v2] etc.
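+//
+// For example (illustrative values only):
+//
+//	setupEnvironmentVariables([]string{"TERM=cygwin", "PATH=C:\\Windows"})
+//
+// returns {"TERM": "cygwin", "PATH": "C:\\Windows"}. Entries whose value
+// itself contains an "=" fail the len(arr) == 2 check below and are
+// silently dropped.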
+func setupEnvironmentVariables(a []string) map[string]string { + r := make(map[string]string) + for _, s := range a { + arr := strings.Split(s, "=") + if len(arr) == 2 { + r[arr[0]] = arr[1] + } + } + return r +} diff --git a/daemon/export.go b/daemon/export.go new file mode 100644 index 00000000..0286b496 --- /dev/null +++ b/daemon/export.go @@ -0,0 +1,25 @@ +package daemon + +import ( + "fmt" + "io" +) + +func (daemon *Daemon) ContainerExport(name string, out io.Writer) error { + container, err := daemon.Get(name) + if err != nil { + return err + } + + data, err := container.Export() + if err != nil { + return fmt.Errorf("%s: %s", name, err) + } + defer data.Close() + + // Stream the entire contents of the container (basically a volatile snapshot) + if _, err := io.Copy(out, data); err != nil { + return fmt.Errorf("%s: %s", name, err) + } + return nil +} diff --git a/daemon/graphdriver/aufs/aufs.go b/daemon/graphdriver/aufs/aufs.go new file mode 100644 index 00000000..eec4deee --- /dev/null +++ b/daemon/graphdriver/aufs/aufs.go @@ -0,0 +1,511 @@ +// +build linux + +/* + +aufs driver directory structure + + . + ├── layers // Metadata of layers + │ ├── 1 + │ ├── 2 + │ └── 3 + ├── diff // Content of the layer + │ ├── 1 // Contains layers that need to be mounted for the id + │ ├── 2 + │ └── 3 + └── mnt // Mount points for the rw layers to be mounted + ├── 1 + ├── 2 + └── 3 + +*/ + +package aufs + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "strings" + "sync" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/directory" + mountpk "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/opencontainers/runc/libcontainer/label" +) + +var ( + ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems") + incompatibleFsMagic = []graphdriver.FsMagic{ + graphdriver.FsMagicBtrfs, + graphdriver.FsMagicAufs, + } + backingFs = "" + + enableDirpermLock sync.Once + enableDirperm bool +) + +func init() { + graphdriver.Register("aufs", Init) +} + +type Driver struct { + root string + sync.Mutex // Protects concurrent modification to active + active map[string]int +} + +// New returns a new AUFS driver. +// An error is returned if AUFS is not supported. 
+func Init(root string, options []string) (graphdriver.Driver, error) { + + // Try to load the aufs kernel module + if err := supportsAufs(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + fsMagic, err := graphdriver.GetFSMagic(root) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + for _, magic := range incompatibleFsMagic { + if fsMagic == magic { + return nil, graphdriver.ErrIncompatibleFS + } + } + + paths := []string{ + "mnt", + "diff", + "layers", + } + + a := &Driver{ + root: root, + active: make(map[string]int), + } + + // Create the root aufs driver dir and return + // if it already exists + // If not populate the dir structure + if err := os.MkdirAll(root, 0755); err != nil { + if os.IsExist(err) { + return a, nil + } + return nil, err + } + + if err := mountpk.MakePrivate(root); err != nil { + return nil, err + } + + for _, p := range paths { + if err := os.MkdirAll(path.Join(root, p), 0755); err != nil { + return nil, err + } + } + return a, nil +} + +// Return a nil error if the kernel supports aufs +// We cannot modprobe because inside dind modprobe fails +// to run +func supportsAufs() error { + // We can try to modprobe aufs first before looking at + // proc/filesystems for when aufs is supported + exec.Command("modprobe", "aufs").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if strings.Contains(s.Text(), "aufs") { + return nil + } + } + return ErrAufsNotSupported +} + +func (a *Driver) rootPath() string { + return a.root +} + +func (*Driver) String() string { + return "aufs" +} + +func (a *Driver) Status() [][2]string { + ids, _ := loadIds(path.Join(a.rootPath(), "layers")) + return [][2]string{ + {"Root Dir", a.rootPath()}, + {"Backing Filesystem", backingFs}, + {"Dirs", fmt.Sprintf("%d", len(ids))}, + {"Dirperm1 Supported", fmt.Sprintf("%v", useDirperm())}, + } +} + +func (a *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Exists returns true if the given id is registered with +// this driver +func (a *Driver) Exists(id string) bool { + if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil { + return false + } + return true +} + +// Three folders are created for each id +// mnt, layers, and diff +func (a *Driver) Create(id, parent string) error { + if err := a.createDirsFor(id); err != nil { + return err + } + // Write the layers metadata + f, err := os.Create(path.Join(a.rootPath(), "layers", id)) + if err != nil { + return err + } + defer f.Close() + + if parent != "" { + ids, err := getParentIds(a.rootPath(), parent) + if err != nil { + return err + } + + if _, err := fmt.Fprintln(f, parent); err != nil { + return err + } + for _, i := range ids { + if _, err := fmt.Fprintln(f, i); err != nil { + return err + } + } + } + return nil +} + +func (a *Driver) createDirsFor(id string) error { + paths := []string{ + "mnt", + "diff", + } + + for _, p := range paths { + if err := os.MkdirAll(path.Join(a.rootPath(), p, id), 0755); err != nil { + return err + } + } + return nil +} + +// Unmount and remove the dir information +func (a *Driver) Remove(id string) error { + // Protect the a.active from concurrent access + a.Lock() + defer a.Unlock() + + if a.active[id] != 0 { + logrus.Errorf("Removing active id %s", id) + } + + // Make sure the dir is umounted first + if err := a.unmount(id); err != nil { + return err + } + tmpDirs := 
[]string{
+		"mnt",
+		"diff",
+	}
+
+	// Atomically remove each directory in turn by first moving it out of the
+	// way (so that docker doesn't find it anymore) before doing removal of
+	// the whole tree.
+	for _, p := range tmpDirs {
+
+		realPath := path.Join(a.rootPath(), p, id)
+		tmpPath := path.Join(a.rootPath(), p, fmt.Sprintf("%s-removing", id))
+		if err := os.Rename(realPath, tmpPath); err != nil && !os.IsNotExist(err) {
+			return err
+		}
+		defer os.RemoveAll(tmpPath)
+	}
+
+	// Remove the layers file for the id
+	if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	return nil
+}
+
+// Return the rootfs path for the id.
+// This will mount the dir at its given path.
+func (a *Driver) Get(id, mountLabel string) (string, error) {
+	ids, err := getParentIds(a.rootPath(), id)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return "", err
+		}
+		ids = []string{}
+	}
+
+	// Protect the a.active from concurrent access
+	a.Lock()
+	defer a.Unlock()
+
+	count := a.active[id]
+
+	// If a dir does not have a parent (no layers), do not try to mount;
+	// just return the diff path to the data.
+	out := path.Join(a.rootPath(), "diff", id)
+	if len(ids) > 0 {
+		out = path.Join(a.rootPath(), "mnt", id)
+
+		if count == 0 {
+			if err := a.mount(id, mountLabel); err != nil {
+				return "", err
+			}
+		}
+	}
+
+	a.active[id] = count + 1
+
+	return out, nil
+}
+
+func (a *Driver) Put(id string) error {
+	// Protect the a.active from concurrent access
+	a.Lock()
+	defer a.Unlock()
+
+	if count := a.active[id]; count > 1 {
+		a.active[id] = count - 1
+	} else {
+		ids, _ := getParentIds(a.rootPath(), id)
+		// We only mounted if there were parents
+		if len(ids) > 0 {
+			a.unmount(id)
+		}
+		delete(a.active, id)
+	}
+	return nil
+}
+
+// Diff produces an archive of the changes between the specified
+// layer and its parent layer which may be "".
+func (a *Driver) Diff(id, parent string) (archive.Archive, error) {
+	// AUFS doesn't need the parent layer to produce a diff.
+	return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
+		Compression:     archive.Uncompressed,
+		ExcludePatterns: []string{".wh..wh.*"},
+	})
+}
+
+func (a *Driver) applyDiff(id string, diff archive.ArchiveReader) error {
+	return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), nil)
+}
+
+// DiffSize calculates the changes between the specified id
+// and its parent and returns the size in bytes of the changes
+// relative to its base filesystem directory.
+func (a *Driver) DiffSize(id, parent string) (size int64, err error) {
+	// AUFS doesn't need the parent layer to calculate the diff size.
+	return directory.Size(path.Join(a.rootPath(), "diff", id))
+}
+
+// ApplyDiff extracts the changeset from the given diff into the
+// layer with the specified id and parent, returning the size of the
+// new layer in bytes.
+func (a *Driver) ApplyDiff(id, parent string, diff archive.ArchiveReader) (size int64, err error) {
+	// AUFS doesn't need the parent id to apply the diff.
+	if err = a.applyDiff(id, diff); err != nil {
+		return
+	}
+
+	return a.DiffSize(id, parent)
+}
+
+// Changes produces a list of changes between the specified layer
+// and its parent layer. If parent is "", then all changes will be ADD changes.
+func (a *Driver) Changes(id, parent string) ([]archive.Change, error) {
+	// AUFS doesn't have snapshots, so we need to get changes from all parent
+	// layers.
+ layers, err := a.getParentLayerPaths(id) + if err != nil { + return nil, err + } + return archive.Changes(layers, path.Join(a.rootPath(), "diff", id)) +} + +func (a *Driver) getParentLayerPaths(id string) ([]string, error) { + parentIds, err := getParentIds(a.rootPath(), id) + if err != nil { + return nil, err + } + layers := make([]string, len(parentIds)) + + // Get the diff paths for all the parent ids + for i, p := range parentIds { + layers[i] = path.Join(a.rootPath(), "diff", p) + } + return layers, nil +} + +func (a *Driver) mount(id, mountLabel string) error { + // If the id is mounted or we get an error return + if mounted, err := a.mounted(id); err != nil || mounted { + return err + } + + var ( + target = path.Join(a.rootPath(), "mnt", id) + rw = path.Join(a.rootPath(), "diff", id) + ) + + layers, err := a.getParentLayerPaths(id) + if err != nil { + return err + } + + if err := a.aufsMount(layers, rw, target, mountLabel); err != nil { + return fmt.Errorf("error creating aufs mount to %s: %v", target, err) + } + return nil +} + +func (a *Driver) unmount(id string) error { + if mounted, err := a.mounted(id); err != nil || !mounted { + return err + } + target := path.Join(a.rootPath(), "mnt", id) + return Unmount(target) +} + +func (a *Driver) mounted(id string) (bool, error) { + target := path.Join(a.rootPath(), "mnt", id) + return mountpk.Mounted(target) +} + +// During cleanup aufs needs to unmount all mountpoints +func (a *Driver) Cleanup() error { + ids, err := loadIds(path.Join(a.rootPath(), "layers")) + if err != nil { + return err + } + + for _, id := range ids { + if err := a.unmount(id); err != nil { + logrus.Errorf("Unmounting %s: %s", stringid.TruncateID(id), err) + } + } + + return mountpk.Unmount(a.root) +} + +func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) { + defer func() { + if err != nil { + Unmount(target) + } + }() + + // Mount options are clipped to page size(4096 bytes). If there are more + // layers then these are remounted individually using append. + + offset := 54 + if useDirperm() { + offset += len("dirperm1") + } + b := make([]byte, syscall.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel + bp := copy(b, fmt.Sprintf("br:%s=rw", rw)) + + firstMount := true + i := 0 + + for { + for ; i < len(ro); i++ { + layer := fmt.Sprintf(":%s=ro+wh", ro[i]) + + if firstMount { + if bp+len(layer) > len(b) { + break + } + bp += copy(b[bp:], layer) + } else { + data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel) + if err = mount("none", target, "aufs", MsRemount, data); err != nil { + return + } + } + } + + if firstMount { + opts := "dio,xino=/dev/shm/aufs.xino" + if useDirperm() { + opts += ",dirperm1" + } + data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel) + if err = mount("none", target, "aufs", 0, data); err != nil { + return + } + firstMount = false + } + + if i == len(ro) { + break + } + } + + return +} + +// useDirperm checks dirperm1 mount option can be used with the current +// version of aufs. 
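+// It probes support by creating a throwaway aufs mount with dirperm1 set
+// and reporting whether the mount succeeds; the result is computed only
+// once (guarded by enableDirpermLock) and cached in enableDirperm.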
+func useDirperm() bool { + enableDirpermLock.Do(func() { + base, err := ioutil.TempDir("", "docker-aufs-base") + if err != nil { + logrus.Errorf("error checking dirperm1: %v", err) + return + } + defer os.RemoveAll(base) + + union, err := ioutil.TempDir("", "docker-aufs-union") + if err != nil { + logrus.Errorf("error checking dirperm1: %v", err) + return + } + defer os.RemoveAll(union) + + opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base) + if err := mount("none", union, "aufs", 0, opts); err != nil { + return + } + enableDirperm = true + if err := Unmount(union); err != nil { + logrus.Errorf("error checking dirperm1: failed to unmount %v", err) + } + }) + return enableDirperm +} diff --git a/daemon/graphdriver/aufs/aufs_test.go b/daemon/graphdriver/aufs/aufs_test.go new file mode 100644 index 00000000..ab2b9941 --- /dev/null +++ b/daemon/graphdriver/aufs/aufs_test.go @@ -0,0 +1,734 @@ +// +build linux + +package aufs + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "io/ioutil" + "os" + "path" + "testing" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +var ( + tmpOuter = path.Join(os.TempDir(), "aufs-tests") + tmp = path.Join(tmpOuter, "aufs") +) + +func init() { + reexec.Init() +} + +func testInit(dir string, t *testing.T) graphdriver.Driver { + d, err := Init(dir, nil) + if err != nil { + if err == graphdriver.ErrNotSupported { + t.Skip(err) + } else { + t.Fatal(err) + } + } + return d +} + +func newDriver(t *testing.T) *Driver { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + + d := testInit(tmp, t) + return d.(*Driver) +} + +func TestNewDriver(t *testing.T) { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + + d := testInit(tmp, t) + defer os.RemoveAll(tmp) + if d == nil { + t.Fatalf("Driver should not be nil") + } +} + +func TestAufsString(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if d.String() != "aufs" { + t.Fatalf("Expected aufs got %s", d.String()) + } +} + +func TestCreateDirStructure(t *testing.T) { + newDriver(t) + defer os.RemoveAll(tmp) + + paths := []string{ + "mnt", + "layers", + "diff", + } + + for _, p := range paths { + if _, err := os.Stat(path.Join(tmp, p)); err != nil { + t.Fatal(err) + } + } +} + +// We should be able to create two drivers with the same dir structure +func TestNewDriverFromExistingDir(t *testing.T) { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + + testInit(tmp, t) + testInit(tmp, t) + os.RemoveAll(tmp) +} + +func TestCreateNewDir(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } +} + +func TestCreateNewDirStructure(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + paths := []string{ + "mnt", + "diff", + "layers", + } + + for _, p := range paths { + if _, err := os.Stat(path.Join(tmp, p, "1")); err != nil { + t.Fatal(err) + } + } +} + +func TestRemoveImage(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + if err := d.Remove("1"); err != nil { + t.Fatal(err) + } + + paths := []string{ + "mnt", + "diff", + "layers", + } + + for _, p := range paths { + if _, err := os.Stat(path.Join(tmp, p, "1")); err == nil { + t.Fatalf("Error should not be nil because dirs with id 1 should be delted: %s", p) + } + } +} + +func 
TestGetWithoutParent(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + expected := path.Join(tmp, "diff", "1") + if diffPath != expected { + t.Fatalf("Expected path %s got %s", expected, diffPath) + } +} + +func TestCleanupWithNoDirs(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } +} + +func TestCleanupWithDir(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } +} + +func TestMountedFalseResponse(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + response, err := d.mounted("1") + if err != nil { + t.Fatal(err) + } + + if response != false { + t.Fatalf("Response if dir id 1 is mounted should be false") + } +} + +func TestMountedTrueReponse(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1"); err != nil { + t.Fatal(err) + } + + _, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + + response, err := d.mounted("2") + if err != nil { + t.Fatal(err) + } + + if response != true { + t.Fatalf("Response if dir id 2 is mounted should be true") + } +} + +func TestMountWithParent(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1"); err != nil { + t.Fatal(err) + } + + defer func() { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + }() + + mntPath, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + if mntPath == "" { + t.Fatal("mntPath should not be empty string") + } + + expected := path.Join(tmp, "mnt", "2") + if mntPath != expected { + t.Fatalf("Expected %s got %s", expected, mntPath) + } +} + +func TestRemoveMountedDir(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1"); err != nil { + t.Fatal(err) + } + + defer func() { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + }() + + mntPath, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + if mntPath == "" { + t.Fatal("mntPath should not be empty string") + } + + mounted, err := d.mounted("2") + if err != nil { + t.Fatal(err) + } + + if !mounted { + t.Fatalf("Dir id 2 should be mounted") + } + + if err := d.Remove("2"); err != nil { + t.Fatal(err) + } +} + +func TestCreateWithInvalidParent(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "docker"); err == nil { + t.Fatalf("Error should not be nil with parent does not exist") + } +} + +func TestGetDiff(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + f.Close() + + a, err := d.Diff("1", "") + if err != nil { + t.Fatal(err) + } + if a == nil { + t.Fatalf("Archive should not be 
nil") + } +} + +func TestChanges(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1"); err != nil { + t.Fatal(err) + } + + defer func() { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + }() + + mntPoint, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + + // Create a file to save in the mountpoint + f, err := os.Create(path.Join(mntPoint, "test.txt")) + if err != nil { + t.Fatal(err) + } + + if _, err := f.WriteString("testline"); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + + changes, err := d.Changes("2", "") + if err != nil { + t.Fatal(err) + } + if len(changes) != 1 { + t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) + } + change := changes[0] + + expectedPath := "/test.txt" + if change.Path != expectedPath { + t.Fatalf("Expected path %s got %s", expectedPath, change.Path) + } + + if change.Kind != archive.ChangeAdd { + t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) + } + + if err := d.Create("3", "2"); err != nil { + t.Fatal(err) + } + mntPoint, err = d.Get("3", "") + if err != nil { + t.Fatal(err) + } + + // Create a file to save in the mountpoint + f, err = os.Create(path.Join(mntPoint, "test2.txt")) + if err != nil { + t.Fatal(err) + } + + if _, err := f.WriteString("testline"); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + + changes, err = d.Changes("3", "") + if err != nil { + t.Fatal(err) + } + + if len(changes) != 1 { + t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) + } + change = changes[0] + + expectedPath = "/test2.txt" + if change.Path != expectedPath { + t.Fatalf("Expected path %s got %s", expectedPath, change.Path) + } + + if change.Kind != archive.ChangeAdd { + t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) + } +} + +func TestDiffSize(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + s, err := f.Stat() + if err != nil { + t.Fatal(err) + } + size = s.Size() + if err := f.Close(); err != nil { + t.Fatal(err) + } + + diffSize, err := d.DiffSize("1", "") + if err != nil { + t.Fatal(err) + } + if diffSize != size { + t.Fatalf("Expected size to be %d got %d", size, diffSize) + } +} + +func TestChildDiffSize(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + s, err := f.Stat() + if err != nil { + t.Fatal(err) + } + size = s.Size() + if err := f.Close(); err != nil { + t.Fatal(err) + } + + diffSize, err := d.DiffSize("1", "") + if err != nil { + t.Fatal(err) + } + if diffSize != size { + t.Fatalf("Expected size to be %d got %d", size, diffSize) + } + + if err := d.Create("2", "1"); err != nil { + t.Fatal(err) + 
} + + diffSize, err = d.DiffSize("2", "") + if err != nil { + t.Fatal(err) + } + // The diff size for the child should be zero + if diffSize != 0 { + t.Fatalf("Expected size to be %d got %d", 0, diffSize) + } +} + +func TestExists(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + if d.Exists("none") { + t.Fatal("id name should not exist in the driver") + } + + if !d.Exists("1") { + t.Fatal("id 1 should exist in the driver") + } +} + +func TestStatus(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + status := d.Status() + if status == nil || len(status) == 0 { + t.Fatal("Status should not be nil or empty") + } + rootDir := status[0] + dirs := status[2] + if rootDir[0] != "Root Dir" { + t.Fatalf("Expected Root Dir got %s", rootDir[0]) + } + if rootDir[1] != d.rootPath() { + t.Fatalf("Expected %s got %s", d.rootPath(), rootDir[1]) + } + if dirs[0] != "Dirs" { + t.Fatalf("Expected Dirs got %s", dirs[0]) + } + if dirs[1] != "1" { + t.Fatalf("Expected 1 got %s", dirs[1]) + } +} + +func TestApplyDiff(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + f.Close() + + diff, err := d.Diff("1", "") + if err != nil { + t.Fatal(err) + } + + if err := d.Create("2", ""); err != nil { + t.Fatal(err) + } + if err := d.Create("3", "2"); err != nil { + t.Fatal(err) + } + + if err := d.applyDiff("3", diff); err != nil { + t.Fatal(err) + } + + // Ensure that the file is in the mount point for id 3 + + mountPoint, err := d.Get("3", "") + if err != nil { + t.Fatal(err) + } + if _, err := os.Stat(path.Join(mountPoint, "test_file")); err != nil { + t.Fatal(err) + } +} + +func hash(c string) string { + h := sha256.New() + fmt.Fprint(h, c) + return hex.EncodeToString(h.Sum(nil)) +} + +func testMountMoreThan42Layers(t *testing.T, mountPath string) { + if err := os.MkdirAll(mountPath, 0755); err != nil { + t.Fatal(err) + } + + defer os.RemoveAll(mountPath) + d := testInit(mountPath, t).(*Driver) + defer d.Cleanup() + var last string + var expected int + + for i := 1; i < 127; i++ { + expected++ + var ( + parent = fmt.Sprintf("%d", i-1) + current = fmt.Sprintf("%d", i) + ) + + if parent == "0" { + parent = "" + } else { + parent = hash(parent) + } + current = hash(current) + + if err := d.Create(current, parent); err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } + point, err := d.Get(current, "") + if err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } + f, err := os.Create(path.Join(point, current)) + if err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } + f.Close() + + if i%10 == 0 { + if err := os.Remove(path.Join(point, parent)); err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } + expected-- + } + last = current + } + + // Perform the actual mount for the top most image + point, err := d.Get(last, "") + if err != nil { + t.Error(err) + } + files, err := ioutil.ReadDir(point) + if err != nil { + t.Error(err) + } + if len(files) != expected { + t.Errorf("Expected %d got 
%d", expected, len(files))
+	}
+}
+
+func TestMountMoreThan42Layers(t *testing.T) {
+	os.RemoveAll(tmpOuter)
+	testMountMoreThan42Layers(t, tmp)
+}
+
+func TestMountMoreThan42LayersMatchingPathLength(t *testing.T) {
+	defer os.RemoveAll(tmpOuter)
+	zeroes := "0"
+	for {
+		// This finds a mount path such that, when combined into the aufs mount
+		// options, the 4096-byte boundary falls between two paths or within the
+		// permissions section. For '/tmp' it will use '/tmp/aufs-tests/00000000/aufs'
+		mountPath := path.Join(tmpOuter, zeroes, "aufs")
+		pathLength := 77 + len(mountPath)
+
+		if mod := 4095 % pathLength; mod == 0 || mod > pathLength-2 {
+			t.Logf("Using path: %s", mountPath)
+			testMountMoreThan42Layers(t, mountPath)
+			return
+		}
+		zeroes += "0"
+	}
+}
diff --git a/daemon/graphdriver/aufs/dirs.go b/daemon/graphdriver/aufs/dirs.go
new file mode 100644
index 00000000..08f1ffc0
--- /dev/null
+++ b/daemon/graphdriver/aufs/dirs.go
@@ -0,0 +1,48 @@
+// +build linux
+
+package aufs
+
+import (
+	"bufio"
+	"io/ioutil"
+	"os"
+	"path"
+)
+
+// Return the ids of all layers, stored as regular files under root
+func loadIds(root string) ([]string, error) {
+	dirs, err := ioutil.ReadDir(root)
+	if err != nil {
+		return nil, err
+	}
+	out := []string{}
+	for _, d := range dirs {
+		if !d.IsDir() {
+			out = append(out, d.Name())
+		}
+	}
+	return out, nil
+}
+
+// Read the layers file for the current id and return all the
+// layers represented by new lines in the file
+//
+// If there are no lines in the file then the id has no parent
+// and an empty slice is returned.
+func getParentIds(root, id string) ([]string, error) {
+	f, err := os.Open(path.Join(root, "layers", id))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	out := []string{}
+	s := bufio.NewScanner(f)
+
+	for s.Scan() {
+		if t := s.Text(); t != "" {
+			out = append(out, s.Text())
+		}
+	}
+	return out, s.Err()
+}
diff --git a/daemon/graphdriver/aufs/migrate.go b/daemon/graphdriver/aufs/migrate.go
new file mode 100644
index 00000000..08d8ebeb
--- /dev/null
+++ b/daemon/graphdriver/aufs/migrate.go
@@ -0,0 +1,196 @@
+// +build linux
+
+package aufs
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path"
+)
+
+type metadata struct {
+	ID       string `json:"id"`
+	ParentID string `json:"parent,omitempty"`
+	Image    string `json:"Image,omitempty"`
+
+	parent *metadata
+}
+
+func pathExists(pth string) bool {
+	if _, err := os.Stat(pth); err != nil {
+		return false
+	}
+	return true
+}
+
+// Migrate existing images and containers from docker < 0.7.x
+//
+// The format pre 0.7 is for docker to store the metadata and filesystem
+// content in the same directory. For the migration to work we need to move image layer
+// data from /var/lib/docker/graph/<id>/layer to the diff of the registered id.
+//
+// Next we need to migrate the container's rw layer to the diff of the driver. After the
+// contents are migrated we need to register the image and container ids with the
+// driver.
+//
+// For the migration we try to move the folder containing the layer files; if that
+// fails because the data is currently mounted, we will fall back to creating a
+// symlink.
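+//
+// Illustrative layout (ids abbreviated; the docker root is assumed to be
+// /var/lib/docker and the aufs root /var/lib/docker/aufs):
+//
+//	pre-0.7: /var/lib/docker/graph/<id>/layer     image layer content
+//	         /var/lib/docker/containers/<id>/rw   container rw layer
+//	after:   /var/lib/docker/aufs/diff/<id>       both, registered with the driver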
+func (a *Driver) Migrate(pth string, setupInit func(p string) error) error {
+	if pathExists(path.Join(pth, "graph")) {
+		if err := a.migrateRepositories(pth); err != nil {
+			return err
+		}
+		if err := a.migrateImages(path.Join(pth, "graph")); err != nil {
+			return err
+		}
+		return a.migrateContainers(path.Join(pth, "containers"), setupInit)
+	}
+	return nil
+}
+
+func (a *Driver) migrateRepositories(pth string) error {
+	name := path.Join(pth, "repositories")
+	if err := os.Rename(name, name+"-aufs"); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	return nil
+}
+
+func (a *Driver) migrateContainers(pth string, setupInit func(p string) error) error {
+	fis, err := ioutil.ReadDir(pth)
+	if err != nil {
+		return err
+	}
+
+	for _, fi := range fis {
+		if id := fi.Name(); fi.IsDir() && pathExists(path.Join(pth, id, "rw")) {
+			if err := tryRelocate(path.Join(pth, id, "rw"), path.Join(a.rootPath(), "diff", id)); err != nil {
+				return err
+			}
+
+			if !a.Exists(id) {
+
+				metadata, err := loadMetadata(path.Join(pth, id, "config.json"))
+				if err != nil {
+					return err
+				}
+
+				initID := fmt.Sprintf("%s-init", id)
+				if err := a.Create(initID, metadata.Image); err != nil {
+					return err
+				}
+
+				initPath, err := a.Get(initID, "")
+				if err != nil {
+					return err
+				}
+				// setup init layer
+				if err := setupInit(initPath); err != nil {
+					return err
+				}
+
+				if err := a.Create(id, initID); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func (a *Driver) migrateImages(pth string) error {
+	fis, err := ioutil.ReadDir(pth)
+	if err != nil {
+		return err
+	}
+	var (
+		m       = make(map[string]*metadata)
+		current *metadata
+		exists  bool
+	)
+
+	for _, fi := range fis {
+		if id := fi.Name(); fi.IsDir() && pathExists(path.Join(pth, id, "layer")) {
+			if current, exists = m[id]; !exists {
+				current, err = loadMetadata(path.Join(pth, id, "json"))
+				if err != nil {
+					return err
+				}
+				m[id] = current
+			}
+		}
+	}
+
+	for _, v := range m {
+		v.parent = m[v.ParentID]
+	}
+
+	migrated := make(map[string]bool)
+	for _, v := range m {
+		if err := a.migrateImage(v, pth, migrated); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (a *Driver) migrateImage(m *metadata, pth string, migrated map[string]bool) error {
+	if !migrated[m.ID] {
+		if m.parent != nil {
+			// Migrate parents before children; propagate any failure.
+			if err := a.migrateImage(m.parent, pth, migrated); err != nil {
+				return err
+			}
+		}
+		if err := tryRelocate(path.Join(pth, m.ID, "layer"), path.Join(a.rootPath(), "diff", m.ID)); err != nil {
+			return err
+		}
+		if !a.Exists(m.ID) {
+			if err := a.Create(m.ID, m.ParentID); err != nil {
+				return err
+			}
+		}
+		migrated[m.ID] = true
+	}
+	return nil
+}
+
+// tryRelocate will try to rename the old path to the new path and, if
+// the operation fails, it will fall back to a symlink
+func tryRelocate(oldPath, newPath string) error {
+	s, err := os.Lstat(newPath)
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	// If the destination is a symlink then we already tried to relocate once before
+	// and it failed, so we delete it and try the rename again
+	if s != nil && s.Mode()&os.ModeSymlink != 0 {
+		if err := os.RemoveAll(newPath); err != nil {
+			return err
+		}
+	}
+	if err := os.Rename(oldPath, newPath); err != nil {
+		if sErr := os.Symlink(oldPath, newPath); sErr != nil {
+			return fmt.Errorf("Unable to relocate %s to %s: Rename err %s Symlink err %s", oldPath, newPath, err, sErr)
+		}
+	}
+	return nil
+}
+
+func loadMetadata(pth string) (*metadata, error) {
+	f, err := os.Open(pth)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	var (
+		out = &metadata{}
+		dec = json.NewDecoder(f)
+	)
+
+	if err := dec.Decode(out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
diff --git a/daemon/graphdriver/aufs/mount.go b/daemon/graphdriver/aufs/mount.go
new file mode 100644
index 00000000..b82b17f2
--- /dev/null
+++ b/daemon/graphdriver/aufs/mount.go
@@ -0,0 +1,20 @@
+// +build linux
+
+package aufs
+
+import (
+	"os/exec"
+	"syscall"
+
+	"github.com/Sirupsen/logrus"
+)
+
+func Unmount(target string) error {
+	if err := exec.Command("auplink", target, "flush").Run(); err != nil {
+		logrus.Errorf("Couldn't run auplink before unmount: %s", err)
+	}
+	if err := syscall.Unmount(target, 0); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/daemon/graphdriver/aufs/mount_linux.go b/daemon/graphdriver/aufs/mount_linux.go
new file mode 100644
index 00000000..c86f1bbd
--- /dev/null
+++ b/daemon/graphdriver/aufs/mount_linux.go
@@ -0,0 +1,9 @@
+package aufs
+
+import "syscall"
+
+const MsRemount = syscall.MS_REMOUNT
+
+func mount(source string, target string, fstype string, flags uintptr, data string) error {
+	return syscall.Mount(source, target, fstype, flags, data)
+}
diff --git a/daemon/graphdriver/aufs/mount_unsupported.go b/daemon/graphdriver/aufs/mount_unsupported.go
new file mode 100644
index 00000000..0cc31d54
--- /dev/null
+++ b/daemon/graphdriver/aufs/mount_unsupported.go
@@ -0,0 +1,11 @@
+// +build !linux
+
+package aufs
+
+import "errors"
+
+const MsRemount = 0
+
+func mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
+	return errors.New("mount is not implemented on this platform")
+}
diff --git a/daemon/graphdriver/btrfs/btrfs.go b/daemon/graphdriver/btrfs/btrfs.go
new file mode 100644
index 00000000..044a9c27
--- /dev/null
+++ b/daemon/graphdriver/btrfs/btrfs.go
@@ -0,0 +1,237 @@
+// +build linux
+
+package btrfs
+
+/*
+#include <stdlib.h>
+#include <dirent.h>
+#include <btrfs/ioctl.h>
+*/
+import "C"
+
+import (
+	"fmt"
+	"os"
+	"path"
+	"syscall"
+	"unsafe"
+
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/mount"
+)
+
+func init() {
+	graphdriver.Register("btrfs", Init)
+}
+
+func Init(home string, options []string) (graphdriver.Driver, error) {
+	rootdir := path.Dir(home)
+
+	var buf syscall.Statfs_t
+	if err := syscall.Statfs(rootdir, &buf); err != nil {
+		return nil, err
+	}
+
+	if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicBtrfs {
+		return nil, graphdriver.ErrPrerequisites
+	}
+
+	if err := os.MkdirAll(home, 0700); err != nil {
+		return nil, err
+	}
+
+	if err := mount.MakePrivate(home); err != nil {
+		return nil, err
+	}
+
+	driver := &Driver{
+		home: home,
+	}
+
+	return graphdriver.NaiveDiffDriver(driver), nil
+}
+
+type Driver struct {
+	home string
+}
+
+func (d *Driver) String() string {
+	return "btrfs"
+}
+
+func (d *Driver) Status() [][2]string {
+	status := [][2]string{}
+	if bv := BtrfsBuildVersion(); bv != "-" {
+		status = append(status, [2]string{"Build Version", bv})
+	}
+	if lv := BtrfsLibVersion(); lv != -1 {
+		status = append(status, [2]string{"Library Version", fmt.Sprintf("%d", lv)})
+	}
+	return status
+}
+
+func (d *Driver) GetMetadata(id string) (map[string]string, error) {
+	return nil, nil
+}
+
+func (d *Driver) Cleanup() error {
+	return mount.Unmount(d.home)
+}
+
+func free(p *C.char) {
+	C.free(unsafe.Pointer(p))
+}
+
+func openDir(path string) (*C.DIR, error) {
+	Cpath := C.CString(path)
+	defer free(Cpath)
+
+	dir := C.opendir(Cpath)
+	if dir == nil {
+		return nil, fmt.Errorf("Can't open dir")
+	}
+	return dir, nil
+}
+
+func closeDir(dir *C.DIR) {
+	if dir != nil {
+
C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +func subvolCreate(path, name string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_vol_args + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) + } + return nil +} + +func subvolSnapshot(src, dest, name string) error { + srcDir, err := openDir(src) + if err != nil { + return err + } + defer closeDir(srcDir) + + destDir, err := openDir(dest) + if err != nil { + return err + } + defer closeDir(destDir) + + var args C.struct_btrfs_ioctl_vol_args_v2 + args.fd = C.__s64(getDirFd(srcDir)) + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) + } + return nil +} + +func subvolDelete(path, name string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_vol_args + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to destroy btrfs snapshot: %v", errno.Error()) + } + return nil +} + +func (d *Driver) subvolumesDir() string { + return path.Join(d.home, "subvolumes") +} + +func (d *Driver) subvolumesDirId(id string) string { + return path.Join(d.subvolumesDir(), id) +} + +func (d *Driver) Create(id string, parent string) error { + subvolumes := path.Join(d.home, "subvolumes") + if err := os.MkdirAll(subvolumes, 0700); err != nil { + return err + } + if parent == "" { + if err := subvolCreate(subvolumes, id); err != nil { + return err + } + } else { + parentDir, err := d.Get(parent, "") + if err != nil { + return err + } + if err := subvolSnapshot(parentDir, subvolumes, id); err != nil { + return err + } + } + return nil +} + +func (d *Driver) Remove(id string) error { + dir := d.subvolumesDirId(id) + if _, err := os.Stat(dir); err != nil { + return err + } + if err := subvolDelete(d.subvolumesDir(), id); err != nil { + return err + } + return os.RemoveAll(dir) +} + +func (d *Driver) Get(id, mountLabel string) (string, error) { + dir := d.subvolumesDirId(id) + st, err := os.Stat(dir) + if err != nil { + return "", err + } + + if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + + return dir, nil +} + +func (d *Driver) Put(id string) error { + // Get() creates no runtime resources (like e.g. mounts) + // so this doesn't need to do anything. 
+	return nil
+}
+
+func (d *Driver) Exists(id string) bool {
+	dir := d.subvolumesDirId(id)
+	_, err := os.Stat(dir)
+	return err == nil
+}
diff --git a/daemon/graphdriver/btrfs/btrfs_test.go b/daemon/graphdriver/btrfs/btrfs_test.go
new file mode 100644
index 00000000..bfd15e06
--- /dev/null
+++ b/daemon/graphdriver/btrfs/btrfs_test.go
@@ -0,0 +1,30 @@
+// +build linux
+
+package btrfs
+
+import (
+	"github.com/docker/docker/daemon/graphdriver/graphtest"
+	"testing"
+)
+
+// This avoids creating a new driver for each test if all tests are run
+// Make sure to put new tests between TestBtrfsSetup and TestBtrfsTeardown
+func TestBtrfsSetup(t *testing.T) {
+	graphtest.GetDriver(t, "btrfs")
+}
+
+func TestBtrfsCreateEmpty(t *testing.T) {
+	graphtest.DriverTestCreateEmpty(t, "btrfs")
+}
+
+func TestBtrfsCreateBase(t *testing.T) {
+	graphtest.DriverTestCreateBase(t, "btrfs")
+}
+
+func TestBtrfsCreateSnap(t *testing.T) {
+	graphtest.DriverTestCreateSnap(t, "btrfs")
+}
+
+func TestBtrfsTeardown(t *testing.T) {
+	graphtest.PutDriver(t)
+}
diff --git a/daemon/graphdriver/btrfs/dummy_unsupported.go b/daemon/graphdriver/btrfs/dummy_unsupported.go
new file mode 100644
index 00000000..f0708888
--- /dev/null
+++ b/daemon/graphdriver/btrfs/dummy_unsupported.go
@@ -0,0 +1,3 @@
+// +build !linux !cgo
+
+package btrfs
diff --git a/daemon/graphdriver/btrfs/version.go b/daemon/graphdriver/btrfs/version.go
new file mode 100644
index 00000000..25248b15
--- /dev/null
+++ b/daemon/graphdriver/btrfs/version.go
@@ -0,0 +1,26 @@
+// +build linux,!btrfs_noversion
+
+package btrfs
+
+/*
+#include <btrfs/version.h>
+
+// around version 3.16, they did not define lib version yet
+#ifndef BTRFS_LIB_VERSION
+#define BTRFS_LIB_VERSION -1
+#endif
+
+// upstream had removed it, but now it will be coming back
+#ifndef BTRFS_BUILD_VERSION
+#define BTRFS_BUILD_VERSION "-"
+#endif
+*/
+import "C"
+
+func BtrfsBuildVersion() string {
+	return string(C.BTRFS_BUILD_VERSION)
+}
+
+func BtrfsLibVersion() int {
+	return int(C.BTRFS_LIB_VERSION)
+}
diff --git a/daemon/graphdriver/btrfs/version_none.go b/daemon/graphdriver/btrfs/version_none.go
new file mode 100644
index 00000000..b32fc61c
--- /dev/null
+++ b/daemon/graphdriver/btrfs/version_none.go
@@ -0,0 +1,14 @@
+// +build linux,btrfs_noversion
+
+package btrfs
+
+// TODO(vbatts) remove this work-around once supported linux distros are on
+// btrfs utilities of >= 3.16.1
+
+func BtrfsBuildVersion() string {
+	return "-"
+}
+
+func BtrfsLibVersion() int {
+	return -1
+}
diff --git a/daemon/graphdriver/btrfs/version_test.go b/daemon/graphdriver/btrfs/version_test.go
new file mode 100644
index 00000000..02fb1315
--- /dev/null
+++ b/daemon/graphdriver/btrfs/version_test.go
@@ -0,0 +1,13 @@
+// +build linux,!btrfs_noversion
+
+package btrfs
+
+import (
+	"testing"
+)
+
+func TestLibVersion(t *testing.T) {
+	if BtrfsLibVersion() <= 0 {
+		t.Errorf("expected output from btrfs lib version > 0")
+	}
+}
diff --git a/daemon/graphdriver/devmapper/README.md b/daemon/graphdriver/devmapper/README.md
new file mode 100644
index 00000000..4d2cad7b
--- /dev/null
+++ b/daemon/graphdriver/devmapper/README.md
@@ -0,0 +1,84 @@
+## devicemapper - a storage backend based on Device Mapper
+
+### Theory of operation
+
+The device mapper graphdriver uses the device mapper thin provisioning
+module (dm-thinp) to implement CoW snapshots. For each devicemapper
+graph location (typically `/var/lib/docker/devicemapper`, $graph below)
+a thin pool is created based on two block devices, one for data and
+one for metadata. By default these block devices are created
+automatically by using loopback mounts of automatically created sparse
+files.
+
+The default loopback files used are `$graph/devicemapper/data` and
+`$graph/devicemapper/metadata`. Additional metadata required to map
+from docker entities to the corresponding devicemapper volumes is
+stored in the `$graph/devicemapper/json` file (encoded as JSON).
+
+In order to support multiple devicemapper graphs on a system, the thin
+pool will be named something like: `docker-0:33-19478248-pool`, where
+the `0:33` part is the major:minor device number and `19478248` is the
+inode number of the $graph directory.
+
+On the thin pool, docker automatically creates a base thin device,
+called something like `docker-0:33-19478248-base`, of a fixed
+size. This is automatically formatted with an empty filesystem on
+creation. This device is the base of all docker images and
+containers. All base images are snapshots of this device and those
+images are then in turn used as snapshots for other images and
+eventually containers.
+
+### Information on `docker info`
+
+As of docker-1.4.1, `docker info` when using the `devicemapper` storage driver
+will display something like:
+
+    $ sudo docker info
+    [...]
+    Storage Driver: devicemapper
+     Pool Name: docker-253:1-17538953-pool
+     Pool Blocksize: 65.54 kB
+     Data file: /dev/loop4
+     Metadata file: /dev/loop4
+     Data Space Used: 2.536 GB
+     Data Space Total: 107.4 GB
+     Data Space Available: 104.8 GB
+     Metadata Space Used: 7.93 MB
+     Metadata Space Total: 2.147 GB
+     Metadata Space Available: 2.14 GB
+     Udev Sync Supported: true
+     Data loop file: /home/docker/devicemapper/devicemapper/data
+     Metadata loop file: /home/docker/devicemapper/devicemapper/metadata
+     Library Version: 1.02.82-git (2013-10-04)
+    [...]
+
+#### status items
+
+Each item in the indented section under `Storage Driver: devicemapper` is
+status information about the driver.
+ *  `Pool Name` name of the devicemapper pool for this driver.
+ *  `Pool Blocksize` tells the blocksize the thin pool was initialized with. This only changes on creation.
+ *  `Data file` blockdevice file used for the devicemapper data
+ *  `Metadata file` blockdevice file used for the devicemapper metadata
+ *  `Data Space Used` tells how much of `Data file` is currently used
+ *  `Data Space Total` tells the max size of the `Data file`
+ *  `Data Space Available` tells how much free space there is in the `Data file`. If you are using a loop device this will report the actual space available to the loop device on the underlying filesystem.
+ *  `Metadata Space Used` tells how much of `Metadata file` is currently used
+ *  `Metadata Space Total` tells the max size of the `Metadata file`
+ *  `Metadata Space Available` tells how much free space there is in the `Metadata file`. If you are using a loop device this will report the actual space available to the loop device on the underlying filesystem.
+ *  `Udev Sync Supported` tells whether devicemapper is able to sync with Udev. Should be `true`.
+ *  `Data loop file` file attached to `Data file`, if loopback device is used
+ *  `Metadata loop file` file attached to `Metadata file`, if loopback device is used
+ *  `Library Version` from the libdevmapper used
+
+### About the devicemapper options
+
+The devicemapper backend supports some options that you can specify
+when starting the docker daemon using the `--storage-opt` flags.
+These use the `dm` prefix and would be used something like `docker -d --storage-opt dm.foo=bar`.
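+
+For example, a minimal invocation overriding a single option (here
+`dm.basesize`, which controls the size of the base thin device):
+
+    $ sudo docker -d --storage-opt dm.basesize=20G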
+
+These options are currently documented both in [the man
+page](../../../man/docker.1.md) and in [the online
+documentation](https://docs.docker.com/reference/commandline/daemon/#docker-execdriver-option).
+If you add an option, update both the `man` page and the documentation.
diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go
new file mode 100644
index 00000000..2eee3301
--- /dev/null
+++ b/daemon/graphdriver/devmapper/deviceset.go
@@ -0,0 +1,2021 @@
+// +build linux
+
+package devmapper
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/devicemapper"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/units"
+	"github.com/opencontainers/runc/libcontainer/label"
+)
+
+var (
+	DefaultDataLoopbackSize     int64  = 100 * 1024 * 1024 * 1024
+	DefaultMetaDataLoopbackSize int64  = 2 * 1024 * 1024 * 1024
+	DefaultBaseFsSize           uint64 = 100 * 1024 * 1024 * 1024
+	DefaultThinpBlockSize       uint32 = 128 // 64K = 128 512b sectors
+	DefaultUdevSyncOverride     bool   = false
+	MaxDeviceId                 int    = 0xffffff // 24 bit, pool limit
+	DeviceIdMapSz               int    = (MaxDeviceId + 1) / 8
+	// We retry device removal so many times that the error messages would
+	// fill up the console during normal operation, so only log Fatal
+	// messages by default.
+	DMLogLevel                   int  = devicemapper.LogLevelFatal
+	DriverDeferredRemovalSupport bool = false
+	EnableDeferredRemoval        bool = false
+)
+
+const deviceSetMetaFile string = "deviceset-metadata"
+const transactionMetaFile string = "transaction-metadata"
+
+type Transaction struct {
+	OpenTransactionId uint64 `json:"open_transaction_id"`
+	DeviceIdHash      string `json:"device_hash"`
+	DeviceId          int    `json:"device_id"`
+}
+
+type DevInfo struct {
+	Hash          string `json:"-"`
+	DeviceId      int    `json:"device_id"`
+	Size          uint64 `json:"size"`
+	TransactionId uint64 `json:"transaction_id"`
+	Initialized   bool   `json:"initialized"`
+	devices       *DeviceSet
+
+	mountCount int
+	mountPath  string
+
+	// The global DeviceSet lock guarantees that we serialize all
+	// the calls to libdevmapper (which is not threadsafe), but we
+	// sometimes release that lock while sleeping. In that case
+	// this per-device lock is still held, protecting against
+	// other accesses to the device that we're doing the wait on.
+	//
+	// WARNING: In order to avoid AB-BA deadlocks when releasing
+	// the global lock while holding the per-device locks, all
+	// device locks must be acquired *before* the global lock, and
+	// multiple device locks should be acquired parent before child.
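+	//
+	// A sketch of the safe ordering (illustrative only):
+	//
+	//	info.lock.Lock()   // per-device lock first
+	//	devices.Lock()     // then the global DeviceSet lock
+	//	...                // libdevmapper calls
+	//	devices.Unlock()
+	//	info.lock.Unlock()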
+	lock sync.Mutex
+}
+
+type MetaData struct {
+	Devices     map[string]*DevInfo `json:"Devices"`
+	devicesLock sync.Mutex          // Protects all read/writes to Devices map
+}
+
+type DeviceSet struct {
+	MetaData      `json:"-"`
+	sync.Mutex    `json:"-"` // Protects Devices map and serializes calls into libdevmapper
+	root          string
+	devicePrefix  string
+	TransactionId uint64 `json:"-"`
+	NextDeviceId  int    `json:"next_device_id"`
+	deviceIdMap   []byte
+
+	// Options
+	dataLoopbackSize      int64
+	metaDataLoopbackSize  int64
+	baseFsSize            uint64
+	filesystem            string
+	mountOptions          string
+	mkfsArgs              []string
+	dataDevice            string // block or loop dev
+	dataLoopFile          string // loopback file, if used
+	metadataDevice        string // block or loop dev
+	metadataLoopFile      string // loopback file, if used
+	doBlkDiscard          bool
+	thinpBlockSize        uint32
+	thinPoolDevice        string
+	Transaction           `json:"-"`
+	overrideUdevSyncCheck bool
+	deferredRemove        bool   // use deferred removal
+	BaseDeviceUUID        string // save UUID of base device
+}
+
+type DiskUsage struct {
+	Used      uint64
+	Total     uint64
+	Available uint64
+}
+
+type Status struct {
+	PoolName              string
+	DataFile              string // actual block device for data
+	DataLoopback          string // loopback file, if used
+	MetadataFile          string // actual block device for metadata
+	MetadataLoopback      string // loopback file, if used
+	Data                  DiskUsage
+	Metadata              DiskUsage
+	SectorSize            uint64
+	UdevSyncSupported     bool
+	DeferredRemoveEnabled bool
+}
+
+// Structure used to export image/container metadata in docker inspect.
+type DeviceMetadata struct {
+	deviceId   int
+	deviceSize uint64 // size in bytes
+	deviceName string // Device name as used during activation
+}
+
+type DevStatus struct {
+	DeviceId            int
+	Size                uint64
+	TransactionId       uint64
+	SizeInSectors       uint64
+	MappedSectors       uint64
+	HighestMappedSector uint64
+}
+
+func getDevName(name string) string {
+	return "/dev/mapper/" + name
+}
+
+func (info *DevInfo) Name() string {
+	hash := info.Hash
+	if hash == "" {
+		hash = "base"
+	}
+	return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash)
+}
+
+func (info *DevInfo) DevName() string {
+	return getDevName(info.Name())
+}
+
+func (devices *DeviceSet) loopbackDir() string {
+	return path.Join(devices.root, "devicemapper")
+}
+
+func (devices *DeviceSet) metadataDir() string {
+	return path.Join(devices.root, "metadata")
+}
+
+func (devices *DeviceSet) metadataFile(info *DevInfo) string {
+	file := info.Hash
+	if file == "" {
+		file = "base"
+	}
+	return path.Join(devices.metadataDir(), file)
+}
+
+func (devices *DeviceSet) transactionMetaFile() string {
+	return path.Join(devices.metadataDir(), transactionMetaFile)
+}
+
+func (devices *DeviceSet) deviceSetMetaFile() string {
+	return path.Join(devices.metadataDir(), deviceSetMetaFile)
+}
+
+func (devices *DeviceSet) oldMetadataFile() string {
+	return path.Join(devices.loopbackDir(), "json")
+}
+
+func (devices *DeviceSet) getPoolName() string {
+	if devices.thinPoolDevice == "" {
+		return devices.devicePrefix + "-pool"
+	}
+	return devices.thinPoolDevice
+}
+
+func (devices *DeviceSet) getPoolDevName() string {
+	return getDevName(devices.getPoolName())
+}
+
+func (devices *DeviceSet) hasImage(name string) bool {
+	dirname := devices.loopbackDir()
+	filename := path.Join(dirname, name)
+
+	_, err := os.Stat(filename)
+	return err == nil
+}
+
+// ensureImage creates a sparse file of <size> bytes at the path
+// <root>/devicemapper/<name>.
+// If the file already exists, it does nothing.
+// Either way it returns the full path.
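+//
+// For example (illustrative values), ensureImage("data", 100*1024*1024*1024)
+// creates <root>/devicemapper/data as a 100 GB sparse file when it does not
+// already exist; compare DefaultDataLoopbackSize above.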
+func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) {
+ dirname := devices.loopbackDir()
+ filename := path.Join(dirname, name)
+
+ if err := os.MkdirAll(dirname, 0700); err != nil && !os.IsExist(err) {
+ return "", err
+ }
+
+ if _, err := os.Stat(filename); err != nil {
+ if !os.IsNotExist(err) {
+ return "", err
+ }
+ logrus.Debugf("Creating loopback file %s for device-mapper use", filename)
+ file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600)
+ if err != nil {
+ return "", err
+ }
+ defer file.Close()
+
+ if err := file.Truncate(size); err != nil {
+ return "", err
+ }
+ }
+ return filename, nil
+}
+
+func (devices *DeviceSet) allocateTransactionId() uint64 {
+ devices.OpenTransactionId = devices.TransactionId + 1
+ return devices.OpenTransactionId
+}
+
+func (devices *DeviceSet) updatePoolTransactionId() error {
+ if err := devicemapper.SetTransactionId(devices.getPoolDevName(), devices.TransactionId, devices.OpenTransactionId); err != nil {
+ return fmt.Errorf("Error setting devmapper transaction ID: %s", err)
+ }
+ devices.TransactionId = devices.OpenTransactionId
+ return nil
+}
+
+func (devices *DeviceSet) removeMetadata(info *DevInfo) error {
+ if err := os.RemoveAll(devices.metadataFile(info)); err != nil {
+ return fmt.Errorf("Error removing metadata file %s: %s", devices.metadataFile(info), err)
+ }
+ return nil
+}
+
+// Given JSON data and a file path, write it to disk
+func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error {
+ tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp")
+ if err != nil {
+ return fmt.Errorf("Error creating metadata file: %s", err)
+ }
+
+ n, err := tmpFile.Write(jsonData)
+ if err != nil {
+ return fmt.Errorf("Error writing metadata to %s: %s", tmpFile.Name(), err)
+ }
+ if n < len(jsonData) {
+ return io.ErrShortWrite
+ }
+ if err := tmpFile.Sync(); err != nil {
+ return fmt.Errorf("Error syncing metadata file %s: %s", tmpFile.Name(), err)
+ }
+ if err := tmpFile.Close(); err != nil {
+ return fmt.Errorf("Error closing metadata file %s: %s", tmpFile.Name(), err)
+ }
+ if err := os.Rename(tmpFile.Name(), filePath); err != nil {
+ return fmt.Errorf("Error committing metadata file %s: %s", tmpFile.Name(), err)
+ }
+
+ return nil
+}
+
+func (devices *DeviceSet) saveMetadata(info *DevInfo) error {
+ jsonData, err := json.Marshal(info)
+ if err != nil {
+ return fmt.Errorf("Error encoding metadata to json: %s", err)
+ }
+ if err := devices.writeMetaFile(jsonData, devices.metadataFile(info)); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (devices *DeviceSet) markDeviceIdUsed(deviceId int) {
+ var mask byte
+ i := deviceId % 8
+ mask = 1 << uint(i)
+ devices.deviceIdMap[deviceId/8] = devices.deviceIdMap[deviceId/8] | mask
+}
+
+func (devices *DeviceSet) markDeviceIdFree(deviceId int) {
+ var mask byte
+ i := deviceId % 8
+ mask = ^(1 << uint(i))
+ devices.deviceIdMap[deviceId/8] = devices.deviceIdMap[deviceId/8] & mask
+}
+
+func (devices *DeviceSet) isDeviceIdFree(deviceId int) bool {
+ var mask byte
+ i := deviceId % 8
+ mask = (1 << uint(i))
+ if (devices.deviceIdMap[deviceId/8] & mask) != 0 {
+ return false
+ }
+ return true
+}
+
+func (devices *DeviceSet) lookupDevice(hash string) (*DevInfo, error) {
+ devices.devicesLock.Lock()
+ defer devices.devicesLock.Unlock()
+ info := devices.Devices[hash]
+ if info == nil {
+ info = devices.loadMetadata(hash)
+ if info == nil {
+ return nil, fmt.Errorf("Unknown device %s", hash)
+ }
+
+ devices.Devices[hash] = info
+ }
+ return
info, nil +} + +func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) error { + + // Skip some of the meta files which are not device files. + if strings.HasSuffix(finfo.Name(), ".migrated") { + logrus.Debugf("Skipping file %s", path) + return nil + } + + if strings.HasPrefix(finfo.Name(), ".") { + logrus.Debugf("Skipping file %s", path) + return nil + } + + if finfo.Name() == deviceSetMetaFile { + logrus.Debugf("Skipping file %s", path) + return nil + } + + logrus.Debugf("Loading data for file %s", path) + + hash := finfo.Name() + if hash == "base" { + hash = "" + } + + dinfo := devices.loadMetadata(hash) + if dinfo == nil { + return fmt.Errorf("Error loading device metadata file %s", hash) + } + + if dinfo.DeviceId > MaxDeviceId { + logrus.Errorf("Ignoring Invalid DeviceId=%d", dinfo.DeviceId) + return nil + } + + devices.Lock() + devices.markDeviceIdUsed(dinfo.DeviceId) + devices.Unlock() + + logrus.Debugf("Added deviceId=%d to DeviceIdMap", dinfo.DeviceId) + return nil +} + +func (devices *DeviceSet) constructDeviceIdMap() error { + logrus.Debugf("[deviceset] constructDeviceIdMap()") + defer logrus.Debugf("[deviceset] constructDeviceIdMap() END") + + var scan = func(path string, info os.FileInfo, err error) error { + if err != nil { + logrus.Debugf("Can't walk the file %s", path) + return nil + } + + // Skip any directories + if info.IsDir() { + return nil + } + + return devices.deviceFileWalkFunction(path, info) + } + + return filepath.Walk(devices.metadataDir(), scan) +} + +func (devices *DeviceSet) unregisterDevice(id int, hash string) error { + logrus.Debugf("unregisterDevice(%v, %v)", id, hash) + info := &DevInfo{ + Hash: hash, + DeviceId: id, + } + + devices.devicesLock.Lock() + delete(devices.Devices, hash) + devices.devicesLock.Unlock() + + if err := devices.removeMetadata(info); err != nil { + logrus.Debugf("Error removing metadata: %s", err) + return err + } + + return nil +} + +func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, transactionId uint64) (*DevInfo, error) { + logrus.Debugf("registerDevice(%v, %v)", id, hash) + info := &DevInfo{ + Hash: hash, + DeviceId: id, + Size: size, + TransactionId: transactionId, + Initialized: false, + devices: devices, + } + + devices.devicesLock.Lock() + devices.Devices[hash] = info + devices.devicesLock.Unlock() + + if err := devices.saveMetadata(info); err != nil { + // Try to remove unused device + devices.devicesLock.Lock() + delete(devices.Devices, hash) + devices.devicesLock.Unlock() + return nil, err + } + + return info, nil +} + +func (devices *DeviceSet) activateDeviceIfNeeded(info *DevInfo) error { + logrus.Debugf("activateDeviceIfNeeded(%v)", info.Hash) + + // Make sure deferred removal on device is canceled, if one was + // scheduled. 
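+ // (Otherwise a still-pending deferred remove could tear the device
+ // down again right after we activate it.)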
+ if err := devices.cancelDeferredRemoval(info); err != nil {
+ return fmt.Errorf("Device Deferred Removal Cancellation Failed: %s", err)
+ }
+
+ if devinfo, _ := devicemapper.GetInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 {
+ return nil
+ }
+
+ return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceId, info.Size)
+}
+
+func (devices *DeviceSet) createFilesystem(info *DevInfo) error {
+ devname := info.DevName()
+
+ args := []string{}
+ for _, arg := range devices.mkfsArgs {
+ args = append(args, arg)
+ }
+
+ args = append(args, devname)
+
+ var err error
+ switch devices.filesystem {
+ case "xfs":
+ err = exec.Command("mkfs.xfs", args...).Run()
+ case "ext4":
+ err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run()
+ if err != nil {
+ err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run()
+ }
+ if err != nil {
+ return err
+ }
+ err = exec.Command("tune2fs", append([]string{"-c", "-1", "-i", "0"}, devname)...).Run()
+ default:
+ err = fmt.Errorf("Unsupported filesystem type %s", devices.filesystem)
+ }
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (devices *DeviceSet) migrateOldMetaData() error {
+ // Migrate old metadata file
+ jsonData, err := ioutil.ReadFile(devices.oldMetadataFile())
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+
+ if jsonData != nil {
+ m := MetaData{Devices: make(map[string]*DevInfo)}
+
+ if err := json.Unmarshal(jsonData, &m); err != nil {
+ return err
+ }
+
+ for hash, info := range m.Devices {
+ info.Hash = hash
+ devices.saveMetadata(info)
+ }
+ if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil {
+ return err
+ }
+
+ }
+
+ return nil
+}
+
+func (devices *DeviceSet) initMetaData() error {
+ if err := devices.migrateOldMetaData(); err != nil {
+ return err
+ }
+
+ _, transactionId, _, _, _, _, err := devices.poolStatus()
+ if err != nil {
+ return err
+ }
+
+ devices.TransactionId = transactionId
+
+ if err := devices.constructDeviceIdMap(); err != nil {
+ return err
+ }
+
+ if err := devices.processPendingTransaction(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (devices *DeviceSet) incNextDeviceId() {
+ // Ids are 24bit, so wrap around
+ devices.NextDeviceId = (devices.NextDeviceId + 1) & MaxDeviceId
+}
+
+func (devices *DeviceSet) getNextFreeDeviceId() (int, error) {
+ devices.incNextDeviceId()
+ for i := 0; i <= MaxDeviceId; i++ {
+ if devices.isDeviceIdFree(devices.NextDeviceId) {
+ devices.markDeviceIdUsed(devices.NextDeviceId)
+ return devices.NextDeviceId, nil
+ }
+ devices.incNextDeviceId()
+ }
+
+ return 0, fmt.Errorf("Unable to find a free device Id")
+}
+
+func (devices *DeviceSet) createRegisterDevice(hash string) (*DevInfo, error) {
+ deviceId, err := devices.getNextFreeDeviceId()
+ if err != nil {
+ return nil, err
+ }
+
+ if err := devices.openTransaction(hash, deviceId); err != nil {
+ logrus.Debugf("Error opening transaction hash = %s deviceId = %d", hash, deviceId)
+ devices.markDeviceIdFree(deviceId)
+ return nil, err
+ }
+
+ for {
+ if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceId); err != nil {
+ if devicemapper.DeviceIdExists(err) {
+ // Device Id already exists. This should not
+ // happen. Now we have a mechanism to find
+ // a free device Id. So something is not right.
+ // Give a warning and continue.
+ logrus.Errorf("Device Id %d exists in pool but it is supposed to be unused", deviceId) + deviceId, err = devices.getNextFreeDeviceId() + if err != nil { + return nil, err + } + // Save new device id into transaction + devices.refreshTransaction(deviceId) + continue + } + logrus.Debugf("Error creating device: %s", err) + devices.markDeviceIdFree(deviceId) + return nil, err + } + break + } + + logrus.Debugf("Registering device (id %v) with FS size %v", deviceId, devices.baseFsSize) + info, err := devices.registerDevice(deviceId, hash, devices.baseFsSize, devices.OpenTransactionId) + if err != nil { + _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) + devices.markDeviceIdFree(deviceId) + return nil, err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(deviceId, hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) + devices.markDeviceIdFree(deviceId) + return nil, err + } + return info, nil +} + +func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *DevInfo) error { + deviceId, err := devices.getNextFreeDeviceId() + if err != nil { + return err + } + + if err := devices.openTransaction(hash, deviceId); err != nil { + logrus.Debugf("Error opening transaction hash = %s deviceId = %d", hash, deviceId) + devices.markDeviceIdFree(deviceId) + return err + } + + for { + if err := devicemapper.CreateSnapDevice(devices.getPoolDevName(), deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil { + if devicemapper.DeviceIdExists(err) { + // Device Id already exists. This should not + // happen. Now we have a mechianism to find + // a free device Id. So something is not right. + // Give a warning and continue. + logrus.Errorf("Device Id %d exists in pool but it is supposed to be unused", deviceId) + deviceId, err = devices.getNextFreeDeviceId() + if err != nil { + return err + } + // Save new device id into transaction + devices.refreshTransaction(deviceId) + continue + } + logrus.Debugf("Error creating snap device: %s", err) + devices.markDeviceIdFree(deviceId) + return err + } + break + } + + if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size, devices.OpenTransactionId); err != nil { + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) + devices.markDeviceIdFree(deviceId) + logrus.Debugf("Error registering device: %s", err) + return err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(deviceId, hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) + devices.markDeviceIdFree(deviceId) + return err + } + return nil +} + +func (devices *DeviceSet) loadMetadata(hash string) *DevInfo { + info := &DevInfo{Hash: hash, devices: devices} + + jsonData, err := ioutil.ReadFile(devices.metadataFile(info)) + if err != nil { + return nil + } + + if err := json.Unmarshal(jsonData, &info); err != nil { + return nil + } + + return info +} + +func getDeviceUUID(device string) (string, error) { + out, err := exec.Command("blkid", "-s", "UUID", "-o", "value", device).Output() + if err != nil { + logrus.Debugf("Failed to find uuid for device %s:%v", device, err) + return "", err + } + + uuid := strings.TrimSuffix(string(out), "\n") + uuid = strings.TrimSpace(uuid) + logrus.Debugf("UUID for device: %s is:%s", device, uuid) + return uuid, nil +} + +func (devices *DeviceSet) verifyBaseDeviceUUID(baseInfo *DevInfo) error { + devices.Lock() + defer devices.Unlock() + + if err := devices.activateDeviceIfNeeded(baseInfo); err != nil { + return err + } + + defer 
devices.deactivateDevice(baseInfo)
+
+ uuid, err := getDeviceUUID(baseInfo.DevName())
+ if err != nil {
+ return err
+ }
+
+ if devices.BaseDeviceUUID != uuid {
+ return fmt.Errorf("Current Base Device UUID %s does not match stored UUID %s", uuid, devices.BaseDeviceUUID)
+ }
+
+ return nil
+}
+
+func (devices *DeviceSet) saveBaseDeviceUUID(baseInfo *DevInfo) error {
+ devices.Lock()
+ defer devices.Unlock()
+
+ if err := devices.activateDeviceIfNeeded(baseInfo); err != nil {
+ return err
+ }
+
+ defer devices.deactivateDevice(baseInfo)
+
+ uuid, err := getDeviceUUID(baseInfo.DevName())
+ if err != nil {
+ return err
+ }
+
+ devices.BaseDeviceUUID = uuid
+ devices.saveDeviceSetMetaData()
+ return nil
+}
+
+func (devices *DeviceSet) setupBaseImage() error {
+ oldInfo, _ := devices.lookupDevice("")
+ if oldInfo != nil && oldInfo.Initialized {
+ // If BaseDeviceUUID is empty (upgrade case), save it and
+ // return success.
+ if devices.BaseDeviceUUID == "" {
+ if err := devices.saveBaseDeviceUUID(oldInfo); err != nil {
+ return fmt.Errorf("Could not query and save base device UUID: %v", err)
+ }
+ return nil
+ }
+
+ if err := devices.verifyBaseDeviceUUID(oldInfo); err != nil {
+ return fmt.Errorf("Base Device UUID verification failed. Possibly using a different thin pool than last invocation: %v", err)
+ }
+ return nil
+ }
+
+ if oldInfo != nil && !oldInfo.Initialized {
+ logrus.Debugf("Removing uninitialized base image")
+ if err := devices.DeleteDevice(""); err != nil {
+ return err
+ }
+ }
+
+ if devices.thinPoolDevice != "" && oldInfo == nil {
+ _, transactionId, dataUsed, _, _, _, err := devices.poolStatus()
+ if err != nil {
+ return err
+ }
+ if dataUsed != 0 {
+ return fmt.Errorf("Unable to take ownership of thin-pool (%s) that already has used data blocks",
+ devices.thinPoolDevice)
+ }
+ if transactionId != 0 {
+ return fmt.Errorf("Unable to take ownership of thin-pool (%s) with non-zero transaction Id",
+ devices.thinPoolDevice)
+ }
+ }
+
+ logrus.Debugf("Initializing base device-mapper thin volume")
+
+ // Create initial device
+ info, err := devices.createRegisterDevice("")
+ if err != nil {
+ return err
+ }
+
+ logrus.Debugf("Creating filesystem on base device-mapper thin volume")
+
+ if err := devices.activateDeviceIfNeeded(info); err != nil {
+ return err
+ }
+
+ if err := devices.createFilesystem(info); err != nil {
+ return err
+ }
+
+ info.Initialized = true
+ if err := devices.saveMetadata(info); err != nil {
+ info.Initialized = false
+ return err
+ }
+
+ if err := devices.saveBaseDeviceUUID(info); err != nil {
+ return fmt.Errorf("Could not query and save base device UUID: %v", err)
+ }
+
+ return nil
+}
+
+func setCloseOnExec(name string) {
+ if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil {
+ for _, i := range fileInfos {
+ link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name()))
+ if link == name {
+ fd, err := strconv.Atoi(i.Name())
+ if err == nil {
+ syscall.CloseOnExec(fd)
+ }
+ }
+ }
+ }
+}
+
+func (devices *DeviceSet) DMLog(level int, file string, line int, dmError int, message string) {
+ // By default libdm sends us all the messages, including debug ones.
+ // We need to filter out messages here and figure out which ones
+ // should be printed.
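+ // With the default DMLogLevel of devicemapper.LogLevelFatal (set in
+ // the var block above), only fatal messages survive the filter below.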
+ if level > DMLogLevel { + return + } + + // FIXME(vbatts) push this back into ./pkg/devicemapper/ + if level <= devicemapper.LogLevelErr { + logrus.Errorf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + } else if level <= devicemapper.LogLevelInfo { + logrus.Infof("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + } else { + // FIXME(vbatts) push this back into ./pkg/devicemapper/ + logrus.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + } +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +func (devices *DeviceSet) ResizePool(size int64) error { + dirname := devices.loopbackDir() + datafilename := path.Join(dirname, "data") + if len(devices.dataDevice) > 0 { + datafilename = devices.dataDevice + } + metadatafilename := path.Join(dirname, "metadata") + if len(devices.metadataDevice) > 0 { + metadatafilename = devices.metadataDevice + } + + datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0) + if datafile == nil { + return err + } + defer datafile.Close() + + fi, err := datafile.Stat() + if fi == nil { + return err + } + + if fi.Size() > size { + return fmt.Errorf("Can't shrink file") + } + + dataloopback := devicemapper.FindLoopDeviceFor(datafile) + if dataloopback == nil { + return fmt.Errorf("Unable to find loopback mount for: %s", datafilename) + } + defer dataloopback.Close() + + metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0) + if metadatafile == nil { + return err + } + defer metadatafile.Close() + + metadataloopback := devicemapper.FindLoopDeviceFor(metadatafile) + if metadataloopback == nil { + return fmt.Errorf("Unable to find loopback mount for: %s", metadatafilename) + } + defer metadataloopback.Close() + + // Grow loopback file + if err := datafile.Truncate(size); err != nil { + return fmt.Errorf("Unable to grow loopback file: %s", err) + } + + // Reload size for loopback device + if err := devicemapper.LoopbackSetCapacity(dataloopback); err != nil { + return fmt.Errorf("Unable to update loopback capacity: %s", err) + } + + // Suspend the pool + if err := devicemapper.SuspendDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("Unable to suspend pool: %s", err) + } + + // Reload with the new block sizes + if err := devicemapper.ReloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { + return fmt.Errorf("Unable to reload pool: %s", err) + } + + // Resume the pool + if err := devicemapper.ResumeDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("Unable to resume pool: %s", err) + } + + return nil +} + +func (devices *DeviceSet) loadTransactionMetaData() error { + jsonData, err := ioutil.ReadFile(devices.transactionMetaFile()) + if err != nil { + // There is no active transaction. This will be the case + // during upgrade. 
+ if os.IsNotExist(err) { + devices.OpenTransactionId = devices.TransactionId + return nil + } + return err + } + + json.Unmarshal(jsonData, &devices.Transaction) + return nil +} + +func (devices *DeviceSet) saveTransactionMetaData() error { + jsonData, err := json.Marshal(&devices.Transaction) + if err != nil { + return fmt.Errorf("Error encoding metadata to json: %s", err) + } + + return devices.writeMetaFile(jsonData, devices.transactionMetaFile()) +} + +func (devices *DeviceSet) removeTransactionMetaData() error { + if err := os.RemoveAll(devices.transactionMetaFile()); err != nil { + return err + } + return nil +} + +func (devices *DeviceSet) rollbackTransaction() error { + logrus.Debugf("Rolling back open transaction: TransactionId=%d hash=%s device_id=%d", devices.OpenTransactionId, devices.DeviceIdHash, devices.DeviceId) + + // A device id might have already been deleted before transaction + // closed. In that case this call will fail. Just leave a message + // in case of failure. + if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceId); err != nil { + logrus.Errorf("Unable to delete device: %s", err) + } + + dinfo := &DevInfo{Hash: devices.DeviceIdHash} + if err := devices.removeMetadata(dinfo); err != nil { + logrus.Errorf("Unable to remove metadata: %s", err) + } else { + devices.markDeviceIdFree(devices.DeviceId) + } + + if err := devices.removeTransactionMetaData(); err != nil { + logrus.Errorf("Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err) + } + + return nil +} + +func (devices *DeviceSet) processPendingTransaction() error { + if err := devices.loadTransactionMetaData(); err != nil { + return err + } + + // If there was open transaction but pool transaction Id is same + // as open transaction Id, nothing to roll back. + if devices.TransactionId == devices.OpenTransactionId { + return nil + } + + // If open transaction Id is less than pool transaction Id, something + // is wrong. Bail out. + if devices.OpenTransactionId < devices.TransactionId { + logrus.Errorf("Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionId, devices.TransactionId) + return nil + } + + // Pool transaction Id is not same as open transaction. There is + // a transaction which was not completed. + if err := devices.rollbackTransaction(); err != nil { + return fmt.Errorf("Rolling back open transaction failed: %s", err) + } + + devices.OpenTransactionId = devices.TransactionId + return nil +} + +func (devices *DeviceSet) loadDeviceSetMetaData() error { + jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile()) + if err != nil { + // For backward compatibility return success if file does + // not exist. 
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+
+ return json.Unmarshal(jsonData, devices)
+}
+
+func (devices *DeviceSet) saveDeviceSetMetaData() error {
+ jsonData, err := json.Marshal(devices)
+ if err != nil {
+ return fmt.Errorf("Error encoding metadata to json: %s", err)
+ }
+
+ return devices.writeMetaFile(jsonData, devices.deviceSetMetaFile())
+}
+
+func (devices *DeviceSet) openTransaction(hash string, DeviceId int) error {
+ devices.allocateTransactionId()
+ devices.DeviceIdHash = hash
+ devices.DeviceId = DeviceId
+ if err := devices.saveTransactionMetaData(); err != nil {
+ return fmt.Errorf("Error saving transaction metadata: %s", err)
+ }
+ return nil
+}
+
+func (devices *DeviceSet) refreshTransaction(DeviceId int) error {
+ devices.DeviceId = DeviceId
+ if err := devices.saveTransactionMetaData(); err != nil {
+ return fmt.Errorf("Error saving transaction metadata: %s", err)
+ }
+ return nil
+}
+
+func (devices *DeviceSet) closeTransaction() error {
+ if err := devices.updatePoolTransactionId(); err != nil {
+ logrus.Debugf("Failed to close Transaction")
+ return err
+ }
+ return nil
+}
+
+func determineDriverCapabilities(version string) error {
+ /*
+ * Driver version 4.27.0 and greater support the deferred removal
+ * feature.
+ */
+
+ logrus.Debugf("devicemapper: driver version is %s", version)
+
+ versionSplit := strings.Split(version, ".")
+ major, err := strconv.Atoi(versionSplit[0])
+ if err != nil {
+ return graphdriver.ErrNotSupported
+ }
+
+ if major > 4 {
+ DriverDeferredRemovalSupport = true
+ return nil
+ }
+
+ if major < 4 {
+ return nil
+ }
+
+ minor, err := strconv.Atoi(versionSplit[1])
+ if err != nil {
+ return graphdriver.ErrNotSupported
+ }
+
+ /*
+ * If major is 4 and minor is 27, then there is no need to
+ * check for patch level as it cannot be less than 0.
+ */
+ if minor >= 27 {
+ DriverDeferredRemovalSupport = true
+ return nil
+ }
+
+ return nil
+}
+
+// Determine the major and minor number of a loopback device
+func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) {
+ stat, err := file.Stat()
+ if err != nil {
+ return 0, 0, err
+ }
+
+ dev := stat.Sys().(*syscall.Stat_t).Rdev
+ majorNum := major(dev)
+ minorNum := minor(dev)
+
+ logrus.Debugf("[devmapper]: Major:Minor for device: %s is:%v:%v", file.Name(), majorNum, minorNum)
+ return majorNum, minorNum, nil
+}
+
+// Given a file which is the backing file of a loopback device, find the
+// loopback device name and its major/minor number.
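+// (On Linux, loopback devices conventionally use major number 7, so a
+// typical result would look like "/dev/loop0", 7, 0.)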
+func getLoopFileDeviceMajMin(filename string) (string, uint64, uint64, error) { + file, err := os.Open(filename) + if err != nil { + logrus.Debugf("[devmapper]: Failed to open file %s", filename) + return "", 0, 0, err + } + + defer file.Close() + loopbackDevice := devicemapper.FindLoopDeviceFor(file) + if loopbackDevice == nil { + return "", 0, 0, fmt.Errorf("[devmapper]: Unable to find loopback mount for: %s", filename) + } + defer loopbackDevice.Close() + + Major, Minor, err := getDeviceMajorMinor(loopbackDevice) + if err != nil { + return "", 0, 0, err + } + return loopbackDevice.Name(), Major, Minor, nil +} + +// Get the major/minor numbers of thin pool data and metadata devices +func (devices *DeviceSet) getThinPoolDataMetaMajMin() (uint64, uint64, uint64, uint64, error) { + var params, poolDataMajMin, poolMetadataMajMin string + + _, _, _, params, err := devicemapper.GetTable(devices.getPoolName()) + if err != nil { + return 0, 0, 0, 0, err + } + + if _, err = fmt.Sscanf(params, "%s %s", &poolMetadataMajMin, &poolDataMajMin); err != nil { + return 0, 0, 0, 0, err + } + + logrus.Debugf("[devmapper]: poolDataMajMin=%s poolMetaMajMin=%s\n", poolDataMajMin, poolMetadataMajMin) + + poolDataMajMinorSplit := strings.Split(poolDataMajMin, ":") + poolDataMajor, err := strconv.ParseUint(poolDataMajMinorSplit[0], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolDataMinor, err := strconv.ParseUint(poolDataMajMinorSplit[1], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolMetadataMajMinorSplit := strings.Split(poolMetadataMajMin, ":") + poolMetadataMajor, err := strconv.ParseUint(poolMetadataMajMinorSplit[0], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolMetadataMinor, err := strconv.ParseUint(poolMetadataMajMinorSplit[1], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + return poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, nil +} + +func (devices *DeviceSet) loadThinPoolLoopBackInfo() error { + poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, err := devices.getThinPoolDataMetaMajMin() + if err != nil { + return err + } + + dirname := devices.loopbackDir() + + // data device has not been passed in. So there should be a data file + // which is being mounted as loop device. + if devices.dataDevice == "" { + datafilename := path.Join(dirname, "data") + dataLoopDevice, dataMajor, dataMinor, err := getLoopFileDeviceMajMin(datafilename) + if err != nil { + return err + } + + // Compare the two + if poolDataMajor == dataMajor && poolDataMinor == dataMinor { + devices.dataDevice = dataLoopDevice + devices.dataLoopFile = datafilename + } + + } + + // metadata device has not been passed in. So there should be a + // metadata file which is being mounted as loop device. 
+ if devices.metadataDevice == "" {
+ metadatafilename := path.Join(dirname, "metadata")
+ metadataLoopDevice, metadataMajor, metadataMinor, err := getLoopFileDeviceMajMin(metadatafilename)
+ if err != nil {
+ return err
+ }
+ if poolMetadataMajor == metadataMajor && poolMetadataMinor == metadataMinor {
+ devices.metadataDevice = metadataLoopDevice
+ devices.metadataLoopFile = metadatafilename
+ }
+ }
+
+ return nil
+}
+
+func (devices *DeviceSet) initDevmapper(doInit bool) error {
+ // give ourselves to libdm as a log handler
+ devicemapper.LogInit(devices)
+
+ version, err := devicemapper.GetDriverVersion()
+ if err != nil {
+ // Can't even get driver version, assume not supported
+ return graphdriver.ErrNotSupported
+ }
+
+ if err := determineDriverCapabilities(version); err != nil {
+ return graphdriver.ErrNotSupported
+ }
+
+ // If the user asked for deferred removal and both the library and the
+ // driver support deferred removal, use it.
+ if EnableDeferredRemoval && DriverDeferredRemovalSupport && devicemapper.LibraryDeferredRemovalSupport == true {
+ logrus.Debugf("devmapper: Deferred removal support enabled.")
+ devices.deferredRemove = true
+ }
+
+ // https://github.com/docker/docker/issues/4036
+ if supported := devicemapper.UdevSetSyncSupport(true); !supported {
+ logrus.Warn("Udev sync is not supported. This will lead to unexpected behavior, data loss and errors. For more information, see https://docs.docker.com/reference/commandline/cli/#daemon-storage-driver-option")
+ }
+
+ if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) {
+ return err
+ }
+
+ // Set the device prefix from the device id and inode of the docker root dir
+
+ st, err := os.Stat(devices.root)
+ if err != nil {
+ return fmt.Errorf("Error looking up dir %s: %s", devices.root, err)
+ }
+ sysSt := st.Sys().(*syscall.Stat_t)
+ // "reg-" stands for "regular file".
+ // In the future we might use "dev-" for "device file", etc.
+ // docker-maj,min[-inode] stands for:
+ // - Managed by docker
+ // - The target of this device is at major <maj> and minor <min>
+ // - If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself.
+ devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino)
+ logrus.Debugf("Generated prefix: %s", devices.devicePrefix)
+
+ // Check for the existence of the thin-pool device
+ logrus.Debugf("Checking for existence of the pool '%s'", devices.getPoolName())
+ info, err := devicemapper.GetInfo(devices.getPoolName())
+ if info == nil {
+ logrus.Debugf("Error device devicemapper.GetInfo: %s", err)
+ return err
+ }
+
+ // It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files
+ // that are not Close-on-exec, and lxc-start will die if it inherits any unexpected files,
+ // so we add this bad hack to make sure it closes itself
+ setCloseOnExec("/dev/mapper/control")
+
+ // Make sure the sparse images exist in <root>/devicemapper/data and
+ // <root>/devicemapper/metadata
+
+ createdLoopback := false
+
+ // If the pool doesn't exist, create it
+ if info.Exists == 0 && devices.thinPoolDevice == "" {
+ logrus.Debugf("Pool doesn't exist. 
Creating it.") + + var ( + dataFile *os.File + metadataFile *os.File + ) + + if devices.dataDevice == "" { + // Make sure the sparse images exist in /devicemapper/data + + hasData := devices.hasImage("data") + + if !doInit && !hasData { + return errors.New("Loopback data file not found") + } + + if !hasData { + createdLoopback = true + } + + data, err := devices.ensureImage("data", devices.dataLoopbackSize) + if err != nil { + logrus.Debugf("Error device ensureImage (data): %s", err) + return err + } + + dataFile, err = devicemapper.AttachLoopDevice(data) + if err != nil { + return err + } + devices.dataLoopFile = data + devices.dataDevice = dataFile.Name() + } else { + dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600) + if err != nil { + return err + } + } + defer dataFile.Close() + + if devices.metadataDevice == "" { + // Make sure the sparse images exist in /devicemapper/metadata + + hasMetadata := devices.hasImage("metadata") + + if !doInit && !hasMetadata { + return errors.New("Loopback metadata file not found") + } + + if !hasMetadata { + createdLoopback = true + } + + metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize) + if err != nil { + logrus.Debugf("Error device ensureImage (metadata): %s", err) + return err + } + + metadataFile, err = devicemapper.AttachLoopDevice(metadata) + if err != nil { + return err + } + devices.metadataLoopFile = metadata + devices.metadataDevice = metadataFile.Name() + } else { + metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600) + if err != nil { + return err + } + } + defer metadataFile.Close() + + if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil { + return err + } + } + + // Pool already exists and caller did not pass us a pool. That means + // we probably created pool earlier and could not remove it as some + // containers were still using it. Detect some of the properties of + // pool, like is it using loop devices. + if info.Exists != 0 && devices.thinPoolDevice == "" { + if err := devices.loadThinPoolLoopBackInfo(); err != nil { + logrus.Debugf("Failed to load thin pool loopback device information:%v", err) + return err + } + } + + // If we didn't just create the data or metadata image, we need to + // load the transaction id and migrate old metadata + if !createdLoopback { + if err := devices.initMetaData(); err != nil { + return err + } + } + + // Right now this loads only NextDeviceId. If there is more metadata + // down the line, we might have to move it earlier. 
+ if err := devices.loadDeviceSetMetaData(); err != nil {
+ return err
+ }
+
+ // Setup the base image
+ if doInit {
+ if err := devices.setupBaseImage(); err != nil {
+ logrus.Debugf("Error device setupBaseImage: %s", err)
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (devices *DeviceSet) AddDevice(hash, baseHash string) error {
+ logrus.Debugf("[deviceset] AddDevice(hash=%s basehash=%s)", hash, baseHash)
+ defer logrus.Debugf("[deviceset] AddDevice(hash=%s basehash=%s) END", hash, baseHash)
+
+ baseInfo, err := devices.lookupDevice(baseHash)
+ if err != nil {
+ return err
+ }
+
+ baseInfo.lock.Lock()
+ defer baseInfo.lock.Unlock()
+
+ devices.Lock()
+ defer devices.Unlock()
+
+ if info, _ := devices.lookupDevice(hash); info != nil {
+ return fmt.Errorf("device %s already exists", hash)
+ }
+
+ if err := devices.createRegisterSnapDevice(hash, baseInfo); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (devices *DeviceSet) deleteDevice(info *DevInfo) error {
+ if devices.doBlkDiscard {
+ // This is a workaround for the kernel not discarding blocks on
+ // the thin pool when we remove a thinp device, so we do it
+ // manually
+ if err := devices.activateDeviceIfNeeded(info); err == nil {
+ if err := devicemapper.BlockDeviceDiscard(info.DevName()); err != nil {
+ logrus.Debugf("Error discarding block on device: %s (ignoring)", err)
+ }
+ }
+ }
+
+ devinfo, _ := devicemapper.GetInfo(info.Name())
+ if devinfo != nil && devinfo.Exists != 0 {
+ if err := devices.removeDevice(info.Name()); err != nil {
+ logrus.Debugf("Error removing device: %s", err)
+ return err
+ }
+ }
+
+ if err := devices.openTransaction(info.Hash, info.DeviceId); err != nil {
+ logrus.Debugf("Error opening transaction hash = %s deviceId = %d", "", info.DeviceId)
+ return err
+ }
+
+ if err := devicemapper.DeleteDevice(devices.getPoolDevName(), info.DeviceId); err != nil {
+ logrus.Debugf("Error deleting device: %s", err)
+ return err
+ }
+
+ if err := devices.unregisterDevice(info.DeviceId, info.Hash); err != nil {
+ return err
+ }
+
+ if err := devices.closeTransaction(); err != nil {
+ return err
+ }
+
+ devices.markDeviceIdFree(info.DeviceId)
+
+ return nil
+}
+
+func (devices *DeviceSet) DeleteDevice(hash string) error {
+ info, err := devices.lookupDevice(hash)
+ if err != nil {
+ return err
+ }
+
+ info.lock.Lock()
+ defer info.lock.Unlock()
+
+ devices.Lock()
+ defer devices.Unlock()
+
+ return devices.deleteDevice(info)
+}
+
+func (devices *DeviceSet) deactivatePool() error {
+ logrus.Debugf("[devmapper] deactivatePool()")
+ defer logrus.Debugf("[devmapper] deactivatePool END")
+ devname := devices.getPoolDevName()
+
+ devinfo, err := devicemapper.GetInfo(devname)
+ if err != nil {
+ return err
+ }
+ if d, err := devicemapper.GetDeps(devname); err == nil {
+ // Access to more Debug output
+ logrus.Debugf("[devmapper] devicemapper.GetDeps() %s: %#v", devname, d)
+ }
+ if devinfo.Exists != 0 {
+ return devicemapper.RemoveDevice(devname)
+ }
+
+ return nil
+}
+
+func (devices *DeviceSet) deactivateDevice(info *DevInfo) error {
+ logrus.Debugf("[devmapper] deactivateDevice(%s)", info.Hash)
+ defer logrus.Debugf("[devmapper] deactivateDevice END(%s)", info.Hash)
+
+ devinfo, err := devicemapper.GetInfo(info.Name())
+ if err != nil {
+ return err
+ }
+
+ if devinfo.Exists == 0 {
+ return nil
+ }
+
+ if devices.deferredRemove {
+ if err := devicemapper.RemoveDeviceDeferred(info.Name()); err != nil {
+ return err
+ }
+ } else {
+ if err := devices.removeDevice(info.Name()); err != nil {
+ return err
+ }
+ }
+
return nil
+}
+
+// Issues the underlying dm remove operation.
+func (devices *DeviceSet) removeDevice(devname string) error {
+ var err error
+
+ logrus.Debugf("[devmapper] removeDevice START(%s)", devname)
+ defer logrus.Debugf("[devmapper] removeDevice END(%s)", devname)
+
+ for i := 0; i < 200; i++ {
+ err = devicemapper.RemoveDevice(devname)
+ if err == nil {
+ break
+ }
+ if err != devicemapper.ErrBusy {
+ return err
+ }
+
+ // If we see EBUSY it may be a transient error,
+ // sleep a bit and retry a few times.
+ devices.Unlock()
+ time.Sleep(100 * time.Millisecond)
+ devices.Lock()
+ }
+
+ return err
+}
+
+func (devices *DeviceSet) cancelDeferredRemoval(info *DevInfo) error {
+ if !devices.deferredRemove {
+ return nil
+ }
+
+ logrus.Debugf("[devmapper] cancelDeferredRemoval START(%s)", info.Name())
+ defer logrus.Debugf("[devmapper] cancelDeferredRemoval END(%s)", info.Name())
+
+ devinfo, err := devicemapper.GetInfoWithDeferred(info.Name())
+
+ if devinfo != nil && devinfo.DeferredRemove == 0 {
+ return nil
+ }
+
+ // Cancel deferred remove
+ for i := 0; i < 100; i++ {
+ err = devicemapper.CancelDeferredRemove(info.Name())
+ if err == nil {
+ break
+ }
+
+ if err == devicemapper.ErrEnxio {
+ // Device is probably already gone. Return success.
+ return nil
+ }
+
+ if err != devicemapper.ErrBusy {
+ return err
+ }
+
+ // If we see EBUSY it may be a transient error,
+ // sleep a bit and retry a few times.
+ devices.Unlock()
+ time.Sleep(100 * time.Millisecond)
+ devices.Lock()
+ }
+ return err
+}
+
+func (devices *DeviceSet) Shutdown() error {
+ logrus.Debugf("[deviceset %s] Shutdown()", devices.devicePrefix)
+ logrus.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root)
+ defer logrus.Debugf("[deviceset %s] Shutdown() END", devices.devicePrefix)
+
+ var devs []*DevInfo
+
+ devices.devicesLock.Lock()
+ for _, info := range devices.Devices {
+ devs = append(devs, info)
+ }
+ devices.devicesLock.Unlock()
+
+ for _, info := range devs {
+ info.lock.Lock()
+ if info.mountCount > 0 {
+ // We use MNT_DETACH here in case it is still busy in some running
+ // container. This means it'll go away from the global scope directly,
+ // and the device will be released when that container dies.
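+ // (MNT_DETACH performs a lazy unmount: the mount point is detached
+ // immediately, while the underlying filesystem is released only once
+ // its last user is gone.)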
+ if err := syscall.Unmount(info.mountPath, syscall.MNT_DETACH); err != nil {
+ logrus.Debugf("Shutdown unmounting %s, error: %s", info.mountPath, err)
+ }
+
+ devices.Lock()
+ if err := devices.deactivateDevice(info); err != nil {
+ logrus.Debugf("Shutdown deactivate %s, error: %s", info.Hash, err)
+ }
+ devices.Unlock()
+ }
+ info.lock.Unlock()
+ }
+
+ info, _ := devices.lookupDevice("")
+ if info != nil {
+ info.lock.Lock()
+ devices.Lock()
+ if err := devices.deactivateDevice(info); err != nil {
+ logrus.Debugf("Shutdown deactivate base, error: %s", err)
+ }
+ devices.Unlock()
+ info.lock.Unlock()
+ }
+
+ devices.Lock()
+ if devices.thinPoolDevice == "" {
+ if err := devices.deactivatePool(); err != nil {
+ logrus.Debugf("Shutdown deactivate pool, error: %s", err)
+ }
+ }
+
+ devices.saveDeviceSetMetaData()
+ devices.Unlock()
+
+ return nil
+}
+
+func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
+ info, err := devices.lookupDevice(hash)
+ if err != nil {
+ return err
+ }
+
+ info.lock.Lock()
+ defer info.lock.Unlock()
+
+ devices.Lock()
+ defer devices.Unlock()
+
+ if info.mountCount > 0 {
+ if path != info.mountPath {
+ return fmt.Errorf("Trying to mount devmapper device in multiple places (%s, %s)", info.mountPath, path)
+ }
+
+ info.mountCount++
+ return nil
+ }
+
+ if err := devices.activateDeviceIfNeeded(info); err != nil {
+ return fmt.Errorf("Error activating devmapper device for '%s': %s", hash, err)
+ }
+
+ var flags uintptr = syscall.MS_MGC_VAL
+
+ fstype, err := ProbeFsType(info.DevName())
+ if err != nil {
+ return err
+ }
+
+ options := ""
+
+ if fstype == "xfs" {
+ // XFS needs nouuid or it can't mount filesystems with the same fs UUID
+ options = joinMountOptions(options, "nouuid")
+ }
+
+ options = joinMountOptions(options, devices.mountOptions)
+ options = joinMountOptions(options, label.FormatMountLabel("", mountLabel))
+
+ if err := syscall.Mount(info.DevName(), path, fstype, flags, options); err != nil {
+ return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), path, err)
+ }
+
+ info.mountCount = 1
+ info.mountPath = path
+
+ return nil
+}
+
+func (devices *DeviceSet) UnmountDevice(hash string) error {
+ logrus.Debugf("[devmapper] UnmountDevice(hash=%s)", hash)
+ defer logrus.Debugf("[devmapper] UnmountDevice(hash=%s) END", hash)
+
+ info, err := devices.lookupDevice(hash)
+ if err != nil {
+ return err
+ }
+
+ info.lock.Lock()
+ defer info.lock.Unlock()
+
+ devices.Lock()
+ defer devices.Unlock()
+
+ if info.mountCount == 0 {
+ return fmt.Errorf("UnmountDevice: device not-mounted id %s", hash)
+ }
+
+ info.mountCount--
+ if info.mountCount > 0 {
+ return nil
+ }
+
+ logrus.Debugf("[devmapper] Unmount(%s)", info.mountPath)
+ if err := syscall.Unmount(info.mountPath, syscall.MNT_DETACH); err != nil {
+ return err
+ }
+ logrus.Debugf("[devmapper] Unmount done")
+
+ if err := devices.deactivateDevice(info); err != nil {
+ return err
+ }
+
+ info.mountPath = ""
+
+ return nil
+}
+
+func (devices *DeviceSet) HasDevice(hash string) bool {
+ devices.Lock()
+ defer devices.Unlock()
+
+ info, _ := devices.lookupDevice(hash)
+ return info != nil
+}
+
+func (devices *DeviceSet) HasActivatedDevice(hash string) bool {
+ info, _ := devices.lookupDevice(hash)
+ if info == nil {
+ return false
+ }
+
+ info.lock.Lock()
+ defer info.lock.Unlock()
+
+ devices.Lock()
+ defer devices.Unlock()
+
+ devinfo, _ := devicemapper.GetInfo(info.Name())
+ return devinfo != nil && devinfo.Exists != 0
+}
+
+func (devices *DeviceSet) List() []string {
+
devices.Lock() + defer devices.Unlock() + + devices.devicesLock.Lock() + ids := make([]string, len(devices.Devices)) + i := 0 + for k := range devices.Devices { + ids[i] = k + i++ + } + devices.devicesLock.Unlock() + + return ids +} + +func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { + var params string + _, sizeInSectors, _, params, err = devicemapper.GetStatus(devName) + if err != nil { + return + } + if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err == nil { + return + } + return +} + +func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { + info, err := devices.lookupDevice(hash) + if err != nil { + return nil, err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + status := &DevStatus{ + DeviceId: info.DeviceId, + Size: info.Size, + TransactionId: info.TransactionId, + } + + if err := devices.activateDeviceIfNeeded(info); err != nil { + return nil, fmt.Errorf("Error activating devmapper device for '%s': %s", hash, err) + } + + sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()) + + if err != nil { + return nil, err + } + + status.SizeInSectors = sizeInSectors + status.MappedSectors = mappedSectors + status.HighestMappedSector = highestMappedSector + + return status, nil +} + +func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionId, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { + var params string + if _, totalSizeInSectors, _, params, err = devicemapper.GetStatus(devices.getPoolName()); err == nil { + _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionId, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) + } + return +} + +// DataDevicePath returns the path to the data storage for this deviceset, +// regardless of loopback or block device +func (devices *DeviceSet) DataDevicePath() string { + return devices.dataDevice +} + +// MetadataDevicePath returns the path to the metadata storage for this deviceset, +// regardless of loopback or block device +func (devices *DeviceSet) MetadataDevicePath() string { + return devices.metadataDevice +} + +func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) { + buf := new(syscall.Statfs_t) + if err := syscall.Statfs(loopFile, buf); err != nil { + logrus.Warnf("Couldn't stat loopfile filesystem %v: %v", loopFile, err) + return 0, err + } + return buf.Bfree * uint64(buf.Bsize), nil +} + +func (devices *DeviceSet) isRealFile(loopFile string) (bool, error) { + if loopFile != "" { + fi, err := os.Stat(loopFile) + if err != nil { + logrus.Warnf("Couldn't stat loopfile %v: %v", loopFile, err) + return false, err + } + return fi.Mode().IsRegular(), nil + } + return false, nil +} + +// Status returns the current status of this deviceset +func (devices *DeviceSet) Status() *Status { + devices.Lock() + defer devices.Unlock() + + status := &Status{} + + status.PoolName = devices.getPoolName() + status.DataFile = devices.DataDevicePath() + status.DataLoopback = devices.dataLoopFile + status.MetadataFile = devices.MetadataDevicePath() + status.MetadataLoopback = devices.metadataLoopFile + status.UdevSyncSupported = devicemapper.UdevSyncSupported() + status.DeferredRemoveEnabled = devices.deferredRemove + + totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() + if err == nil { + // Convert from blocks to bytes 
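+ // Worked example with the default 64K thinp block size: a block is
+ // 128 sectors of 512 bytes, so blockSizeInSectors below comes out
+ // to 128 and each block is 128 * 512 = 65536 bytes.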
+ blockSizeInSectors := totalSizeInSectors / dataTotal
+
+ status.Data.Used = dataUsed * blockSizeInSectors * 512
+ status.Data.Total = dataTotal * blockSizeInSectors * 512
+ status.Data.Available = status.Data.Total - status.Data.Used
+
+ // metadata blocks are always 4k
+ status.Metadata.Used = metadataUsed * 4096
+ status.Metadata.Total = metadataTotal * 4096
+ status.Metadata.Available = status.Metadata.Total - status.Metadata.Used
+
+ status.SectorSize = blockSizeInSectors * 512
+
+ if check, _ := devices.isRealFile(devices.dataLoopFile); check {
+ actualSpace, err := devices.getUnderlyingAvailableSpace(devices.dataLoopFile)
+ if err == nil && actualSpace < status.Data.Available {
+ status.Data.Available = actualSpace
+ }
+ }
+
+ if check, _ := devices.isRealFile(devices.metadataLoopFile); check {
+ actualSpace, err := devices.getUnderlyingAvailableSpace(devices.metadataLoopFile)
+ if err == nil && actualSpace < status.Metadata.Available {
+ status.Metadata.Available = actualSpace
+ }
+ }
+ }
+
+ return status
+}
+
+// ExportDeviceMetadata exports image/container metadata for the device
+// with the given hash, for use in docker inspect.
+func (devices *DeviceSet) ExportDeviceMetadata(hash string) (*DeviceMetadata, error) {
+ info, err := devices.lookupDevice(hash)
+ if err != nil {
+ return nil, err
+ }
+
+ info.lock.Lock()
+ defer info.lock.Unlock()
+
+ metadata := &DeviceMetadata{info.DeviceId, info.Size, info.Name()}
+ return metadata, nil
+}
+
+func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error) {
+ devicemapper.SetDevDir("/dev")
+
+ devices := &DeviceSet{
+ root: root,
+ MetaData: MetaData{Devices: make(map[string]*DevInfo)},
+ dataLoopbackSize: DefaultDataLoopbackSize,
+ metaDataLoopbackSize: DefaultMetaDataLoopbackSize,
+ baseFsSize: DefaultBaseFsSize,
+ overrideUdevSyncCheck: DefaultUdevSyncOverride,
+ filesystem: "ext4",
+ doBlkDiscard: true,
+ thinpBlockSize: DefaultThinpBlockSize,
+ deviceIdMap: make([]byte, DeviceIdMapSz),
+ }
+
+ foundBlkDiscard := false
+ for _, option := range options {
+ key, val, err := parsers.ParseKeyValueOpt(option)
+ if err != nil {
+ return nil, err
+ }
+ key = strings.ToLower(key)
+ switch key {
+ case "dm.basesize":
+ size, err := units.RAMInBytes(val)
+ if err != nil {
+ return nil, err
+ }
+ devices.baseFsSize = uint64(size)
+ case "dm.loopdatasize":
+ size, err := units.RAMInBytes(val)
+ if err != nil {
+ return nil, err
+ }
+ devices.dataLoopbackSize = size
+ case "dm.loopmetadatasize":
+ size, err := units.RAMInBytes(val)
+ if err != nil {
+ return nil, err
+ }
+ devices.metaDataLoopbackSize = size
+ case "dm.fs":
+ if val != "ext4" && val != "xfs" {
+ return nil, fmt.Errorf("Unsupported filesystem %s\n", val)
+ }
+ devices.filesystem = val
+ case "dm.mkfsarg":
+ devices.mkfsArgs = append(devices.mkfsArgs, val)
+ case "dm.mountopt":
+ devices.mountOptions = joinMountOptions(devices.mountOptions, val)
+ case "dm.metadatadev":
+ devices.metadataDevice = val
+ case "dm.datadev":
+ devices.dataDevice = val
+ case "dm.thinpooldev":
+ devices.thinPoolDevice = strings.TrimPrefix(val, "/dev/mapper/")
+ case "dm.blkdiscard":
+ foundBlkDiscard = true
+ devices.doBlkDiscard, err = strconv.ParseBool(val)
+ if err != nil {
+ return nil, err
+ }
+ case "dm.blocksize":
+ size, err := units.RAMInBytes(val)
+ if err != nil {
+ return nil, err
+ }
+ // convert to 512b sectors
+ devices.thinpBlockSize = uint32(size) >> 9
+ case "dm.override_udev_sync_check":
+ devices.overrideUdevSyncCheck, err = strconv.ParseBool(val)
+ if err != nil {
+ return nil, err
+ }
+
+ case "dm.use_deferred_removal":
+
EnableDeferredRemoval, err = strconv.ParseBool(val)
+ if err != nil {
+ return nil, err
+ }
+
+ default:
+ return nil, fmt.Errorf("Unknown option %s\n", key)
+ }
+ }
+
+ // By default, don't do the blk discard hack on raw devices; it's rarely useful and is expensive
+ if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") {
+ devices.doBlkDiscard = false
+ }
+
+ if err := devices.initDevmapper(doInit); err != nil {
+ return nil, err
+ }
+
+ return devices, nil
+}
diff --git a/daemon/graphdriver/devmapper/devmapper_doc.go b/daemon/graphdriver/devmapper/devmapper_doc.go
new file mode 100644
index 00000000..c1c3e389
--- /dev/null
+++ b/daemon/graphdriver/devmapper/devmapper_doc.go
@@ -0,0 +1,106 @@
+package devmapper
+
+// Definition of struct dm_task and sub structures (from lvm2)
+//
+// struct dm_ioctl {
+// /*
+// * The version number is made up of three parts:
+// * major - no backward or forward compatibility,
+// * minor - only backwards compatible,
+// * patch - both backwards and forwards compatible.
+// *
+// * All clients of the ioctl interface should fill in the
+// * version number of the interface that they were
+// * compiled with.
+// *
+// * All recognised ioctl commands (ie. those that don't
+// * return -ENOTTY) fill out this field, even if the
+// * command failed.
+// */
+// uint32_t version[3]; /* in/out */
+// uint32_t data_size; /* total size of data passed in
+// * including this struct */
+
+// uint32_t data_start; /* offset to start of data
+// * relative to start of this struct */
+
+// uint32_t target_count; /* in/out */
+// int32_t open_count; /* out */
+// uint32_t flags; /* in/out */
+
+// /*
+// * event_nr holds either the event number (input and output) or the
+// * udev cookie value (input only).
+// * The DM_DEV_WAIT ioctl takes an event number as input.
+// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls
+// * use the field as a cookie to return in the DM_COOKIE
+// * variable with the uevents they issue.
+// * For output, the ioctls return the event number, not the cookie.
+// */
+// uint32_t event_nr; /* in/out */
+// uint32_t padding;
+
+// uint64_t dev; /* in/out */
+
+// char name[DM_NAME_LEN]; /* device name */
+// char uuid[DM_UUID_LEN]; /* unique identifier for
+// * the block device */
+// char data[7]; /* padding or data */
+// };
+
+// struct target {
+// uint64_t start;
+// uint64_t length;
+// char *type;
+// char *params;
+
+// struct target *next;
+// };
+
+// typedef enum {
+// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */
+// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */
+// } dm_add_node_t;
+
+// struct dm_task {
+// int type;
+// char *dev_name;
+// char *mangled_dev_name;
+
+// struct target *head, *tail;
+
+// int read_only;
+// uint32_t event_nr;
+// int major;
+// int minor;
+// int allow_default_major_fallback;
+// uid_t uid;
+// gid_t gid;
+// mode_t mode;
+// uint32_t read_ahead;
+// uint32_t read_ahead_flags;
+// union {
+// struct dm_ioctl *v4;
+// } dmi;
+// char *newname;
+// char *message;
+// char *geometry;
+// uint64_t sector;
+// int no_flush;
+// int no_open_count;
+// int skip_lockfs;
+// int query_inactive_table;
+// int suppress_identical_reload;
+// dm_add_node_t add_node;
+// uint64_t existing_table_size;
+// int cookie_set;
+// int new_uuid;
+// int secure_data;
+// int retry_remove;
+// int enable_checks;
+// int expected_errno;

+// char *uuid;
+// char *mangled_uuid;
+// };
+//
diff --git a/daemon/graphdriver/devmapper/devmapper_test.go b/daemon/graphdriver/devmapper/devmapper_test.go
new file mode 100644
index 00000000..60006af5
--- /dev/null
+++ b/daemon/graphdriver/devmapper/devmapper_test.go
@@ -0,0 +1,42 @@
+// +build linux
+
+package devmapper
+
+import (
+ "testing"
+
+ "github.com/docker/docker/daemon/graphdriver/graphtest"
+)
+
+func init() {
+ // Reduce the size of the base fs and loopback for the tests
+ DefaultDataLoopbackSize = 300 * 1024 * 1024
+ DefaultMetaDataLoopbackSize = 200 * 1024 * 1024
+ DefaultBaseFsSize = 300 * 1024 * 1024
+ DefaultUdevSyncOverride = true
+ if err := graphtest.InitLoopbacks(); err != nil {
+ panic(err)
+ }
+}
+
+// This avoids creating a new driver for each test if all tests are run
+// Make sure to put new tests between TestDevmapperSetup and TestDevmapperTeardown
+func TestDevmapperSetup(t *testing.T) {
+ graphtest.GetDriver(t, "devicemapper")
+}
+
+func TestDevmapperCreateEmpty(t *testing.T) {
+ graphtest.DriverTestCreateEmpty(t, "devicemapper")
+}
+
+func TestDevmapperCreateBase(t *testing.T) {
+ graphtest.DriverTestCreateBase(t, "devicemapper")
+}
+
+func TestDevmapperCreateSnap(t *testing.T) {
+ graphtest.DriverTestCreateSnap(t, "devicemapper")
+}
+
+func TestDevmapperTeardown(t *testing.T) {
+ graphtest.PutDriver(t)
+}
diff --git a/daemon/graphdriver/devmapper/driver.go b/daemon/graphdriver/devmapper/driver.go
new file mode 100644
index 00000000..237c4b04
--- /dev/null
+++ b/daemon/graphdriver/devmapper/driver.go
@@ -0,0 +1,190 @@
+// +build linux
+
+package devmapper
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "strconv"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/daemon/graphdriver"
+ "github.com/docker/docker/pkg/devicemapper"
+ "github.com/docker/docker/pkg/mount"
+ "github.com/docker/docker/pkg/units"
+)
+
+func init() {
+ graphdriver.Register("devicemapper", Init)
+}
+
+// Placeholder interfaces, to be replaced
+// at integration.

+// End of placeholder interfaces.
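+
+// A minimal usage sketch (the home path and option values below are
+// hypothetical; Init and the dm.* option names are defined in this
+// package):
+//
+//	driver, err := Init("/var/lib/docker/devicemapper",
+//		[]string{"dm.basesize=20G", "dm.fs=xfs"})
+//	if err != nil {
+//		// handle initialization failure
+//	}
+//	defer driver.Cleanup()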
+ +type Driver struct { + *DeviceSet + home string +} + +var backingFs = "" + +func Init(home string, options []string) (graphdriver.Driver, error) { + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + deviceSet, err := NewDeviceSet(home, true, options) + if err != nil { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + d := &Driver{ + DeviceSet: deviceSet, + home: home, + } + + return graphdriver.NaiveDiffDriver(d), nil +} + +func (d *Driver) String() string { + return "devicemapper" +} + +func (d *Driver) Status() [][2]string { + s := d.DeviceSet.Status() + + status := [][2]string{ + {"Pool Name", s.PoolName}, + {"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(float64(s.SectorSize)))}, + {"Backing Filesystem", backingFs}, + {"Data file", s.DataFile}, + {"Metadata file", s.MetadataFile}, + {"Data Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Used)))}, + {"Data Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Total)))}, + {"Data Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Available)))}, + {"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Used)))}, + {"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Total)))}, + {"Metadata Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Available)))}, + {"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)}, + {"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)}, + } + if len(s.DataLoopback) > 0 { + status = append(status, [2]string{"Data loop file", s.DataLoopback}) + } + if len(s.MetadataLoopback) > 0 { + status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback}) + } + if vStr, err := devicemapper.GetLibraryVersion(); err == nil { + status = append(status, [2]string{"Library Version", vStr}) + } + return status +} + +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + m, err := d.DeviceSet.ExportDeviceMetadata(id) + + if err != nil { + return nil, err + } + + metadata := make(map[string]string) + metadata["DeviceId"] = strconv.Itoa(m.deviceId) + metadata["DeviceSize"] = strconv.FormatUint(m.deviceSize, 10) + metadata["DeviceName"] = m.deviceName + return metadata, nil +} + +func (d *Driver) Cleanup() error { + err := d.DeviceSet.Shutdown() + + if err2 := mount.Unmount(d.home); err == nil { + err = err2 + } + + return err +} + +func (d *Driver) Create(id, parent string) error { + if err := d.DeviceSet.AddDevice(id, parent); err != nil { + return err + } + + return nil +} + +func (d *Driver) Remove(id string) error { + if !d.DeviceSet.HasDevice(id) { + // Consider removing a non-existing device a no-op + // This is useful to be able to progress on container removal + // if the underlying device has gone away due to earlier errors + return nil + } + + // This assumes the device has been properly Get/Put:ed and thus is unmounted + if err := d.DeviceSet.DeleteDevice(id); err != nil { + return err + } + + mp := path.Join(d.home, "mnt", id) + if err := os.RemoveAll(mp); err != nil && !os.IsNotExist(err) { + return err + } + + return nil +} + +func (d *Driver) Get(id, mountLabel string) (string, error) { + mp := path.Join(d.home, "mnt", id) + + // Create the target directories if they don't exist + if err := os.MkdirAll(mp, 0755); err != nil && !os.IsExist(err) { + return "", err + } + + // Mount the 
device
+	if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil {
+		return "", err
+	}
+
+	rootFs := path.Join(mp, "rootfs")
+	if err := os.MkdirAll(rootFs, 0755); err != nil && !os.IsExist(err) {
+		d.DeviceSet.UnmountDevice(id)
+		return "", err
+	}
+
+	idFile := path.Join(mp, "id")
+	if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) {
+		// Create an "id" file with the container/image id in it to help reconstruct this in case
+		// of later problems
+		if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil {
+			d.DeviceSet.UnmountDevice(id)
+			return "", err
+		}
+	}
+
+	return rootFs, nil
+}
+
+func (d *Driver) Put(id string) error {
+	err := d.DeviceSet.UnmountDevice(id)
+	if err != nil {
+		logrus.Errorf("Error unmounting device %s: %s", id, err)
+	}
+	return err
+}
+
+func (d *Driver) Exists(id string) bool {
+	return d.DeviceSet.HasDevice(id)
+}
diff --git a/daemon/graphdriver/devmapper/mount.go b/daemon/graphdriver/devmapper/mount.go
new file mode 100644
index 00000000..dc9026e4
--- /dev/null
+++ b/daemon/graphdriver/devmapper/mount.go
@@ -0,0 +1,87 @@
+// +build linux
+
+package devmapper
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"path/filepath"
+	"syscall"
+)
+
+// FIXME: this is copy-pasted from the aufs driver.
+// It should be moved into the core.
+
+func Mounted(mountpoint string) (bool, error) {
+	mntpoint, err := os.Stat(mountpoint)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return false, nil
+		}
+		return false, err
+	}
+	parent, err := os.Stat(filepath.Join(mountpoint, ".."))
+	if err != nil {
+		return false, err
+	}
+	mntpointSt := mntpoint.Sys().(*syscall.Stat_t)
+	parentSt := parent.Sys().(*syscall.Stat_t)
+	return mntpointSt.Dev != parentSt.Dev, nil
+}
+
+type probeData struct {
+	fsName string
+	magic  string
+	offset uint64
+}
+
+func ProbeFsType(device string) (string, error) {
+	probes := []probeData{
+		{"btrfs", "_BHRfS_M", 0x10040},
+		{"ext4", "\123\357", 0x438},
+		{"xfs", "XFSB", 0},
+	}
+
+	maxLen := uint64(0)
+	for _, p := range probes {
+		l := p.offset + uint64(len(p.magic))
+		if l > maxLen {
+			maxLen = l
+		}
+	}
+
+	file, err := os.Open(device)
+	if err != nil {
+		return "", err
+	}
+	defer file.Close()
+
+	buffer := make([]byte, maxLen)
+	l, err := file.Read(buffer)
+	if err != nil {
+		return "", err
+	}
+
+	if uint64(l) != maxLen {
+		return "", fmt.Errorf("unable to detect filesystem type of %s, short read", device)
+	}
+
+	for _, p := range probes {
+		if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) {
+			return p.fsName, nil
+		}
+	}
+
+	return "", fmt.Errorf("Unknown filesystem type on %s", device)
+}
+
+func joinMountOptions(a, b string) string {
+	if a == "" {
+		return b
+	}
+	if b == "" {
+		return a
+	}
+	return a + "," + b
+}
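
ProbeFsType reads just enough of the device to compare each known filesystem's magic bytes at its published offset (the ext2/3/4 magic 0xEF53 at offset 0x438, for example). A minimal usage sketch from inside this package; the device path is hypothetical:

// probeExample shows how ProbeFsType is meant to be called; the device
// path is only an example.
func probeExample() {
	fsType, err := ProbeFsType("/dev/xvdf")
	if err != nil {
		// Either the read failed or none of the known magics matched.
		fmt.Printf("could not probe filesystem: %v\n", err)
		return
	}
	fmt.Printf("device is formatted as %s\n", fsType) // e.g. "ext4"
}
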
diff --git a/daemon/graphdriver/driver.go b/daemon/graphdriver/driver.go
new file mode 100644
index 00000000..2f44fddc
--- /dev/null
+++ b/daemon/graphdriver/driver.go
@@ -0,0 +1,198 @@
+package graphdriver
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/archive"
+)
+
+type FsMagic uint32
+
+const (
+	FsMagicUnsupported = FsMagic(0x00000000)
+)
+
+var (
+	DefaultDriver string
+	// All registered drivers
+	drivers map[string]InitFunc
+
+	ErrNotSupported   = errors.New("driver not supported")
+	ErrPrerequisites  = errors.New("prerequisites for driver not satisfied (wrong filesystem?)")
+	ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver")
+)
+
+type InitFunc func(root string, options []string) (Driver, error)
+
+// ProtoDriver defines the basic capabilities of a driver.
+// This interface exists solely to be a minimum set of methods
+// for client code which chooses not to implement the entire Driver
+// interface and use the NaiveDiffDriver wrapper constructor.
+//
+// Use of ProtoDriver directly by client code is not recommended.
+type ProtoDriver interface {
+	// String returns a string representation of this driver.
+	String() string
+	// Create creates a new, empty, filesystem layer with the
+	// specified id and parent. Parent may be "".
+	Create(id, parent string) error
+	// Remove attempts to remove the filesystem layer with this id.
+	Remove(id string) error
+	// Get returns the mountpoint for the layered filesystem referred
+	// to by this id. You can optionally specify a mountLabel or "".
+	// Returns the absolute path to the mounted layered filesystem.
+	Get(id, mountLabel string) (dir string, err error)
+	// Put releases the system resources for the specified id,
+	// e.g., unmounting the layered filesystem.
+	Put(id string) error
+	// Exists returns whether a filesystem layer with the specified
+	// ID exists on this driver.
+	Exists(id string) bool
+	// Status returns a set of key-value pairs which give low
+	// level diagnostic status about this driver.
+	Status() [][2]string
+	// GetMetadata returns a set of key-value pairs which give low level
+	// information about the image/container the driver is managing.
+	GetMetadata(id string) (map[string]string, error)
+	// Cleanup performs necessary tasks to release resources
+	// held by the driver, e.g., unmounting all layered filesystems
+	// known to this driver.
+	Cleanup() error
+}
+
+// Driver is the interface for layered/snapshot file system drivers.
+type Driver interface {
+	ProtoDriver
+	// Diff produces an archive of the changes between the specified
+	// layer and its parent layer which may be "".
+	Diff(id, parent string) (archive.Archive, error)
+	// Changes produces a list of changes between the specified layer
+	// and its parent layer. If parent is "", then all changes will be ADD changes.
+	Changes(id, parent string) ([]archive.Change, error)
+	// ApplyDiff extracts the changeset from the given diff into the
+	// layer with the specified id and parent, returning the size of the
+	// new layer in bytes.
+	// The archive.ArchiveReader must be an uncompressed stream.
+	ApplyDiff(id, parent string, diff archive.ArchiveReader) (size int64, err error)
+	// DiffSize calculates the changes between the specified id
+	// and its parent and returns the size in bytes of the changes
+	// relative to its base filesystem directory.
+	DiffSize(id, parent string) (size int64, err error)
+}
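
To make the contract concrete, here is a minimal sketch of a backend built on these two pieces: a bare ProtoDriver that stores every layer as a plain directory, registered under a hypothetical name and promoted to a full Driver by the NaiveDiffDriver wrapper (defined in fsdiff.go later in this patch). All names in the sketch are illustrative assumptions, not part of this tree:

// Hypothetical "dirs" backend: the smallest useful ProtoDriver.
package dirs

import (
	"os"
	"path/filepath"

	"github.com/docker/docker/daemon/graphdriver"
)

func init() {
	// Self-register so GetDriver/New can find this backend by name.
	graphdriver.Register("dirs", Init)
}

// Init matches graphdriver.InitFunc; home is <graph root>/dirs.
func Init(home string, options []string) (graphdriver.Driver, error) {
	if err := os.MkdirAll(home, 0700); err != nil {
		return nil, err
	}
	// NaiveDiffDriver supplies Diff/Changes/ApplyDiff/DiffSize on top.
	return graphdriver.NaiveDiffDriver(&driver{home: home}), nil
}

type driver struct{ home string }

func (d *driver) dir(id string) string { return filepath.Join(d.home, id) }

func (d *driver) String() string { return "dirs" }

// Create ignores parent here; a real driver would clone or snapshot it.
func (d *driver) Create(id, parent string) error { return os.Mkdir(d.dir(id), 0755) }

func (d *driver) Remove(id string) error { return os.RemoveAll(d.dir(id)) }

func (d *driver) Get(id, mountLabel string) (string, error) { return d.dir(id), nil }

// Put has nothing to release: plain directories are never mounted.
func (d *driver) Put(id string) error { return nil }

func (d *driver) Exists(id string) bool {
	_, err := os.Stat(d.dir(id))
	return err == nil
}

func (d *driver) Status() [][2]string { return nil }

func (d *driver) GetMetadata(id string) (map[string]string, error) { return nil, nil }

func (d *driver) Cleanup() error { return nil }
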
+
+func init() {
+	drivers = make(map[string]InitFunc)
+}
+
+func Register(name string, initFunc InitFunc) error {
+	if _, exists := drivers[name]; exists {
+		return fmt.Errorf("Name already registered %s", name)
+	}
+	drivers[name] = initFunc
+
+	return nil
+}
+
+func GetDriver(name, home string, options []string) (Driver, error) {
+	if initFunc, exists := drivers[name]; exists {
+		return initFunc(filepath.Join(home, name), options)
+	}
+	logrus.Errorf("Failed to GetDriver graph %s %s", name, home)
+	return nil, ErrNotSupported
+}
+
+func New(root string, options []string) (driver Driver, err error) {
+	for _, name := range []string{os.Getenv("DOCKER_DRIVER"), DefaultDriver} {
+		if name != "" {
+			logrus.Debugf("[graphdriver] trying provided driver %q", name) // so the logs show specified driver
+			return GetDriver(name, root, options)
+		}
+	}
+
+	// Guess based on prior driver state
+	priorDrivers := scanPriorDrivers(root)
+	for _, name := range priority {
+		if name == "vfs" {
+			// don't use vfs even if there is state present.
+			continue
+		}
+		for _, prior := range priorDrivers {
+			// of the state found from prior drivers, check in order of our priority
+			// which we would prefer
+			if prior == name {
+				driver, err = GetDriver(name, root, options)
+				if err != nil {
+					// unlike below, we will return error here, because there is prior
+					// state, and now it is no longer supported/prereq/compatible, so
+					// something changed and needs attention. Otherwise the daemon's
+					// images would just "disappear".
+					logrus.Errorf("[graphdriver] prior storage driver %q failed: %s", name, err)
+					return nil, err
+				}
+				if err := checkPriorDriver(name, root); err != nil {
+					return nil, err
+				}
+				logrus.Infof("[graphdriver] using prior storage driver %q", name)
+				return driver, nil
+			}
+		}
+	}
+
+	// Then try the drivers in priority order
+	for _, name := range priority {
+		driver, err = GetDriver(name, root, options)
+		if err != nil {
+			if err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS {
+				continue
+			}
+			return nil, err
+		}
+		return driver, nil
+	}
+
+	// Check all registered drivers if no priority driver is found
+	for _, initFunc := range drivers {
+		if driver, err = initFunc(root, options); err != nil {
+			if err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS {
+				continue
+			}
+			return nil, err
+		}
+		return driver, nil
+	}
+	return nil, fmt.Errorf("No supported storage backend found")
+}
+
+// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers
+func scanPriorDrivers(root string) []string {
+	priorDrivers := []string{}
+	for driver := range drivers {
+		p := filepath.Join(root, driver)
+		if _, err := os.Stat(p); err == nil && driver != "vfs" {
+			priorDrivers = append(priorDrivers, driver)
+		}
+	}
+	return priorDrivers
+}
+
+func checkPriorDriver(name, root string) error {
+	priorDrivers := []string{}
+	for _, prior := range scanPriorDrivers(root) {
+		if prior != name && prior != "vfs" {
+			if _, err := os.Stat(filepath.Join(root, prior)); err == nil {
+				priorDrivers = append(priorDrivers, prior)
+			}
+		}
+	}
+
+	if len(priorDrivers) > 0 {
+		return fmt.Errorf("%q contains other graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s <DRIVER>)", root, strings.Join(priorDrivers, ","))
+	}
+	return nil
+}
diff --git a/daemon/graphdriver/driver_linux.go b/daemon/graphdriver/driver_linux.go
new file mode 100644
index 00000000..88d88e26
--- /dev/null
+++ 
b/daemon/graphdriver/driver_linux.go @@ -0,0 +1,65 @@ +// +build linux + +package graphdriver + +import ( + "path/filepath" + "syscall" +) + +const ( + FsMagicAufs = FsMagic(0x61756673) + FsMagicBtrfs = FsMagic(0x9123683E) + FsMagicCramfs = FsMagic(0x28cd3d45) + FsMagicExtfs = FsMagic(0x0000EF53) + FsMagicF2fs = FsMagic(0xF2F52010) + FsMagicJffs2Fs = FsMagic(0x000072b6) + FsMagicJfs = FsMagic(0x3153464a) + FsMagicNfsFs = FsMagic(0x00006969) + FsMagicRamFs = FsMagic(0x858458f6) + FsMagicReiserFs = FsMagic(0x52654973) + FsMagicSmbFs = FsMagic(0x0000517B) + FsMagicSquashFs = FsMagic(0x73717368) + FsMagicTmpFs = FsMagic(0x01021994) + FsMagicXfs = FsMagic(0x58465342) + FsMagicZfs = FsMagic(0x2fc12fc1) +) + +var ( + // Slice of drivers that should be used in an order + priority = []string{ + "aufs", + "btrfs", + "zfs", + "devicemapper", + "overlay", + "vfs", + } + + FsNames = map[FsMagic]string{ + FsMagicAufs: "aufs", + FsMagicBtrfs: "btrfs", + FsMagicCramfs: "cramfs", + FsMagicExtfs: "extfs", + FsMagicF2fs: "f2fs", + FsMagicJffs2Fs: "jffs2", + FsMagicJfs: "jfs", + FsMagicNfsFs: "nfs", + FsMagicRamFs: "ramfs", + FsMagicReiserFs: "reiserfs", + FsMagicSmbFs: "smb", + FsMagicSquashFs: "squashfs", + FsMagicTmpFs: "tmpfs", + FsMagicUnsupported: "unsupported", + FsMagicXfs: "xfs", + FsMagicZfs: "zfs", + } +) + +func GetFSMagic(rootpath string) (FsMagic, error) { + var buf syscall.Statfs_t + if err := syscall.Statfs(filepath.Dir(rootpath), &buf); err != nil { + return 0, err + } + return FsMagic(buf.Type), nil +} diff --git a/daemon/graphdriver/driver_unsupported.go b/daemon/graphdriver/driver_unsupported.go new file mode 100644 index 00000000..3f368648 --- /dev/null +++ b/daemon/graphdriver/driver_unsupported.go @@ -0,0 +1,14 @@ +// +build !linux,!windows + +package graphdriver + +var ( + // Slice of drivers that should be used in an order + priority = []string{ + "unsupported", + } +) + +func GetFSMagic(rootpath string) (FsMagic, error) { + return FsMagicUnsupported, nil +} diff --git a/daemon/graphdriver/driver_windows.go b/daemon/graphdriver/driver_windows.go new file mode 100644 index 00000000..387f666c --- /dev/null +++ b/daemon/graphdriver/driver_windows.go @@ -0,0 +1,29 @@ +package graphdriver + +import ( + "github.com/docker/docker/pkg/archive" + "github.com/microsoft/hcsshim" +) + +type WindowsGraphDriver interface { + Driver + CopyDiff(id, sourceId string, parentLayerPaths []string) error + LayerIdsToPaths(ids []string) []string + Info() hcsshim.DriverInfo + Export(id string, parentLayerPaths []string) (archive.Archive, error) + Import(id string, layerData archive.ArchiveReader, parentLayerPaths []string) (int64, error) +} + +var ( + // Slice of drivers that should be used in order + priority = []string{ + "windowsfilter", + "windowsdiff", + "vfs", + } +) + +func GetFSMagic(rootpath string) (FsMagic, error) { + // Note it is OK to return FsMagicUnsupported on Windows. + return FsMagicUnsupported, nil +} diff --git a/daemon/graphdriver/fsdiff.go b/daemon/graphdriver/fsdiff.go new file mode 100644 index 00000000..bee9682e --- /dev/null +++ b/daemon/graphdriver/fsdiff.go @@ -0,0 +1,150 @@ +// +build daemon + +package graphdriver + +import ( + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/ioutils" +) + +// naiveDiffDriver takes a ProtoDriver and adds the +// capability of the Diffing methods which it may or may not +// support on its own. 
See the comment on the exported +// NaiveDiffDriver function below. +// Notably, the AUFS driver doesn't need to be wrapped like this. +type naiveDiffDriver struct { + ProtoDriver +} + +// NaiveDiffDriver returns a fully functional driver that wraps the +// given ProtoDriver and adds the capability of the following methods which +// it may or may not support on its own: +// Diff(id, parent string) (archive.Archive, error) +// Changes(id, parent string) ([]archive.Change, error) +// ApplyDiff(id, parent string, diff archive.ArchiveReader) (size int64, err error) +// DiffSize(id, parent string) (size int64, err error) +func NaiveDiffDriver(driver ProtoDriver) Driver { + return &naiveDiffDriver{ProtoDriver: driver} +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (gdw *naiveDiffDriver) Diff(id, parent string) (arch archive.Archive, err error) { + driver := gdw.ProtoDriver + + layerFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + driver.Put(id) + } + }() + + if parent == "" { + archive, err := archive.Tar(layerFs, archive.Uncompressed) + if err != nil { + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + return err + }), nil + } + + parentFs, err := driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + + changes, err := archive.ChangesDirs(layerFs, parentFs) + if err != nil { + return nil, err + } + + archive, err := archive.ExportChanges(layerFs, changes) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + return err + }), nil +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (gdw *naiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) { + driver := gdw.ProtoDriver + + layerFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + defer driver.Put(id) + + parentFs := "" + + if parent != "" { + parentFs, err = driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + } + + return archive.ChangesDirs(layerFs, parentFs) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (gdw *naiveDiffDriver) ApplyDiff(id, parent string, diff archive.ArchiveReader) (size int64, err error) { + driver := gdw.ProtoDriver + + // Mount the root filesystem so we can apply the diff/layer. + layerFs, err := driver.Get(id, "") + if err != nil { + return + } + defer driver.Put(id) + + start := time.Now().UTC() + logrus.Debugf("Start untar layer") + if size, err = chrootarchive.ApplyUncompressedLayer(layerFs, diff); err != nil { + return + } + logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) + + return +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. 
+func (gdw *naiveDiffDriver) DiffSize(id, parent string) (size int64, err error) {
+	driver := gdw.ProtoDriver
+
+	changes, err := gdw.Changes(id, parent)
+	if err != nil {
+		return
+	}
+
+	layerFs, err := driver.Get(id, "")
+	if err != nil {
+		return
+	}
+	defer driver.Put(id)
+
+	return archive.ChangesSize(layerFs, changes), nil
+}
diff --git a/daemon/graphdriver/graphtest/graphtest.go b/daemon/graphdriver/graphtest/graphtest.go
new file mode 100644
index 00000000..d9908d40
--- /dev/null
+++ b/daemon/graphdriver/graphtest/graphtest.go
@@ -0,0 +1,271 @@
+package graphtest
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path"
+	"syscall"
+	"testing"
+
+	"github.com/docker/docker/daemon/graphdriver"
+)
+
+var (
+	drv *Driver
+)
+
+type Driver struct {
+	graphdriver.Driver
+	root     string
+	refCount int
+}
+
+// InitLoopbacks ensures that the loopback devices are properly created within
+// the system running the device mapper tests.
+func InitLoopbacks() error {
+	statT, err := getBaseLoopStats()
+	if err != nil {
+		return err
+	}
+	// create at least 8 loopback files
+	for i := 0; i < 8; i++ {
+		loopPath := fmt.Sprintf("/dev/loop%d", i)
+		// only create new loopback files if they don't exist
+		if _, err := os.Stat(loopPath); err != nil {
+			if mkerr := syscall.Mknod(loopPath,
+				uint32(statT.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil {
+				return mkerr
+			}
+			os.Chown(loopPath, int(statT.Uid), int(statT.Gid))
+		}
+	}
+	return nil
+}
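
The constant handed to Mknod above is the Linux dev_t encoding with major number 7 (the loop driver) and minor number i: the minor's low byte sits in bits 0-7, the major in bits 8-19, and the remaining minor bits from bit 20 up, matching glibc's makedev(). As a sketch of that encoding:

// makedev mirrors the expression used in InitLoopbacks above
// (glibc dev_t layout: minor low byte | major<<8 | high minor bits <<12).
func makedev(major, minor int) int {
	return (minor & 0xff) | ((major & 0xfff) << 8) | ((minor &^ 0xff) << 12)
}

// makedev(7, 3) == (7<<8)|(3&0xff)|((3&0xfff00)<<12) == 0x703 -> /dev/loop3
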
+
+// getBaseLoopStats inspects /dev/loop0 to collect uid, gid, and mode for the
+// loop0 device on the system. If it does not exist we assume 0,0,0660 for the
+// stat data
+func getBaseLoopStats() (*syscall.Stat_t, error) {
+	loop0, err := os.Stat("/dev/loop0")
+	if err != nil {
+		if os.IsNotExist(err) {
+			return &syscall.Stat_t{
+				Uid:  0,
+				Gid:  0,
+				Mode: 0660,
+			}, nil
+		}
+		return nil, err
+	}
+	return loop0.Sys().(*syscall.Stat_t), nil
+}
+
+func newDriver(t *testing.T, name string) *Driver {
+	root, err := ioutil.TempDir("/var/tmp", "docker-graphtest-")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := os.MkdirAll(root, 0755); err != nil {
+		t.Fatal(err)
+	}
+
+	d, err := graphdriver.GetDriver(name, root, nil)
+	if err != nil {
+		t.Logf("graphdriver: %v\n", err)
+		if err == graphdriver.ErrNotSupported || err == graphdriver.ErrPrerequisites || err == graphdriver.ErrIncompatibleFS {
+			t.Skipf("Driver %s not supported", name)
+		}
+		t.Fatal(err)
+	}
+	return &Driver{d, root, 1}
+}
+
+func cleanup(t *testing.T, d *Driver) {
+	if err := drv.Cleanup(); err != nil {
+		t.Fatal(err)
+	}
+	os.RemoveAll(d.root)
+}
+
+func GetDriver(t *testing.T, name string) graphdriver.Driver {
+	if drv == nil {
+		drv = newDriver(t, name)
+	} else {
+		drv.refCount++
+	}
+	return drv
+}
+
+func PutDriver(t *testing.T) {
+	if drv == nil {
+		t.Skip("No driver to put!")
+	}
+	drv.refCount--
+	if drv.refCount == 0 {
+		cleanup(t, drv)
+		drv = nil
+	}
+}
+
+func verifyFile(t *testing.T, path string, mode os.FileMode, uid, gid uint32) {
+	fi, err := os.Stat(path)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if fi.Mode()&os.ModeType != mode&os.ModeType {
+		t.Fatalf("Expected %s type 0x%x, got 0x%x", path, mode&os.ModeType, fi.Mode()&os.ModeType)
+	}
+
+	if fi.Mode()&os.ModePerm != mode&os.ModePerm {
+		t.Fatalf("Expected %s mode %o, got %o", path, mode&os.ModePerm, fi.Mode()&os.ModePerm)
+	}
+
+	if fi.Mode()&os.ModeSticky != mode&os.ModeSticky {
+		t.Fatalf("Expected %s sticky 0x%x, got 0x%x", path, mode&os.ModeSticky, fi.Mode()&os.ModeSticky)
+	}
+
+	if fi.Mode()&os.ModeSetuid != mode&os.ModeSetuid {
+		t.Fatalf("Expected %s setuid 0x%x, got 0x%x", path, mode&os.ModeSetuid, fi.Mode()&os.ModeSetuid)
+	}
+
+	if fi.Mode()&os.ModeSetgid != mode&os.ModeSetgid {
+		t.Fatalf("Expected %s setgid 0x%x, got 0x%x", path, mode&os.ModeSetgid, fi.Mode()&os.ModeSetgid)
+	}
+
+	if stat, ok := fi.Sys().(*syscall.Stat_t); ok {
+		if stat.Uid != uid {
+			t.Fatalf("%s not owned by uid %d", path, uid)
+		}
+		if stat.Gid != gid {
+			t.Fatalf("%s not owned by gid %d", path, gid)
+		}
+	}
+}
+
+// Creates a new image and verifies it is empty and has the right metadata
func DriverTestCreateEmpty(t *testing.T, drivername string) {
+	driver := GetDriver(t, drivername)
+	defer PutDriver(t)
+
+	if err := driver.Create("empty", ""); err != nil {
+		t.Fatal(err)
+	}
+
+	if !driver.Exists("empty") {
+		t.Fatal("Newly created image doesn't exist")
+	}
+
+	dir, err := driver.Get("empty", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	verifyFile(t, dir, 0755|os.ModeDir, 0, 0)
+
+	// Verify that the directory is empty
+	fis, err := ioutil.ReadDir(dir)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(fis) != 0 {
+		t.Fatal("New directory not empty")
+	}
+
+	driver.Put("empty")
+
+	if err := driver.Remove("empty"); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func createBase(t *testing.T, driver graphdriver.Driver, name string) {
+	// We need to be able to set any perms
+	oldmask := syscall.Umask(0)
+	defer syscall.Umask(oldmask)
+
+	if err := driver.Create(name, ""); err != nil {
+		t.Fatal(err)
+	}
+
+	dir, err := driver.Get(name, "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer driver.Put(name)
+
+	subdir := path.Join(dir, "a subdir")
+	if err := os.Mkdir(subdir, 0705|os.ModeSticky); err != nil {
+		t.Fatal(err)
+	}
+	if err := os.Chown(subdir, 1, 2); err != nil {
+		t.Fatal(err)
+	}
+
+	file := path.Join(dir, "a file")
+	if err := ioutil.WriteFile(file, []byte("Some data"), 0222|os.ModeSetuid); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func verifyBase(t *testing.T, driver graphdriver.Driver, name string) {
+	dir, err := driver.Get(name, "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer driver.Put(name)
+
+	subdir := path.Join(dir, "a subdir")
+	verifyFile(t, subdir, 0705|os.ModeDir|os.ModeSticky, 1, 2)
+
+	file := path.Join(dir, "a file")
+	verifyFile(t, file, 0222|os.ModeSetuid, 0, 0)
+
+	fis, err := ioutil.ReadDir(dir)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(fis) != 2 {
+		t.Fatal("Unexpected files in base image")
+	}
+}
+
+func DriverTestCreateBase(t *testing.T, drivername string) {
+	driver := GetDriver(t, drivername)
+	defer PutDriver(t)
+
+	createBase(t, driver, "Base")
+	verifyBase(t, driver, "Base")
+
+	if err := driver.Remove("Base"); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func DriverTestCreateSnap(t *testing.T, drivername string) {
+	driver := GetDriver(t, drivername)
+	defer PutDriver(t)
+
+	createBase(t, driver, "Base")
+
+	if err := driver.Create("Snap", "Base"); err != nil {
+		t.Fatal(err)
+	}
+
+	verifyBase(t, driver, "Snap")
+
+	if err := driver.Remove("Snap"); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := driver.Remove("Base"); err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/daemon/graphdriver/overlay/copy.go b/daemon/graphdriver/overlay/copy.go
new file mode 100644
index 00000000..f43b117a
--- /dev/null
+++ b/daemon/graphdriver/overlay/copy.go
@@ -0,0 +1,166 @@
+// +build linux
+
+package overlay
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"syscall"
+
+	"github.com/docker/docker/pkg/system"
+)
+
+type CopyFlags int
+
+const (
+	CopyHardlink CopyFlags = 1 
<< iota +) + +func copyRegular(srcPath, dstPath string, mode os.FileMode) error { + srcFile, err := os.Open(srcPath) + if err != nil { + return err + } + defer srcFile.Close() + + dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE, mode) + if err != nil { + return err + } + defer dstFile.Close() + + _, err = io.Copy(dstFile, srcFile) + + return err +} + +func copyXattr(srcPath, dstPath, attr string) error { + data, err := system.Lgetxattr(srcPath, attr) + if err != nil { + return err + } + if data != nil { + if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil { + return err + } + } + return nil +} + +func copyDir(srcDir, dstDir string, flags CopyFlags) error { + err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(srcDir, srcPath) + if err != nil { + return err + } + + dstPath := filepath.Join(dstDir, relPath) + if err != nil { + return err + } + + stat, ok := f.Sys().(*syscall.Stat_t) + if !ok { + return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath) + } + + isHardlink := false + + switch f.Mode() & os.ModeType { + case 0: // Regular file + if flags&CopyHardlink != 0 { + isHardlink = true + if err := os.Link(srcPath, dstPath); err != nil { + return err + } + } else { + if err := copyRegular(srcPath, dstPath, f.Mode()); err != nil { + return err + } + } + + case os.ModeDir: + if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) { + return err + } + + case os.ModeSymlink: + link, err := os.Readlink(srcPath) + if err != nil { + return err + } + + if err := os.Symlink(link, dstPath); err != nil { + return err + } + + case os.ModeNamedPipe: + fallthrough + case os.ModeSocket: + if err := syscall.Mkfifo(dstPath, stat.Mode); err != nil { + return err + } + + case os.ModeDevice: + if err := syscall.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil { + return err + } + + default: + return fmt.Errorf("Unknown file type for %s\n", srcPath) + } + + // Everything below is copying metadata from src to dst. All this metadata + // already shares an inode for hardlinks. + if isHardlink { + return nil + } + + if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil { + return err + } + + if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil { + return err + } + + // We need to copy this attribute if it appears in an overlay upper layer, as + // this function is used to copy those. It is set by overlay if a directory + // is removed and then re-created and should not inherit anything from the + // same dir in the lower dir. + if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil { + return err + } + + isSymlink := f.Mode()&os.ModeSymlink != 0 + + // There is no LChmod, so ignore mode for symlink. 
Also, this
+		// must happen after chown, as that can modify the file mode
+		if !isSymlink {
+			if err := os.Chmod(dstPath, f.Mode()); err != nil {
+				return err
+			}
+		}
+
+		ts := []syscall.Timespec{stat.Atim, stat.Mtim}
+		// syscall.UtimesNano doesn't support a NOFOLLOW flag atm, so
+		// symlinks get their timestamps set via LUtimesNano instead
+		if !isSymlink {
+			if err := system.UtimesNano(dstPath, ts); err != nil {
+				return err
+			}
+		} else {
+			if err := system.LUtimesNano(dstPath, ts); err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+	return err
+}
diff --git a/daemon/graphdriver/overlay/overlay.go b/daemon/graphdriver/overlay/overlay.go
new file mode 100644
index 00000000..fc04057c
--- /dev/null
+++ b/daemon/graphdriver/overlay/overlay.go
@@ -0,0 +1,429 @@
+// +build linux
+
+package overlay
+
+import (
+	"bufio"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path"
+	"sync"
+	"syscall"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/chrootarchive"
+	"github.com/opencontainers/runc/libcontainer/label"
+)
+
+// This is a small wrapper over the NaiveDiffDriver that lets us have a custom
+// implementation of ApplyDiff()
+
+var (
+	ErrApplyDiffFallback = fmt.Errorf("Fall back to normal ApplyDiff")
+)
+
+type ApplyDiffProtoDriver interface {
+	graphdriver.ProtoDriver
+	ApplyDiff(id, parent string, diff archive.ArchiveReader) (size int64, err error)
+}
+
+type naiveDiffDriverWithApply struct {
+	graphdriver.Driver
+	applyDiff ApplyDiffProtoDriver
+}
+
+func NaiveDiffDriverWithApply(driver ApplyDiffProtoDriver) graphdriver.Driver {
+	return &naiveDiffDriverWithApply{
+		Driver:    graphdriver.NaiveDiffDriver(driver),
+		applyDiff: driver,
+	}
+}
+
+func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff archive.ArchiveReader) (int64, error) {
+	b, err := d.applyDiff.ApplyDiff(id, parent, diff)
+	if err == ErrApplyDiffFallback {
+		return d.Driver.ApplyDiff(id, parent, diff)
+	}
+	return b, err
+}
+
+// This backend uses the overlay union filesystem for containers
+// plus hard link file sharing for images.
+
+// Each container/image can have a "root" subdirectory which is a plain
+// filesystem hierarchy, or it can use overlay.
+
+// If it uses overlay there is an "upper" directory and a "lower-id"
+// file, as well as "merged" and "work" directories. The "upper"
+// directory has the upper layer of the overlay, and "lower-id" contains
+// the id of the parent whose "root" directory shall be used as the lower
+// layer in the overlay. The overlay itself is mounted in the "merged"
+// directory, and the "work" dir is needed for overlay to work.
+
+// When an overlay layer is created there are two cases: either the
+// parent has a "root" dir, in which case we start out with an empty
+// "upper" directory overlaid on the parent's root. This is typically the
+// case with the init layer of a container which is based on an image.
+// If there is no "root" in the parent, we inherit the lower-id from
+// the parent and start by making a copy of the parent's "upper" dir.
+// This is typically the case for a container layer which copies
+// its parent -init upper layer.
+
+// Additionally we also have a custom implementation of ApplyDiff
+// which makes a recursive copy of the parent "root" layer using
+// hardlinks to share file data, and then applies the layer on top
+// of that. This means all child images share file (but not directory)
+// data with the parent.
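
Concretely, mounting such a layer comes down to a single overlay mount whose options name the pieces described above; a minimal sketch (the paths are placeholders for d.dir(id)-style lookups, mirroring what Get() below does):

// mountLayer is a hypothetical helper showing the overlay mount call.
// One mount combines the parent's "root" (lower) with this layer's
// "upper"; "work" is scratch space the kernel requires.
func mountLayer(lowerDir, upperDir, workDir, mergedDir string) error {
	opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir)
	return syscall.Mount("overlay", mergedDir, "overlay", 0, opts)
}
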
+ +type ActiveMount struct { + count int + path string + mounted bool +} +type Driver struct { + home string + sync.Mutex // Protects concurrent modification to active + active map[string]*ActiveMount +} + +var backingFs = "" + +func init() { + graphdriver.Register("overlay", Init) +} + +func Init(home string, options []string) (graphdriver.Driver, error) { + + if err := supportsOverlay(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + // check if they are running over btrfs or aufs + switch fsMagic { + case graphdriver.FsMagicBtrfs: + logrus.Error("'overlay' is not supported over btrfs.") + return nil, graphdriver.ErrIncompatibleFS + case graphdriver.FsMagicAufs: + logrus.Error("'overlay' is not supported over aufs.") + return nil, graphdriver.ErrIncompatibleFS + case graphdriver.FsMagicZfs: + logrus.Error("'overlay' is not supported over zfs.") + return nil, graphdriver.ErrIncompatibleFS + } + + // Create the driver home dir + if err := os.MkdirAll(home, 0755); err != nil && !os.IsExist(err) { + return nil, err + } + + d := &Driver{ + home: home, + active: make(map[string]*ActiveMount), + } + + return NaiveDiffDriverWithApply(d), nil +} + +func supportsOverlay() error { + // We can try to modprobe overlay first before looking at + // proc/filesystems for when overlay is supported + exec.Command("modprobe", "overlay").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if s.Text() == "nodev\toverlay" { + return nil + } + } + logrus.Error("'overlay' not found as a supported filesystem on this host. 
Please ensure kernel is new enough and has overlay support loaded.") + return graphdriver.ErrNotSupported +} + +func (d *Driver) String() string { + return "overlay" +} + +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"Backing Filesystem", backingFs}, + } +} + +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return nil, err + } + + metadata := make(map[string]string) + + // If id has a root, it is an image + rootDir := path.Join(dir, "root") + if _, err := os.Stat(rootDir); err == nil { + metadata["RootDir"] = rootDir + return metadata, nil + } + + lowerId, err := ioutil.ReadFile(path.Join(dir, "lower-id")) + if err != nil { + return nil, err + } + + metadata["LowerDir"] = path.Join(d.dir(string(lowerId)), "root") + metadata["UpperDir"] = path.Join(dir, "upper") + metadata["WorkDir"] = path.Join(dir, "work") + metadata["MergedDir"] = path.Join(dir, "merged") + + return metadata, nil +} + +func (d *Driver) Cleanup() error { + return nil +} + +func (d *Driver) Create(id string, parent string) (retErr error) { + dir := d.dir(id) + if err := os.MkdirAll(path.Dir(dir), 0700); err != nil { + return err + } + if err := os.Mkdir(dir, 0700); err != nil { + return err + } + + defer func() { + // Clean up on failure + if retErr != nil { + os.RemoveAll(dir) + } + }() + + // Toplevel images are just a "root" dir + if parent == "" { + if err := os.Mkdir(path.Join(dir, "root"), 0755); err != nil { + return err + } + return nil + } + + parentDir := d.dir(parent) + + // Ensure parent exists + if _, err := os.Lstat(parentDir); err != nil { + return err + } + + // If parent has a root, just do a overlay to it + parentRoot := path.Join(parentDir, "root") + + if s, err := os.Lstat(parentRoot); err == nil { + if err := os.Mkdir(path.Join(dir, "upper"), s.Mode()); err != nil { + return err + } + if err := os.Mkdir(path.Join(dir, "work"), 0700); err != nil { + return err + } + if err := os.Mkdir(path.Join(dir, "merged"), 0700); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), []byte(parent), 0666); err != nil { + return err + } + return nil + } + + // Otherwise, copy the upper and the lower-id from the parent + + lowerId, err := ioutil.ReadFile(path.Join(parentDir, "lower-id")) + if err != nil { + return err + } + + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), lowerId, 0666); err != nil { + return err + } + + parentUpperDir := path.Join(parentDir, "upper") + s, err := os.Lstat(parentUpperDir) + if err != nil { + return err + } + + upperDir := path.Join(dir, "upper") + if err := os.Mkdir(upperDir, s.Mode()); err != nil { + return err + } + if err := os.Mkdir(path.Join(dir, "work"), 0700); err != nil { + return err + } + if err := os.Mkdir(path.Join(dir, "merged"), 0700); err != nil { + return err + } + + return copyDir(parentUpperDir, upperDir, 0) +} + +func (d *Driver) dir(id string) string { + return path.Join(d.home, id) +} + +func (d *Driver) Remove(id string) error { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return err + } + return os.RemoveAll(dir) +} + +func (d *Driver) Get(id string, mountLabel string) (string, error) { + // Protect the d.active from concurrent access + d.Lock() + defer d.Unlock() + + mount := d.active[id] + if mount != nil { + mount.count++ + return mount.path, nil + } + + mount = &ActiveMount{count: 1} + + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return "", err + } + + // If id has a root, just 
return it + rootDir := path.Join(dir, "root") + if _, err := os.Stat(rootDir); err == nil { + mount.path = rootDir + d.active[id] = mount + return mount.path, nil + } + + lowerId, err := ioutil.ReadFile(path.Join(dir, "lower-id")) + if err != nil { + return "", err + } + lowerDir := path.Join(d.dir(string(lowerId)), "root") + upperDir := path.Join(dir, "upper") + workDir := path.Join(dir, "work") + mergedDir := path.Join(dir, "merged") + + opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir) + if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil { + return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) + } + mount.path = mergedDir + mount.mounted = true + d.active[id] = mount + + return mount.path, nil +} + +func (d *Driver) Put(id string) error { + // Protect the d.active from concurrent access + d.Lock() + defer d.Unlock() + + mount := d.active[id] + if mount == nil { + logrus.Debugf("Put on a non-mounted device %s", id) + // but it might be still here + if d.Exists(id) { + mergedDir := path.Join(d.dir(id), "merged") + err := syscall.Unmount(mergedDir, 0) + if err != nil { + logrus.Debugf("Failed to unmount %s overlay: %v", id, err) + } + } + return nil + } + + mount.count-- + if mount.count > 0 { + return nil + } + + defer delete(d.active, id) + if mount.mounted { + err := syscall.Unmount(mount.path, 0) + if err != nil { + logrus.Debugf("Failed to unmount %s overlay: %v", id, err) + } + return err + } + return nil +} + +func (d *Driver) ApplyDiff(id string, parent string, diff archive.ArchiveReader) (size int64, err error) { + dir := d.dir(id) + + if parent == "" { + return 0, ErrApplyDiffFallback + } + + parentRootDir := path.Join(d.dir(parent), "root") + if _, err := os.Stat(parentRootDir); err != nil { + return 0, ErrApplyDiffFallback + } + + // We now know there is a parent, and it has a "root" directory containing + // the full root filesystem. We can just hardlink it and apply the + // layer. 
This relies on two things: + // 1) ApplyDiff is only run once on a clean (no writes to upper layer) container + // 2) ApplyDiff doesn't do any in-place writes to files (would break hardlinks) + // These are all currently true and are not expected to break + + tmpRootDir, err := ioutil.TempDir(dir, "tmproot") + if err != nil { + return 0, err + } + defer func() { + if err != nil { + os.RemoveAll(tmpRootDir) + } else { + os.RemoveAll(path.Join(dir, "upper")) + os.RemoveAll(path.Join(dir, "work")) + os.RemoveAll(path.Join(dir, "merged")) + os.RemoveAll(path.Join(dir, "lower-id")) + } + }() + + if err = copyDir(parentRootDir, tmpRootDir, CopyHardlink); err != nil { + return 0, err + } + + if size, err = chrootarchive.ApplyUncompressedLayer(tmpRootDir, diff); err != nil { + return 0, err + } + + rootDir := path.Join(dir, "root") + if err := os.Rename(tmpRootDir, rootDir); err != nil { + return 0, err + } + + return +} + +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} diff --git a/daemon/graphdriver/overlay/overlay_test.go b/daemon/graphdriver/overlay/overlay_test.go new file mode 100644 index 00000000..76b6313b --- /dev/null +++ b/daemon/graphdriver/overlay/overlay_test.go @@ -0,0 +1,30 @@ +// +build linux + +package overlay + +import ( + "github.com/docker/docker/daemon/graphdriver/graphtest" + "testing" +) + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestOverlaySetup and TestOverlayTeardown +func TestOverlaySetup(t *testing.T) { + graphtest.GetDriver(t, "overlay") +} + +func TestOverlayCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "overlay") +} + +func TestOverlayCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "overlay") +} + +func TestOverlayCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "overlay") +} + +func TestOverlayTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/daemon/graphdriver/overlay/overlay_unsupported.go b/daemon/graphdriver/overlay/overlay_unsupported.go new file mode 100644 index 00000000..3dbb4de4 --- /dev/null +++ b/daemon/graphdriver/overlay/overlay_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package overlay diff --git a/daemon/graphdriver/vfs/driver.go b/daemon/graphdriver/vfs/driver.go new file mode 100644 index 00000000..d2e71dc9 --- /dev/null +++ b/daemon/graphdriver/vfs/driver.go @@ -0,0 +1,102 @@ +// +build linux windows + +package vfs + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/runc/libcontainer/label" +) + +func init() { + graphdriver.Register("vfs", Init) +} + +func Init(home string, options []string) (graphdriver.Driver, error) { + d := &Driver{ + home: home, + } + return graphdriver.NaiveDiffDriver(d), nil +} + +type Driver struct { + home string +} + +func (d *Driver) String() string { + return "vfs" +} + +func (d *Driver) Status() [][2]string { + return nil +} + +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +func (d *Driver) Cleanup() error { + return nil +} + +func (d *Driver) Create(id, parent string) error { + dir := d.dir(id) + if err := system.MkdirAll(filepath.Dir(dir), 0700); err != nil { + return err + } + if err := os.Mkdir(dir, 0755); err != nil { + return err + } + opts := []string{"level:s0"} + if _, mountLabel, err := label.InitLabels(opts); err == 
nil { + label.SetFileLabel(dir, mountLabel) + } + if parent == "" { + return nil + } + parentDir, err := d.Get(parent, "") + if err != nil { + return fmt.Errorf("%s: %s", parent, err) + } + if err := chrootarchive.CopyWithTar(parentDir, dir); err != nil { + return err + } + return nil +} + +func (d *Driver) dir(id string) string { + return filepath.Join(d.home, "dir", filepath.Base(id)) +} + +func (d *Driver) Remove(id string) error { + if _, err := os.Stat(d.dir(id)); err != nil { + return err + } + return os.RemoveAll(d.dir(id)) +} + +func (d *Driver) Get(id, mountLabel string) (string, error) { + dir := d.dir(id) + if st, err := os.Stat(dir); err != nil { + return "", err + } else if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + return dir, nil +} + +func (d *Driver) Put(id string) error { + // The vfs driver has no runtime resources (e.g. mounts) + // to clean up, so we don't need anything here + return nil +} + +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} diff --git a/daemon/graphdriver/vfs/driver_unsupported.go b/daemon/graphdriver/vfs/driver_unsupported.go new file mode 100644 index 00000000..be2e4ab5 --- /dev/null +++ b/daemon/graphdriver/vfs/driver_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux,!windows + +package vfs diff --git a/daemon/graphdriver/vfs/vfs_test.go b/daemon/graphdriver/vfs/vfs_test.go new file mode 100644 index 00000000..9ecf21db --- /dev/null +++ b/daemon/graphdriver/vfs/vfs_test.go @@ -0,0 +1,37 @@ +// +build linux + +package vfs + +import ( + "testing" + + "github.com/docker/docker/daemon/graphdriver/graphtest" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Init() +} + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestVfsSetup and TestVfsTeardown +func TestVfsSetup(t *testing.T) { + graphtest.GetDriver(t, "vfs") +} + +func TestVfsCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "vfs") +} + +func TestVfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "vfs") +} + +func TestVfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "vfs") +} + +func TestVfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/daemon/graphdriver/windows/windows.go b/daemon/graphdriver/windows/windows.go new file mode 100644 index 00000000..7e7c5927 --- /dev/null +++ b/daemon/graphdriver/windows/windows.go @@ -0,0 +1,326 @@ +//+build windows + +package windows + +import ( + "fmt" + "os" + "path/filepath" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/ioutils" + "github.com/microsoft/hcsshim" +) + +func init() { + graphdriver.Register("windowsfilter", InitFilter) + graphdriver.Register("windowsdiff", InitDiff) +} + +const ( + diffDriver = iota + filterDriver +) + +type WindowsGraphDriver struct { + info hcsshim.DriverInfo + sync.Mutex // Protects concurrent modification to active + active map[string]int +} + +// New returns a new Windows storage filter driver. 
+func InitFilter(home string, options []string) (graphdriver.Driver, error) { + logrus.Debugf("WindowsGraphDriver InitFilter at %s", home) + d := &WindowsGraphDriver{ + info: hcsshim.DriverInfo{ + HomeDir: home, + Flavour: filterDriver, + }, + active: make(map[string]int), + } + return d, nil +} + +// New returns a new Windows differencing disk driver. +func InitDiff(home string, options []string) (graphdriver.Driver, error) { + logrus.Debugf("WindowsGraphDriver InitDiff at %s", home) + d := &WindowsGraphDriver{ + info: hcsshim.DriverInfo{ + HomeDir: home, + Flavour: diffDriver, + }, + active: make(map[string]int), + } + return d, nil +} + +func (d *WindowsGraphDriver) Info() hcsshim.DriverInfo { + return d.info +} + +func (d *WindowsGraphDriver) String() string { + switch d.info.Flavour { + case diffDriver: + return "windowsdiff" + case filterDriver: + return "windowsfilter" + default: + return "Unknown driver flavour" + } +} + +func (d *WindowsGraphDriver) Status() [][2]string { + return [][2]string{ + {"Windows", ""}, + } +} + +// Exists returns true if the given id is registered with +// this driver +func (d *WindowsGraphDriver) Exists(id string) bool { + result, err := hcsshim.LayerExists(d.info, id) + if err != nil { + return false + } + return result +} + +func (d *WindowsGraphDriver) Create(id, parent string) error { + return hcsshim.CreateLayer(d.info, id, parent) +} + +func (d *WindowsGraphDriver) dir(id string) string { + return filepath.Join(d.info.HomeDir, filepath.Base(id)) +} + +// Remove unmounts and removes the dir information +func (d *WindowsGraphDriver) Remove(id string) error { + return hcsshim.DestroyLayer(d.info, id) +} + +// Get returns the rootfs path for the id. This will mount the dir at it's given path +func (d *WindowsGraphDriver) Get(id, mountLabel string) (string, error) { + var dir string + + d.Lock() + defer d.Unlock() + + if d.active[id] == 0 { + if err := hcsshim.ActivateLayer(d.info, id); err != nil { + return "", err + } + } + + mountPath, err := hcsshim.GetLayerMountPath(d.info, id) + if err != nil { + return "", err + } + + // If the layer has a mount path, use that. Otherwise, use the + // folder path. + if mountPath != "" { + dir = mountPath + } else { + dir = d.dir(id) + } + + d.active[id]++ + + return dir, nil +} + +func (d *WindowsGraphDriver) Put(id string) error { + logrus.Debugf("WindowsGraphDriver Put() id %s", id) + + d.Lock() + defer d.Unlock() + + if d.active[id] > 1 { + d.active[id]-- + } else if d.active[id] == 1 { + if err := hcsshim.DeactivateLayer(d.info, id); err != nil { + return err + } + delete(d.active, id) + } + + return nil +} + +func (d *WindowsGraphDriver) Cleanup() error { + return nil +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (d *WindowsGraphDriver) Diff(id, parent string) (arch archive.Archive, err error) { + return nil, fmt.Errorf("The Windows graphdriver does not support Diff()") +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (d *WindowsGraphDriver) Changes(id, parent string) ([]archive.Change, error) { + return nil, fmt.Errorf("The Windows graphdriver does not support Changes()") +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. 
+func (d *WindowsGraphDriver) ApplyDiff(id, parent string, diff archive.ArchiveReader) (size int64, err error) {
+	start := time.Now().UTC()
+	logrus.Debugf("WindowsGraphDriver ApplyDiff: Start untar layer")
+
+	destination := d.dir(id)
+	if d.info.Flavour == diffDriver {
+		destination = filepath.Dir(destination)
+	}
+
+	if size, err = chrootarchive.ApplyLayer(destination, diff); err != nil {
+		return
+	}
+	logrus.Debugf("WindowsGraphDriver ApplyDiff: Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
+
+	return
+}
+
+// DiffSize calculates the changes between the specified layer
+// and its parent and returns the size in bytes of the changes
+// relative to its base filesystem directory.
+func (d *WindowsGraphDriver) DiffSize(id, parent string) (size int64, err error) {
+	changes, err := d.Changes(id, parent)
+	if err != nil {
+		return
+	}
+
+	layerFs, err := d.Get(id, "")
+	if err != nil {
+		return
+	}
+	defer d.Put(id)
+
+	return archive.ChangesSize(layerFs, changes), nil
+}
+
+func (d *WindowsGraphDriver) CopyDiff(sourceId, id string, parentLayerPaths []string) error {
+	d.Lock()
+	defer d.Unlock()
+
+	if d.info.Flavour == filterDriver && d.active[sourceId] == 0 {
+		if err := hcsshim.ActivateLayer(d.info, sourceId); err != nil {
+			return err
+		}
+		defer func() {
+			err := hcsshim.DeactivateLayer(d.info, sourceId)
+			if err != nil {
+				logrus.Warnf("Failed to Deactivate %s: %s", sourceId, err)
+			}
+		}()
+	}
+
+	return hcsshim.CopyLayer(d.info, sourceId, id, parentLayerPaths)
+}
+
+func (d *WindowsGraphDriver) LayerIdsToPaths(ids []string) []string {
+	var paths []string
+	for _, id := range ids {
+		path, err := d.Get(id, "")
+		if err != nil {
+			logrus.Debug("LayerIdsToPaths: Error getting mount path for id", id, ":", err.Error())
+			return nil
+		}
+		if err := d.Put(id); err != nil {
+			logrus.Debug("LayerIdsToPaths: Error putting mount path for id", id, ":", err.Error())
+			return nil
+		}
+		paths = append(paths, path)
+	}
+	return paths
+}
+
+func (d *WindowsGraphDriver) GetMetadata(id string) (map[string]string, error) {
+	return nil, nil
+}
+
+func (d *WindowsGraphDriver) Export(id string, parentLayerPaths []string) (arch archive.Archive, err error) {
+	layerFs, err := d.Get(id, "")
+	if err != nil {
+		return
+	}
+	defer func() {
+		if err != nil {
+			d.Put(id)
+		}
+	}()
+
+	tempFolder := layerFs + "-temp"
+	if err = os.MkdirAll(tempFolder, 0755); err != nil {
+		logrus.Errorf("Could not create %s %s", tempFolder, err)
+		return
+	}
+	defer func() {
+		if err != nil {
+			if err2 := os.RemoveAll(tempFolder); err2 != nil {
+				logrus.Warnf("Couldn't clean-up tempFolder: %s %s", tempFolder, err2)
+			}
+		}
+	}()
+
+	if err = hcsshim.ExportLayer(d.info, id, tempFolder, parentLayerPaths); err != nil {
+		return
+	}
+
+	archive, err := archive.Tar(tempFolder, archive.Uncompressed)
+	if err != nil {
+		return
+	}
+	return ioutils.NewReadCloserWrapper(archive, func() error {
+		err := archive.Close()
+		d.Put(id)
+		if err2 := os.RemoveAll(tempFolder); err2 != nil {
+			logrus.Warnf("Couldn't clean-up tempFolder: %s %s", tempFolder, err2)
+		}
+		return err
+	}), nil
+}
+
+func (d *WindowsGraphDriver) Import(id string, layerData archive.ArchiveReader, parentLayerPaths []string) (size int64, err error) {
+	layerFs, err := d.Get(id, "")
+	if err != nil {
+		return
+	}
+	defer func() {
+		if err != nil {
+			d.Put(id)
+		}
+	}()
+
+	tempFolder := layerFs + "-temp"
+	if err = os.MkdirAll(tempFolder, 0755); err != nil {
+		logrus.Errorf("Could not create %s %s", tempFolder, err)
+		return
+	}
+	defer func() {
+		if err2 := 
os.RemoveAll(tempFolder); err2 != nil { + logrus.Warnf("Couldn't clean-up tempFolder: %s %s", tempFolder, err2) + } + }() + + start := time.Now().UTC() + logrus.Debugf("Start untar layer") + if size, err = chrootarchive.ApplyLayer(tempFolder, layerData); err != nil { + return + } + logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) + + if err = hcsshim.ImportLayer(d.info, id, tempFolder, parentLayerPaths); err != nil { + return + } + + return +} diff --git a/daemon/graphdriver/zfs/MAINTAINERS b/daemon/graphdriver/zfs/MAINTAINERS new file mode 100644 index 00000000..9c270c54 --- /dev/null +++ b/daemon/graphdriver/zfs/MAINTAINERS @@ -0,0 +1,2 @@ +Jörg Thalheim (@Mic92) +Arthur Gautier (@baloose) diff --git a/daemon/graphdriver/zfs/zfs.go b/daemon/graphdriver/zfs/zfs.go new file mode 100644 index 00000000..ece86cc0 --- /dev/null +++ b/daemon/graphdriver/zfs/zfs.go @@ -0,0 +1,307 @@ +// +build linux + +package zfs + +import ( + "fmt" + "os" + "os/exec" + "path" + "strconv" + "strings" + "sync" + "syscall" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/parsers" + zfs "github.com/mistifyio/go-zfs" + "github.com/opencontainers/runc/libcontainer/label" +) + +type ZfsOptions struct { + fsName string + mountPath string +} + +func init() { + graphdriver.Register("zfs", Init) +} + +type Logger struct{} + +func (*Logger) Log(cmd []string) { + log.Debugf("[zfs] %s", strings.Join(cmd, " ")) +} + +func Init(base string, opt []string) (graphdriver.Driver, error) { + var err error + + if _, err := exec.LookPath("zfs"); err != nil { + log.Debugf("[zfs] zfs command is not available: %v", err) + return nil, graphdriver.ErrPrerequisites + } + + file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 600) + if err != nil { + log.Debugf("[zfs] cannot open /dev/zfs: %v", err) + return nil, graphdriver.ErrPrerequisites + } + defer file.Close() + + options, err := parseOptions(opt) + if err != nil { + return nil, err + } + options.mountPath = base + + rootdir := path.Dir(base) + + if options.fsName == "" { + err = checkRootdirFs(rootdir) + if err != nil { + return nil, err + } + } + + if options.fsName == "" { + options.fsName, err = lookupZfsDataset(rootdir) + if err != nil { + return nil, err + } + } + + zfs.SetLogger(new(Logger)) + + filesystems, err := zfs.Filesystems(options.fsName) + if err != nil { + return nil, fmt.Errorf("Cannot find root filesystem %s: %v", options.fsName, err) + } + + filesystemsCache := make(map[string]bool, len(filesystems)) + var rootDataset *zfs.Dataset + for _, fs := range filesystems { + if fs.Name == options.fsName { + rootDataset = fs + } + filesystemsCache[fs.Name] = true + } + + if rootDataset == nil { + return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName) + } + + d := &Driver{ + dataset: rootDataset, + options: options, + filesystemsCache: filesystemsCache, + } + return graphdriver.NaiveDiffDriver(d), nil +} + +func parseOptions(opt []string) (ZfsOptions, error) { + var options ZfsOptions + options.fsName = "" + for _, option := range opt { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return options, err + } + key = strings.ToLower(key) + switch key { + case "zfs.fsname": + options.fsName = val + default: + return options, fmt.Errorf("Unknown option %s", key) + } + } + return options, nil +} + +func lookupZfsDataset(rootdir string) (string, error) { + var 
stat syscall.Stat_t + if err := syscall.Stat(rootdir, &stat); err != nil { + return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + wantedDev := stat.Dev + + mounts, err := mount.GetMounts() + if err != nil { + return "", err + } + for _, m := range mounts { + if err := syscall.Stat(m.Mountpoint, &stat); err != nil { + log.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err) + continue // may fail on fuse file systems + } + + if stat.Dev == wantedDev && m.Fstype == "zfs" { + return m.Source, nil + } + } + + return "", fmt.Errorf("Failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir) +} + +type Driver struct { + dataset *zfs.Dataset + options ZfsOptions + sync.Mutex // protects filesystem cache against concurrent access + filesystemsCache map[string]bool +} + +func (d *Driver) String() string { + return "zfs" +} + +func (d *Driver) Cleanup() error { + return nil +} + +func (d *Driver) Status() [][2]string { + parts := strings.Split(d.dataset.Name, "/") + pool, err := zfs.GetZpool(parts[0]) + + var poolName, poolHealth string + if err == nil { + poolName = pool.Name + poolHealth = pool.Health + } else { + poolName = fmt.Sprintf("error while getting pool information %v", err) + poolHealth = "not available" + } + + quota := "no" + if d.dataset.Quota != 0 { + quota = strconv.FormatUint(d.dataset.Quota, 10) + } + + return [][2]string{ + {"Zpool", poolName}, + {"Zpool Health", poolHealth}, + {"Parent Dataset", d.dataset.Name}, + {"Space Used By Parent", strconv.FormatUint(d.dataset.Used, 10)}, + {"Space Available", strconv.FormatUint(d.dataset.Avail, 10)}, + {"Parent Quota", quota}, + {"Compression", d.dataset.Compression}, + } +} + +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +func (d *Driver) cloneFilesystem(name, parentName string) error { + snapshotName := fmt.Sprintf("%d", time.Now().Nanosecond()) + parentDataset := zfs.Dataset{Name: parentName} + snapshot, err := parentDataset.Snapshot(snapshotName /*recursive */, false) + if err != nil { + return err + } + + _, err = snapshot.Clone(name, map[string]string{"mountpoint": "legacy"}) + if err == nil { + d.Lock() + d.filesystemsCache[name] = true + d.Unlock() + } + + if err != nil { + snapshot.Destroy(zfs.DestroyDeferDeletion) + return err + } + return snapshot.Destroy(zfs.DestroyDeferDeletion) +} + +func (d *Driver) ZfsPath(id string) string { + return d.options.fsName + "/" + id +} + +func (d *Driver) MountPath(id string) string { + return path.Join(d.options.mountPath, "graph", getMountpoint(id)) +} + +func (d *Driver) Create(id string, parent string) error { + err := d.create(id, parent) + if err == nil { + return nil + } + if zfsError, ok := err.(*zfs.Error); ok { + if !strings.HasSuffix(zfsError.Stderr, "dataset already exists\n") { + return err + } + // aborted build -> cleanup + } else { + return err + } + + dataset := zfs.Dataset{Name: d.ZfsPath(id)} + if err := dataset.Destroy(zfs.DestroyRecursiveClones); err != nil { + return err + } + + // retry + return d.create(id, parent) +} + +func (d *Driver) create(id, parent string) error { + name := d.ZfsPath(id) + if parent == "" { + mountoptions := map[string]string{"mountpoint": "legacy"} + fs, err := zfs.CreateFilesystem(name, mountoptions) + if err == nil { + d.Lock() + d.filesystemsCache[fs.Name] = true + d.Unlock() + } + return err + } + return d.cloneFilesystem(name, d.ZfsPath(parent)) +} + +func (d *Driver) Remove(id string) error { + name := d.ZfsPath(id) + dataset := 
zfs.Dataset{Name: name} + err := dataset.Destroy(zfs.DestroyRecursive) + if err == nil { + d.Lock() + delete(d.filesystemsCache, name) + d.Unlock() + } + return err +} + +func (d *Driver) Get(id, mountLabel string) (string, error) { + mountpoint := d.MountPath(id) + filesystem := d.ZfsPath(id) + options := label.FormatMountLabel("", mountLabel) + log.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, options) + + // Create the target directories if they don't exist + if err := os.MkdirAll(mountpoint, 0755); err != nil && !os.IsExist(err) { + return "", err + } + + err := mount.Mount(filesystem, mountpoint, "zfs", options) + if err != nil { + return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err) + } + + return mountpoint, nil +} + +func (d *Driver) Put(id string) error { + mountpoint := d.MountPath(id) + log.Debugf(`[zfs] unmount("%s")`, mountpoint) + + if err := mount.Unmount(mountpoint); err != nil { + return fmt.Errorf("error unmounting to %s: %v", mountpoint, err) + } + return nil +} + +func (d *Driver) Exists(id string) bool { + return d.filesystemsCache[d.ZfsPath(id)] == true +} diff --git a/daemon/graphdriver/zfs/zfs_freebsd.go b/daemon/graphdriver/zfs/zfs_freebsd.go new file mode 100644 index 00000000..a7e28b16 --- /dev/null +++ b/daemon/graphdriver/zfs/zfs_freebsd.go @@ -0,0 +1,38 @@ +package zfs + +import ( + "fmt" + "strings" + "syscall" + + log "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" +) + +func checkRootdirFs(rootdir string) error { + var buf syscall.Statfs_t + if err := syscall.Statfs(rootdir, &buf); err != nil { + return fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + + // on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... ] + if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) { + log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) + return graphdriver.ErrPrerequisites + } + + return nil +} + +func getMountpoint(id string) string { + maxlen := 12 + + // we need to preserve filesystem suffix + suffix := strings.SplitN(id, "-", 2) + + if len(suffix) > 1 { + return id[:maxlen] + "-" + suffix[1] + } + + return id[:maxlen] +} diff --git a/daemon/graphdriver/zfs/zfs_linux.go b/daemon/graphdriver/zfs/zfs_linux.go new file mode 100644 index 00000000..f840ed54 --- /dev/null +++ b/daemon/graphdriver/zfs/zfs_linux.go @@ -0,0 +1,27 @@ +package zfs + +import ( + "fmt" + "syscall" + + log "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" +) + +func checkRootdirFs(rootdir string) error { + var buf syscall.Statfs_t + if err := syscall.Statfs(rootdir, &buf); err != nil { + return fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + + if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicZfs { + log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) + return graphdriver.ErrPrerequisites + } + + return nil +} + +func getMountpoint(id string) string { + return id +} diff --git a/daemon/graphdriver/zfs/zfs_test.go b/daemon/graphdriver/zfs/zfs_test.go new file mode 100644 index 00000000..c20eb986 --- /dev/null +++ b/daemon/graphdriver/zfs/zfs_test.go @@ -0,0 +1,30 @@ +// +build linux + +package zfs + +import ( + "github.com/docker/docker/daemon/graphdriver/graphtest" + "testing" +) + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestZfsSetup and TestZfsTeardown +func TestZfsSetup(t *testing.T) { + 
graphtest.GetDriver(t, "zfs") +} + +func TestZfsCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "zfs") +} + +func TestZfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "zfs") +} + +func TestZfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "zfs") +} + +func TestZfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/daemon/graphdriver/zfs/zfs_unsupported.go b/daemon/graphdriver/zfs/zfs_unsupported.go new file mode 100644 index 00000000..643b169b --- /dev/null +++ b/daemon/graphdriver/zfs/zfs_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux,!freebsd + +package zfs + +func checkRootdirFs(rootdir string) error { + return nil +} + +func getMountpoint(id string) string { + return id +} diff --git a/daemon/history.go b/daemon/history.go new file mode 100644 index 00000000..f7175087 --- /dev/null +++ b/daemon/history.go @@ -0,0 +1,31 @@ +package daemon + +import ( + "sort" +) + +// History is a convenience type for storing a list of containers, +// ordered by creation date. +type History []*Container + +func (history *History) Len() int { + return len(*history) +} + +func (history *History) Less(i, j int) bool { + containers := *history + return containers[j].Created.Before(containers[i].Created) +} + +func (history *History) Swap(i, j int) { + containers := *history + containers[i], containers[j] = containers[j], containers[i] +} + +func (history *History) Add(container *Container) { + *history = append(*history, container) +} + +func (history *History) Sort() { + sort.Sort(history) +} diff --git a/daemon/image_delete.go b/daemon/image_delete.go new file mode 100644 index 00000000..ccef4b33 --- /dev/null +++ b/daemon/image_delete.go @@ -0,0 +1,180 @@ +package daemon + +import ( + "fmt" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/graph" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/utils" +) + +// FIXME: remove ImageDelete's dependency on Daemon, then move to graph/ +func (daemon *Daemon) ImageDelete(name string, force, noprune bool) ([]types.ImageDelete, error) { + list := []types.ImageDelete{} + if err := daemon.imgDeleteHelper(name, &list, true, force, noprune); err != nil { + return nil, err + } + if len(list) == 0 { + return nil, fmt.Errorf("Conflict, %s wasn't deleted", name) + } + + return list, nil +} + +func (daemon *Daemon) imgDeleteHelper(name string, list *[]types.ImageDelete, first, force, noprune bool) error { + var ( + repoName, tag string + tags = []string{} + ) + repoAndTags := make(map[string][]string) + + // FIXME: please respect DRY and centralize repo+tag parsing in a single central place! 
-- shykes + repoName, tag = parsers.ParseRepositoryTag(name) + if tag == "" { + tag = graph.DEFAULTTAG + } + + if name == "" { + return fmt.Errorf("Image name can not be blank") + } + + img, err := daemon.Repositories().LookupImage(name) + if err != nil { + if r, _ := daemon.Repositories().Get(repoName); r != nil { + return fmt.Errorf("No such image: %s", utils.ImageReference(repoName, tag)) + } + return fmt.Errorf("No such image: %s", name) + } + + if strings.Contains(img.ID, name) { + repoName = "" + tag = "" + } + + byParents := daemon.Graph().ByParent() + + repos := daemon.Repositories().ByID()[img.ID] + + //If delete by id, see if the id belong only to one repository + deleteByID := repoName == "" + if deleteByID { + for _, repoAndTag := range repos { + parsedRepo, parsedTag := parsers.ParseRepositoryTag(repoAndTag) + if repoName == "" || repoName == parsedRepo { + repoName = parsedRepo + if parsedTag != "" { + repoAndTags[repoName] = append(repoAndTags[repoName], parsedTag) + } + } else if repoName != parsedRepo && !force && first { + // the id belongs to multiple repos, like base:latest and user:test, + // in that case return conflict + return fmt.Errorf("Conflict, cannot delete image %s because it is tagged in multiple repositories, use -f to force", name) + } else { + //the id belongs to multiple repos, with -f just delete all + repoName = parsedRepo + if parsedTag != "" { + repoAndTags[repoName] = append(repoAndTags[repoName], parsedTag) + } + } + } + } else { + repoAndTags[repoName] = append(repoAndTags[repoName], tag) + } + + if !first && len(repoAndTags) > 0 { + return nil + } + + if len(repos) <= 1 || (len(repoAndTags) <= 1 && deleteByID) { + if err := daemon.canDeleteImage(img.ID, force); err != nil { + return err + } + } + + // Untag the current image + for repoName, tags := range repoAndTags { + for _, tag := range tags { + tagDeleted, err := daemon.Repositories().Delete(repoName, tag) + if err != nil { + return err + } + if tagDeleted { + *list = append(*list, types.ImageDelete{ + Untagged: utils.ImageReference(repoName, tag), + }) + daemon.EventsService.Log("untag", img.ID, "") + } + } + } + tags = daemon.Repositories().ByID()[img.ID] + if (len(tags) <= 1 && repoName == "") || len(tags) == 0 { + if len(byParents[img.ID]) == 0 { + if err := daemon.Repositories().DeleteAll(img.ID); err != nil { + return err + } + if err := daemon.Graph().Delete(img.ID); err != nil { + return err + } + *list = append(*list, types.ImageDelete{ + Deleted: img.ID, + }) + daemon.EventsService.Log("delete", img.ID, "") + if img.Parent != "" && !noprune { + err := daemon.imgDeleteHelper(img.Parent, list, false, force, noprune) + if first { + return err + } + + } + + } + } + return nil +} + +func (daemon *Daemon) canDeleteImage(imgID string, force bool) error { + if daemon.Graph().IsHeld(imgID) { + return fmt.Errorf("Conflict, cannot delete because %s is held by an ongoing pull or build", stringid.TruncateID(imgID)) + } + for _, container := range daemon.List() { + if container.ImageID == "" { + // This technically should never happen, but if the container + // has no ImageID then log the situation and move on. + // If we allowed processing to continue then the code later + // on would fail with a "Prefix can't be empty" error even + // though the bad container has nothing to do with the image + // we're trying to delete. 
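+			// Logging and skipping here keeps one bad container record from
+			// blocking the deletion of an unrelated image.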
+ logrus.Errorf("Container %q has no image associated with it!", container.ID) + continue + } + parent, err := daemon.Repositories().LookupImage(container.ImageID) + if err != nil { + if daemon.Graph().IsNotExist(err, container.ImageID) { + continue + } + return err + } + + if err := daemon.graph.WalkHistory(parent, func(p image.Image) error { + if imgID == p.ID { + if container.IsRunning() { + if force { + return fmt.Errorf("Conflict, cannot force delete %s because the running container %s is using it, stop it and retry", stringid.TruncateID(imgID), stringid.TruncateID(container.ID)) + } + return fmt.Errorf("Conflict, cannot delete %s because the running container %s is using it, stop it and use -f to force", stringid.TruncateID(imgID), stringid.TruncateID(container.ID)) + } else if !force { + return fmt.Errorf("Conflict, cannot delete %s because the container %s is using it, use -f to force", stringid.TruncateID(imgID), stringid.TruncateID(container.ID)) + } + } + return nil + }); err != nil { + return err + } + } + return nil +} diff --git a/daemon/info.go b/daemon/info.go new file mode 100644 index 00000000..387c3995 --- /dev/null +++ b/daemon/info.go @@ -0,0 +1,114 @@ +package daemon + +import ( + "os" + "runtime" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/autogen/dockerversion" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/parsers/operatingsystem" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" +) + +func (daemon *Daemon) SystemInfo() (*types.Info, error) { + images := daemon.Graph().Map() + var imgcount int + if images == nil { + imgcount = 0 + } else { + imgcount = len(images) + } + kernelVersion := "" + if kv, err := kernel.GetKernelVersion(); err == nil { + kernelVersion = kv.String() + } + + operatingSystem := "" + if s, err := operatingsystem.GetOperatingSystem(); err == nil { + operatingSystem = s + } + + // Don't do containerized check on Windows + if runtime.GOOS != "windows" { + if inContainer, err := operatingsystem.IsContainerized(); err != nil { + logrus.Errorf("Could not determine if daemon is containerized: %v", err) + operatingSystem += " (error determining if containerized)" + } else if inContainer { + operatingSystem += " (containerized)" + } + } + + meminfo, err := system.ReadMemInfo() + if err != nil { + logrus.Errorf("Could not read system memory info: %v", err) + } + + // if we still have the original dockerinit binary from before we copied it locally, let's return the path to that, since that's more intuitive (the copied path is trivial to derive by hand given VERSION) + initPath := utils.DockerInitPath("") + if initPath == "" { + // if that fails, we'll just return the path from the daemon + initPath = daemon.SystemInitPath() + } + + v := &types.Info{ + ID: daemon.ID, + Containers: len(daemon.List()), + Images: imgcount, + Driver: daemon.GraphDriver().String(), + DriverStatus: daemon.GraphDriver().Status(), + IPv4Forwarding: !daemon.SystemConfig().IPv4ForwardingDisabled, + BridgeNfIptables: !daemon.SystemConfig().BridgeNfCallIptablesDisabled, + BridgeNfIp6tables: !daemon.SystemConfig().BridgeNfCallIp6tablesDisabled, + Debug: os.Getenv("DEBUG") != "", + NFd: fileutils.GetTotalUsedFds(), + NGoroutines: runtime.NumGoroutine(), + SystemTime: time.Now().Format(time.RFC3339Nano), + ExecutionDriver: daemon.ExecutionDriver().Name(), + LoggingDriver: 
daemon.defaultLogConfig.Type, + NEventsListener: daemon.EventsService.SubscribersCount(), + KernelVersion: kernelVersion, + OperatingSystem: operatingSystem, + IndexServerAddress: registry.IndexServer, + RegistryConfig: daemon.RegistryService.Config, + InitSha1: dockerversion.INITSHA1, + InitPath: initPath, + NCPU: runtime.NumCPU(), + MemTotal: meminfo.MemTotal, + DockerRootDir: daemon.Config().Root, + Labels: daemon.Config().Labels, + ExperimentalBuild: utils.ExperimentalBuild(), + } + + // TODO Windows. Refactor this more once sysinfo is refactored into + // platform specific code. On Windows, sysinfo.cgroupMemInfo and + // sysinfo.cgroupCpuInfo will be nil otherwise and cause a SIGSEGV if + // an attempt is made to access through them. + if runtime.GOOS != "windows" { + v.MemoryLimit = daemon.SystemConfig().MemoryLimit + v.SwapLimit = daemon.SystemConfig().SwapLimit + v.OomKillDisable = daemon.SystemConfig().OomKillDisable + v.CpuCfsPeriod = daemon.SystemConfig().CpuCfsPeriod + v.CpuCfsQuota = daemon.SystemConfig().CpuCfsQuota + } + + if httpProxy := os.Getenv("http_proxy"); httpProxy != "" { + v.HttpProxy = httpProxy + } + if httpsProxy := os.Getenv("https_proxy"); httpsProxy != "" { + v.HttpsProxy = httpsProxy + } + if noProxy := os.Getenv("no_proxy"); noProxy != "" { + v.NoProxy = noProxy + } + if hostname, err := os.Hostname(); err == nil { + v.Name = hostname + } + + return v, nil +} diff --git a/daemon/inspect.go b/daemon/inspect.go new file mode 100644 index 00000000..d38471a0 --- /dev/null +++ b/daemon/inspect.go @@ -0,0 +1,138 @@ +package daemon + +import ( + "fmt" + "time" + + "github.com/docker/docker/api/types" +) + +func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error) { + container, err := daemon.Get(name) + if err != nil { + return nil, err + } + + container.Lock() + defer container.Unlock() + + base, err := daemon.getInspectData(container) + if err != nil { + return nil, err + } + + mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) + for _, m := range container.MountPoints { + mountPoints = append(mountPoints, types.MountPoint{ + Name: m.Name, + Source: m.Path(), + Destination: m.Destination, + Driver: m.Driver, + Mode: m.Relabel, + RW: m.RW, + }) + } + + return &types.ContainerJSON{base, mountPoints, container.Config}, nil +} + +func (daemon *Daemon) ContainerInspectPre120(name string) (*types.ContainerJSONPre120, error) { + container, err := daemon.Get(name) + if err != nil { + return nil, err + } + + container.Lock() + defer container.Unlock() + + base, err := daemon.getInspectData(container) + if err != nil { + return nil, err + } + + volumes := make(map[string]string) + volumesRW := make(map[string]bool) + for _, m := range container.MountPoints { + volumes[m.Destination] = m.Path() + volumesRW[m.Destination] = m.RW + } + + config := &types.ContainerConfig{ + container.Config, + container.hostConfig.Memory, + container.hostConfig.MemorySwap, + container.hostConfig.CpuShares, + container.hostConfig.CpusetCpus, + } + + return &types.ContainerJSONPre120{base, volumes, volumesRW, config}, nil +} + +func (daemon *Daemon) getInspectData(container *Container) (*types.ContainerJSONBase, error) { + // make a copy to play with + hostConfig := *container.hostConfig + + if children, err := daemon.Children(container.Name); err == nil { + for linkAlias, child := range children { + hostConfig.Links = append(hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias)) + } + } + // we need this trick to preserve empty log driver, so 
+	// container will use daemon defaults even if the daemon changes them
+	if hostConfig.LogConfig.Type == "" {
+		hostConfig.LogConfig = daemon.defaultLogConfig
+	}
+
+	containerState := &types.ContainerState{
+		Running:    container.State.Running,
+		Paused:     container.State.Paused,
+		Restarting: container.State.Restarting,
+		OOMKilled:  container.State.OOMKilled,
+		Dead:       container.State.Dead,
+		Pid:        container.State.Pid,
+		ExitCode:   container.State.ExitCode,
+		Error:      container.State.Error,
+		StartedAt:  container.State.StartedAt.Format(time.RFC3339Nano),
+		FinishedAt: container.State.FinishedAt.Format(time.RFC3339Nano),
+	}
+
+	contJSONBase := &types.ContainerJSONBase{
+		Id:              container.ID,
+		Created:         container.Created.Format(time.RFC3339Nano),
+		Path:            container.Path,
+		Args:            container.Args,
+		State:           containerState,
+		Image:           container.ImageID,
+		NetworkSettings: container.NetworkSettings,
+		ResolvConfPath:  container.ResolvConfPath,
+		HostnamePath:    container.HostnamePath,
+		HostsPath:       container.HostsPath,
+		LogPath:         container.LogPath,
+		Name:            container.Name,
+		RestartCount:    container.RestartCount,
+		Driver:          container.Driver,
+		ExecDriver:      container.ExecDriver,
+		MountLabel:      container.MountLabel,
+		ProcessLabel:    container.ProcessLabel,
+		AppArmorProfile: container.AppArmorProfile,
+		ExecIDs:         container.GetExecIDs(),
+		HostConfig:      &hostConfig,
+	}
+
+	contJSONBase.GraphDriver.Name = container.Driver
+	graphDriverData, err := daemon.driver.GetMetadata(container.ID)
+	if err != nil {
+		return nil, err
+	}
+	contJSONBase.GraphDriver.Data = graphDriverData
+
+	return contJSONBase, nil
+}
+
+func (daemon *Daemon) ContainerExecInspect(id string) (*execConfig, error) {
+	eConfig, err := daemon.getExecConfig(id)
+	if err != nil {
+		return nil, err
+	}
+	return eConfig, nil
+}
diff --git a/daemon/kill.go b/daemon/kill.go
new file mode 100644
index 00000000..7a4d9ce8
--- /dev/null
+++ b/daemon/kill.go
@@ -0,0 +1,27 @@
+package daemon
+
+import "syscall"
+
+// ContainerKill sends a signal to the container.
+// If no signal is given (sig 0), then Kill with SIGKILL and wait
+// for the container to exit.
+// If a signal is given, then just send it to the container and return.
+func (daemon *Daemon) ContainerKill(name string, sig uint64) error {
+	container, err := daemon.Get(name)
+	if err != nil {
+		return err
+	}
+
+	// If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait())
+	if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
+		if err := container.Kill(); err != nil {
+			return err
+		}
+	} else {
+		// Otherwise, just send the requested signal
+		if err := container.KillSig(int(sig)); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/daemon/list.go b/daemon/list.go
new file mode 100644
index 00000000..5c8315cb
--- /dev/null
+++ b/daemon/list.go
@@ -0,0 +1,205 @@
+package daemon
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/pkg/graphdb"
+	"github.com/docker/docker/pkg/nat"
+	"github.com/docker/docker/pkg/parsers/filters"
+)
+
+// List returns an array of all containers registered in the daemon.
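+// It is a plain snapshot of the daemon's in-memory container store; the
+// ps-style filtering happens on top of it in Containers below.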
+func (daemon *Daemon) List() []*Container { + return daemon.containers.List() +} + +type ContainersConfig struct { + All bool + Since string + Before string + Limit int + Size bool + Filters string +} + +func (daemon *Daemon) Containers(config *ContainersConfig) ([]*types.Container, error) { + var ( + foundBefore bool + displayed int + all = config.All + n = config.Limit + psFilters filters.Args + filtExited []int + ) + containers := []*types.Container{} + + psFilters, err := filters.FromParam(config.Filters) + if err != nil { + return nil, err + } + if i, ok := psFilters["exited"]; ok { + for _, value := range i { + code, err := strconv.Atoi(value) + if err != nil { + return nil, err + } + filtExited = append(filtExited, code) + } + } + + if i, ok := psFilters["status"]; ok { + for _, value := range i { + if !isValidStateString(value) { + return nil, errors.New("Unrecognised filter value for status") + } + if value == "exited" || value == "created" { + all = true + } + } + } + names := map[string][]string{} + daemon.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error { + names[e.ID()] = append(names[e.ID()], p) + return nil + }, 1) + + var beforeCont, sinceCont *Container + if config.Before != "" { + beforeCont, err = daemon.Get(config.Before) + if err != nil { + return nil, err + } + } + + if config.Since != "" { + sinceCont, err = daemon.Get(config.Since) + if err != nil { + return nil, err + } + } + + errLast := errors.New("last container") + writeCont := func(container *Container) error { + container.Lock() + defer container.Unlock() + if !container.Running && !all && n <= 0 && config.Since == "" && config.Before == "" { + return nil + } + if !psFilters.Match("name", container.Name) { + return nil + } + + if !psFilters.Match("id", container.ID) { + return nil + } + + if !psFilters.MatchKVList("label", container.Config.Labels) { + return nil + } + + if config.Before != "" && !foundBefore { + if container.ID == beforeCont.ID { + foundBefore = true + } + return nil + } + if n > 0 && displayed == n { + return errLast + } + if config.Since != "" { + if container.ID == sinceCont.ID { + return errLast + } + } + if len(filtExited) > 0 { + shouldSkip := true + for _, code := range filtExited { + if code == container.ExitCode && !container.Running { + shouldSkip = false + break + } + } + if shouldSkip { + return nil + } + } + + if !psFilters.Match("status", container.State.StateString()) { + return nil + } + displayed++ + newC := &types.Container{ + ID: container.ID, + Names: names[container.ID], + } + newC.Image = container.Config.Image + if len(container.Args) > 0 { + args := []string{} + for _, arg := range container.Args { + if strings.Contains(arg, " ") { + args = append(args, fmt.Sprintf("'%s'", arg)) + } else { + args = append(args, arg) + } + } + argsAsString := strings.Join(args, " ") + + newC.Command = fmt.Sprintf("%s %s", container.Path, argsAsString) + } else { + newC.Command = fmt.Sprintf("%s", container.Path) + } + newC.Created = int(container.Created.Unix()) + newC.Status = container.State.String() + newC.HostConfig.NetworkMode = string(container.HostConfig().NetworkMode) + + newC.Ports = []types.Port{} + for port, bindings := range container.NetworkSettings.Ports { + p, err := nat.ParsePort(port.Port()) + if err != nil { + return err + } + if len(bindings) == 0 { + newC.Ports = append(newC.Ports, types.Port{ + PrivatePort: p, + Type: port.Proto(), + }) + continue + } + for _, binding := range bindings { + h, err := nat.ParsePort(binding.HostPort) + if err != nil 
{
+					return err
+				}
+				newC.Ports = append(newC.Ports, types.Port{
+					PrivatePort: p,
+					PublicPort:  h,
+					Type:        port.Proto(),
+					IP:          binding.HostIP,
+				})
+			}
+		}
+
+		if config.Size {
+			sizeRw, sizeRootFs := container.GetSize()
+			newC.SizeRw = int(sizeRw)
+			newC.SizeRootFs = int(sizeRootFs)
+		}
+		newC.Labels = container.Config.Labels
+		containers = append(containers, newC)
+		return nil
+	}
+
+	for _, container := range daemon.List() {
+		if err := writeCont(container); err != nil {
+			if err != errLast {
+				return nil, err
+			}
+			break
+		}
+	}
+	return containers, nil
+}
diff --git a/daemon/logdrivers_linux.go b/daemon/logdrivers_linux.go
new file mode 100644
index 00000000..ded4d27d
--- /dev/null
+++ b/daemon/logdrivers_linux.go
@@ -0,0 +1,11 @@
+package daemon
+
+// Importing packages here only to make sure their init gets called and
+// therefore they register themselves to the logdriver factory.
+import (
+	_ "github.com/docker/docker/daemon/logger/fluentd"
+	_ "github.com/docker/docker/daemon/logger/gelf"
+	_ "github.com/docker/docker/daemon/logger/journald"
+	_ "github.com/docker/docker/daemon/logger/jsonfilelog"
+	_ "github.com/docker/docker/daemon/logger/syslog"
+)
diff --git a/daemon/logdrivers_windows.go b/daemon/logdrivers_windows.go
new file mode 100644
index 00000000..5dcbe718
--- /dev/null
+++ b/daemon/logdrivers_windows.go
@@ -0,0 +1,7 @@
+package daemon
+
+// Importing packages here only to make sure their init gets called and
+// therefore they register themselves to the logdriver factory.
+import (
+	_ "github.com/docker/docker/daemon/logger/jsonfilelog"
+)
diff --git a/daemon/logger/copier.go b/daemon/logger/copier.go
new file mode 100644
index 00000000..6fe2b007
--- /dev/null
+++ b/daemon/logger/copier.go
@@ -0,0 +1,71 @@
+package logger
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+	"sync"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+)
+
+// Copier can copy logs from specified sources to a Logger and attach
+// ContainerID and Timestamp.
+// Writes are concurrent, so the destination Logger needs to provide its own
+// synchronization.
+type Copier struct {
+	// cid is the container id for which we are copying logs
+	cid string
+	// srcs is a map of name -> reader pairs, for example "stdout", "stderr"
+	srcs     map[string]io.Reader
+	dst      Logger
+	copyJobs sync.WaitGroup
+}
+
+// NewCopier creates a new Copier
+func NewCopier(cid string, srcs map[string]io.Reader, dst Logger) (*Copier, error) {
+	return &Copier{
+		cid:  cid,
+		srcs: srcs,
+		dst:  dst,
+	}, nil
+}
+
+// Run starts copying logs
+func (c *Copier) Run() {
+	for src, w := range c.srcs {
+		c.copyJobs.Add(1)
+		go c.copySrc(src, w)
+	}
+}
+
+func (c *Copier) copySrc(name string, src io.Reader) {
+	defer c.copyJobs.Done()
+	reader := bufio.NewReader(src)
+
+	for {
+		line, err := reader.ReadBytes('\n')
+		line = bytes.TrimSuffix(line, []byte{'\n'})
+
+		// ReadBytes can return full or partial output even when it fails,
+		// e.g. it can return a full entry and EOF.
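+		// So log whatever was read first, and only then decide below whether
+		// to keep reading or to return on the error.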
+ if err == nil || len(line) > 0 { + if logErr := c.dst.Log(&Message{ContainerID: c.cid, Line: line, Source: name, Timestamp: time.Now().UTC()}); logErr != nil { + logrus.Errorf("Failed to log msg %q for logger %s: %s", line, c.dst.Name(), logErr) + } + } + + if err != nil { + if err != io.EOF { + logrus.Errorf("Error scanning log stream: %s", err) + } + return + } + + } +} + +// Wait waits until all copying is done +func (c *Copier) Wait() { + c.copyJobs.Wait() +} diff --git a/daemon/logger/copier_test.go b/daemon/logger/copier_test.go new file mode 100644 index 00000000..2db7b8e2 --- /dev/null +++ b/daemon/logger/copier_test.go @@ -0,0 +1,99 @@ +package logger + +import ( + "bytes" + "encoding/json" + "io" + "testing" + "time" +) + +type TestLoggerJSON struct { + *json.Encoder +} + +func (l *TestLoggerJSON) Log(m *Message) error { return l.Encode(m) } + +func (l *TestLoggerJSON) Close() error { return nil } + +func (l *TestLoggerJSON) Name() string { return "json" } + +type TestLoggerText struct { + *bytes.Buffer +} + +func (l *TestLoggerText) Log(m *Message) error { + _, err := l.WriteString(m.ContainerID + " " + m.Source + " " + string(m.Line) + "\n") + return err +} + +func (l *TestLoggerText) Close() error { return nil } + +func (l *TestLoggerText) Name() string { return "text" } + +func TestCopier(t *testing.T) { + stdoutLine := "Line that thinks that it is log line from docker stdout" + stderrLine := "Line that thinks that it is log line from docker stderr" + var stdout bytes.Buffer + var stderr bytes.Buffer + for i := 0; i < 30; i++ { + if _, err := stdout.WriteString(stdoutLine + "\n"); err != nil { + t.Fatal(err) + } + if _, err := stderr.WriteString(stderrLine + "\n"); err != nil { + t.Fatal(err) + } + } + + var jsonBuf bytes.Buffer + + jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf)} + + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + c, err := NewCopier(cid, + map[string]io.Reader{ + "stdout": &stdout, + "stderr": &stderr, + }, + jsonLog) + if err != nil { + t.Fatal(err) + } + c.Run() + wait := make(chan struct{}) + go func() { + c.Wait() + close(wait) + }() + select { + case <-time.After(1 * time.Second): + t.Fatal("Copier failed to do its work in 1 second") + case <-wait: + } + dec := json.NewDecoder(&jsonBuf) + for { + var msg Message + if err := dec.Decode(&msg); err != nil { + if err == io.EOF { + break + } + t.Fatal(err) + } + if msg.Source != "stdout" && msg.Source != "stderr" { + t.Fatalf("Wrong Source: %q, should be %q or %q", msg.Source, "stdout", "stderr") + } + if msg.ContainerID != cid { + t.Fatalf("Wrong ContainerID: %q, expected %q", msg.ContainerID, cid) + } + if msg.Source == "stdout" { + if string(msg.Line) != stdoutLine { + t.Fatalf("Wrong Line: %q, expected %q", msg.Line, stdoutLine) + } + } + if msg.Source == "stderr" { + if string(msg.Line) != stderrLine { + t.Fatalf("Wrong Line: %q, expected %q", msg.Line, stderrLine) + } + } + } +} diff --git a/daemon/logger/factory.go b/daemon/logger/factory.go new file mode 100644 index 00000000..f40655ae --- /dev/null +++ b/daemon/logger/factory.go @@ -0,0 +1,119 @@ +package logger + +import ( + "fmt" + "os" + "strings" + "sync" + "time" +) + +// Creator is a method that builds a logging driver instance with given context +type Creator func(Context) (Logger, error) + +//LogOptValidator is a method that validates the log opts provided +type LogOptValidator func(cfg map[string]string) error + +// Context provides enough information for a logging driver to do its function +type 
Context struct { + Config map[string]string + ContainerID string + ContainerName string + ContainerEntrypoint string + ContainerArgs []string + ContainerImageID string + ContainerImageName string + ContainerCreated time.Time + LogPath string +} + +// Hostname returns the hostname from the underlying OS +func (ctx *Context) Hostname() (string, error) { + hostname, err := os.Hostname() + if err != nil { + return "", fmt.Errorf("logger: can not resolve hostname: %v", err) + } + return hostname, nil +} + +// Command returns the command that the container being logged was started with +func (ctx *Context) Command() string { + terms := []string{ctx.ContainerEntrypoint} + for _, arg := range ctx.ContainerArgs { + terms = append(terms, arg) + } + command := strings.Join(terms, " ") + return command +} + +type logdriverFactory struct { + registry map[string]Creator + optValidator map[string]LogOptValidator + m sync.Mutex +} + +func (lf *logdriverFactory) register(name string, c Creator) error { + lf.m.Lock() + defer lf.m.Unlock() + + if _, ok := lf.registry[name]; ok { + return fmt.Errorf("logger: log driver named '%s' is already registered", name) + } + lf.registry[name] = c + return nil +} + +func (lf *logdriverFactory) registerLogOptValidator(name string, l LogOptValidator) error { + lf.m.Lock() + defer lf.m.Unlock() + + if _, ok := lf.optValidator[name]; ok { + return fmt.Errorf("logger: log driver named '%s' is already registered", name) + } + lf.optValidator[name] = l + return nil +} + +func (lf *logdriverFactory) get(name string) (Creator, error) { + lf.m.Lock() + defer lf.m.Unlock() + + c, ok := lf.registry[name] + if !ok { + return c, fmt.Errorf("logger: no log driver named '%s' is registered", name) + } + return c, nil +} + +func (lf *logdriverFactory) getLogOptValidator(name string) LogOptValidator { + lf.m.Lock() + defer lf.m.Unlock() + + c, _ := lf.optValidator[name] + return c +} + +var factory = &logdriverFactory{registry: make(map[string]Creator), optValidator: make(map[string]LogOptValidator)} // global factory instance + +// RegisterLogDriver registers the given logging driver builder with given logging +// driver name. +func RegisterLogDriver(name string, c Creator) error { + return factory.register(name, c) +} + +func RegisterLogOptValidator(name string, l LogOptValidator) error { + return factory.registerLogOptValidator(name, l) +} + +// GetLogDriver provides the logging driver builder for a logging driver name. 
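+// For example, GetLogDriver("json-file") returns the Creator that the
+// jsonfilelog package registers from its init function.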
+func GetLogDriver(name string) (Creator, error) {
+	return factory.get(name)
+}
+
+func ValidateLogOpts(name string, cfg map[string]string) error {
+	l := factory.getLogOptValidator(name)
+	if l != nil {
+		return l(cfg)
+	}
+	return nil
+}
diff --git a/daemon/logger/fluentd/fluentd.go b/daemon/logger/fluentd/fluentd.go
new file mode 100644
index 00000000..97205dde
--- /dev/null
+++ b/daemon/logger/fluentd/fluentd.go
@@ -0,0 +1,140 @@
+package fluentd
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+	"net"
+	"strconv"
+	"strings"
+	"text/template"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/logger"
+	"github.com/fluent/fluent-logger-golang/fluent"
+)
+
+type Fluentd struct {
+	tag           string
+	containerID   string
+	containerName string
+	writer        *fluent.Fluent
+}
+
+type Receiver struct {
+	ID     string
+	FullID string
+	Name   string
+}
+
+const (
+	name             = "fluentd"
+	defaultHostName  = "localhost"
+	defaultPort      = 24224
+	defaultTagPrefix = "docker"
+)
+
+func init() {
+	if err := logger.RegisterLogDriver(name, New); err != nil {
+		logrus.Fatal(err)
+	}
+	if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil {
+		logrus.Fatal(err)
+	}
+}
+
+func parseConfig(ctx logger.Context) (string, int, string, error) {
+	host := defaultHostName
+	port := defaultPort
+	tag := "docker." + ctx.ContainerID[:12]
+
+	config := ctx.Config
+
+	if address := config["fluentd-address"]; address != "" {
+		if h, p, err := net.SplitHostPort(address); err != nil {
+			if !strings.Contains(err.Error(), "missing port in address") {
+				return "", 0, "", err
+			}
+			host = h
+		} else {
+			portnum, err := strconv.Atoi(p)
+			if err != nil {
+				return "", 0, "", err
+			}
+			host = h
+			port = portnum
+		}
+	}
+
+	if config["fluentd-tag"] != "" {
+		receiver := &Receiver{
+			ID:     ctx.ContainerID[:12],
+			FullID: ctx.ContainerID,
+			Name:   ctx.ContainerName,
+		}
+		tmpl, err := template.New("tag").Parse(config["fluentd-tag"])
+		if err != nil {
+			return "", 0, "", err
+		}
+		buf := new(bytes.Buffer)
+		if err := tmpl.Execute(buf, receiver); err != nil {
+			return "", 0, "", err
+		}
+		tag = buf.String()
+	}
+
+	return host, port, tag, nil
+}
+
+func New(ctx logger.Context) (logger.Logger, error) {
+	host, port, tag, err := parseConfig(ctx)
+	if err != nil {
+		return nil, err
+	}
+	logrus.Debugf("logging driver fluentd configured for container:%s, host:%s, port:%d, tag:%s.", ctx.ContainerID, host, port, tag)
+
+	// the logger tries to reconnect 2**32 - 1 times, failing (and panicking)
+	// only after 204 years [ 1.5 ** (2**32 - 1) - 1 seconds]
+	log, err := fluent.New(fluent.Config{FluentPort: port, FluentHost: host, RetryWait: 1000, MaxRetry: math.MaxInt32})
+	if err != nil {
+		return nil, err
+	}
+	return &Fluentd{
+		tag:           tag,
+		containerID:   ctx.ContainerID,
+		containerName: ctx.ContainerName,
+		writer:        log,
+	}, nil
+}
+
+func (f *Fluentd) Log(msg *logger.Message) error {
+	data := map[string]string{
+		"container_id":   f.containerID,
+		"container_name": f.containerName,
+		"source":         msg.Source,
+		"log":            string(msg.Line),
+	}
+	// fluent-logger-golang buffers logs from failures and disconnections,
+	// and these are transferred again automatically.
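+	// PostWithTime ships the record under the configured tag with
+	// msg.Timestamp as the event time, so entries keep their original order.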
+ return f.writer.PostWithTime(f.tag, msg.Timestamp, data) +} + +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "fluentd-address": + case "fluentd-tag": + default: + return fmt.Errorf("unknown log opt '%s' for fluentd log driver", key) + } + } + return nil +} + +func (f *Fluentd) Close() error { + return f.writer.Close() +} + +func (f *Fluentd) Name() string { + return name +} diff --git a/daemon/logger/gelf/gelf.go b/daemon/logger/gelf/gelf.go new file mode 100644 index 00000000..22c734bc --- /dev/null +++ b/daemon/logger/gelf/gelf.go @@ -0,0 +1,159 @@ +// +build linux + +package gelf + +import ( + "bytes" + "fmt" + "net" + "net/url" + "time" + + "github.com/Graylog2/go-gelf/gelf" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/urlutil" +) + +const name = "gelf" + +type GelfLogger struct { + writer *gelf.Writer + ctx logger.Context + fields GelfFields +} + +type GelfFields struct { + hostname string + containerId string + containerName string + imageId string + imageName string + command string + tag string + created time.Time +} + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +func New(ctx logger.Context) (logger.Logger, error) { + // parse gelf address + address, err := parseAddress(ctx.Config["gelf-address"]) + if err != nil { + return nil, err + } + + // collect extra data for GELF message + hostname, err := ctx.Hostname() + if err != nil { + return nil, fmt.Errorf("gelf: cannot access hostname to set source field") + } + + // remove trailing slash from container name + containerName := bytes.TrimLeft([]byte(ctx.ContainerName), "/") + + fields := GelfFields{ + hostname: hostname, + containerId: ctx.ContainerID, + containerName: string(containerName), + imageId: ctx.ContainerImageID, + imageName: ctx.ContainerImageName, + command: ctx.Command(), + tag: ctx.Config["gelf-tag"], + created: ctx.ContainerCreated, + } + + // create new gelfWriter + gelfWriter, err := gelf.NewWriter(address) + if err != nil { + return nil, fmt.Errorf("gelf: cannot connect to GELF endpoint: %s %v", address, err) + } + + return &GelfLogger{ + writer: gelfWriter, + ctx: ctx, + fields: fields, + }, nil +} + +func (s *GelfLogger) Log(msg *logger.Message) error { + // remove trailing and leading whitespace + short := bytes.TrimSpace([]byte(msg.Line)) + + level := gelf.LOG_INFO + if msg.Source == "stderr" { + level = gelf.LOG_ERR + } + + m := gelf.Message{ + Version: "1.1", + Host: s.fields.hostname, + Short: string(short), + TimeUnix: float64(msg.Timestamp.UnixNano()/int64(time.Millisecond)) / 1000.0, + Level: level, + Extra: map[string]interface{}{ + "_container_id": s.fields.containerId, + "_container_name": s.fields.containerName, + "_image_id": s.fields.imageId, + "_image_name": s.fields.imageName, + "_command": s.fields.command, + "_tag": s.fields.tag, + "_created": s.fields.created, + }, + } + + if err := s.writer.WriteMessage(&m); err != nil { + return fmt.Errorf("gelf: cannot send GELF message: %v", err) + } + return nil +} + +func (s *GelfLogger) Close() error { + return s.writer.Close() +} + +func (s *GelfLogger) Name() string { + return name +} + +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "gelf-address": + case "gelf-tag": + default: + return fmt.Errorf("unknown log opt '%s' for 
gelf log driver", key) + } + } + return nil +} + +func parseAddress(address string) (string, error) { + if urlutil.IsTransportURL(address) { + url, err := url.Parse(address) + if err != nil { + return "", err + } + + // we support only udp + if url.Scheme != "udp" { + return "", fmt.Errorf("gelf: endpoint needs to be UDP") + } + + // get host and port + if _, _, err = net.SplitHostPort(url.Host); err != nil { + return "", fmt.Errorf("gelf: please provide gelf-address as udp://host:port") + } + + return url.Host, nil + } + + return "", nil +} diff --git a/daemon/logger/gelf/gelf_unsupported.go b/daemon/logger/gelf/gelf_unsupported.go new file mode 100644 index 00000000..266f73b1 --- /dev/null +++ b/daemon/logger/gelf/gelf_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package gelf diff --git a/daemon/logger/journald/journald.go b/daemon/logger/journald/journald.go new file mode 100644 index 00000000..4dd88c14 --- /dev/null +++ b/daemon/logger/journald/journald.go @@ -0,0 +1,55 @@ +// +build linux + +package journald + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/coreos/go-systemd/journal" + "github.com/docker/docker/daemon/logger" +) + +const name = "journald" + +type Journald struct { + Jmap map[string]string +} + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } +} + +func New(ctx logger.Context) (logger.Logger, error) { + if !journal.Enabled() { + return nil, fmt.Errorf("journald is not enabled on this host") + } + // Strip a leading slash so that people can search for + // CONTAINER_NAME=foo rather than CONTAINER_NAME=/foo. + name := ctx.ContainerName + if name[0] == '/' { + name = name[1:] + } + jmap := map[string]string{ + "CONTAINER_ID": ctx.ContainerID[:12], + "CONTAINER_ID_FULL": ctx.ContainerID, + "CONTAINER_NAME": name} + return &Journald{Jmap: jmap}, nil +} + +func (s *Journald) Log(msg *logger.Message) error { + if msg.Source == "stderr" { + return journal.Send(string(msg.Line), journal.PriErr, s.Jmap) + } + return journal.Send(string(msg.Line), journal.PriInfo, s.Jmap) +} + +func (s *Journald) Close() error { + return nil +} + +func (s *Journald) Name() string { + return name +} diff --git a/daemon/logger/journald/journald_unsupported.go b/daemon/logger/journald/journald_unsupported.go new file mode 100644 index 00000000..110833c2 --- /dev/null +++ b/daemon/logger/journald/journald_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package journald diff --git a/daemon/logger/jsonfilelog/jsonfilelog.go b/daemon/logger/jsonfilelog/jsonfilelog.go new file mode 100644 index 00000000..4703f64b --- /dev/null +++ b/daemon/logger/jsonfilelog/jsonfilelog.go @@ -0,0 +1,382 @@ +package jsonfilelog + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "strconv" + "sync" + "time" + + "gopkg.in/fsnotify.v1" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/pubsub" + "github.com/docker/docker/pkg/tailfile" + "github.com/docker/docker/pkg/timeutils" + "github.com/docker/docker/pkg/units" +) + +const ( + Name = "json-file" + maxJSONDecodeRetry = 10 +) + +// JSONFileLogger is Logger implementation for default docker logging: +// JSON objects to file +type JSONFileLogger struct { + buf *bytes.Buffer + f *os.File // store for closing + mu sync.Mutex // protects buffer + capacity int64 //maximum size of each file + n int //maximum number of files + ctx logger.Context + 
readers map[*logger.LogWatcher]struct{} // stores the active log followers + notifyRotate *pubsub.Publisher +} + +func init() { + if err := logger.RegisterLogDriver(Name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(Name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates new JSONFileLogger which writes to filename +func New(ctx logger.Context) (logger.Logger, error) { + log, err := os.OpenFile(ctx.LogPath, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600) + if err != nil { + return nil, err + } + var capval int64 = -1 + if capacity, ok := ctx.Config["max-size"]; ok { + var err error + capval, err = units.FromHumanSize(capacity) + if err != nil { + return nil, err + } + } + var maxFiles int = 1 + if maxFileString, ok := ctx.Config["max-file"]; ok { + maxFiles, err = strconv.Atoi(maxFileString) + if err != nil { + return nil, err + } + if maxFiles < 1 { + return nil, fmt.Errorf("max-files cannot be less than 1.") + } + } + return &JSONFileLogger{ + f: log, + buf: bytes.NewBuffer(nil), + ctx: ctx, + capacity: capval, + n: maxFiles, + readers: make(map[*logger.LogWatcher]struct{}), + notifyRotate: pubsub.NewPublisher(0, 1), + }, nil +} + +// Log converts logger.Message to jsonlog.JSONLog and serializes it to file +func (l *JSONFileLogger) Log(msg *logger.Message) error { + l.mu.Lock() + defer l.mu.Unlock() + + timestamp, err := timeutils.FastMarshalJSON(msg.Timestamp) + if err != nil { + return err + } + err = (&jsonlog.JSONLogBytes{Log: append(msg.Line, '\n'), Stream: msg.Source, Created: timestamp}).MarshalJSONBuf(l.buf) + if err != nil { + return err + } + l.buf.WriteByte('\n') + _, err = writeLog(l) + return err +} + +func writeLog(l *JSONFileLogger) (int64, error) { + if l.capacity == -1 { + return writeToBuf(l) + } + meta, err := l.f.Stat() + if err != nil { + return -1, err + } + if meta.Size() >= l.capacity { + name := l.f.Name() + if err := l.f.Close(); err != nil { + return -1, err + } + if err := rotate(name, l.n); err != nil { + return -1, err + } + file, err := os.OpenFile(name, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0666) + if err != nil { + return -1, err + } + l.f = file + l.notifyRotate.Publish(struct{}{}) + } + return writeToBuf(l) +} + +func writeToBuf(l *JSONFileLogger) (int64, error) { + i, err := l.buf.WriteTo(l.f) + if err != nil { + l.buf = bytes.NewBuffer(nil) + } + return i, err +} + +func rotate(name string, n int) error { + if n < 2 { + return nil + } + for i := n - 1; i > 1; i-- { + oldFile := name + "." + strconv.Itoa(i) + replacingFile := name + "." 
+ strconv.Itoa(i-1) + if err := backup(oldFile, replacingFile); err != nil { + return err + } + } + if err := backup(name+".1", name); err != nil { + return err + } + return nil +} + +func backup(old, curr string) error { + if _, err := os.Stat(old); !os.IsNotExist(err) { + err := os.Remove(old) + if err != nil { + return err + } + } + if _, err := os.Stat(curr); os.IsNotExist(err) { + f, err := os.Create(curr) + if err != nil { + return err + } + f.Close() + } + return os.Rename(curr, old) +} + +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "max-file": + case "max-size": + default: + return fmt.Errorf("unknown log opt '%s' for json-file log driver", key) + } + } + return nil +} + +func (l *JSONFileLogger) LogPath() string { + return l.ctx.LogPath +} + +// Close closes underlying file and signals all readers to stop +func (l *JSONFileLogger) Close() error { + l.mu.Lock() + err := l.f.Close() + for r := range l.readers { + r.Close() + delete(l.readers, r) + } + l.mu.Unlock() + return err +} + +// Name returns name of this logger +func (l *JSONFileLogger) Name() string { + return Name +} + +func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) { + l.Reset() + if err := dec.Decode(l); err != nil { + return nil, err + } + msg := &logger.Message{ + Source: l.Stream, + Timestamp: l.Created, + Line: []byte(l.Log), + } + return msg, nil +} + +// Reads from the log file +func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { + logWatcher := logger.NewLogWatcher() + + go l.readLogs(logWatcher, config) + return logWatcher +} + +func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) { + defer close(logWatcher.Msg) + + pth := l.ctx.LogPath + var files []io.ReadSeeker + for i := l.n; i > 1; i-- { + f, err := os.Open(fmt.Sprintf("%s.%d", pth, i-1)) + if err != nil { + if !os.IsNotExist(err) { + logWatcher.Err <- err + break + } + continue + } + defer f.Close() + files = append(files, f) + } + + latestFile, err := os.Open(pth) + if err != nil { + logWatcher.Err <- err + return + } + defer latestFile.Close() + + files = append(files, latestFile) + tailer := ioutils.MultiReadSeeker(files...) 
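+	// MultiReadSeeker presents the rotated files (oldest first) and the live
+	// log file as one seekable stream, so tailFile below can count lines
+	// across rotation boundaries.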
+ + if config.Tail != 0 { + tailFile(tailer, logWatcher, config.Tail, config.Since) + } + + if !config.Follow { + return + } + + if config.Tail >= 0 { + latestFile.Seek(0, os.SEEK_END) + } + + l.mu.Lock() + l.readers[logWatcher] = struct{}{} + l.mu.Unlock() + + notifyRotate := l.notifyRotate.Subscribe() + followLogs(latestFile, logWatcher, notifyRotate, config.Since) + + l.mu.Lock() + delete(l.readers, logWatcher) + l.mu.Unlock() + + l.notifyRotate.Evict(notifyRotate) +} + +func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) { + var rdr io.Reader = f + if tail > 0 { + ls, err := tailfile.TailFile(f, tail) + if err != nil { + logWatcher.Err <- err + return + } + rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n"))) + } + dec := json.NewDecoder(rdr) + l := &jsonlog.JSONLog{} + for { + msg, err := decodeLogLine(dec, l) + if err != nil { + if err != io.EOF { + logWatcher.Err <- err + } + return + } + if !since.IsZero() && msg.Timestamp.Before(since) { + continue + } + logWatcher.Msg <- msg + } +} + +func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) { + dec := json.NewDecoder(f) + l := &jsonlog.JSONLog{} + fileWatcher, err := fsnotify.NewWatcher() + if err != nil { + logWatcher.Err <- err + return + } + defer fileWatcher.Close() + if err := fileWatcher.Add(f.Name()); err != nil { + logWatcher.Err <- err + return + } + + var retries int + for { + msg, err := decodeLogLine(dec, l) + if err != nil { + if err != io.EOF { + // try again because this shouldn't happen + if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry { + dec = json.NewDecoder(f) + retries += 1 + continue + } + logWatcher.Err <- err + return + } + + select { + case <-fileWatcher.Events: + dec = json.NewDecoder(f) + continue + case <-fileWatcher.Errors: + logWatcher.Err <- err + return + case <-logWatcher.WatchClose(): + return + case <-notifyRotate: + fileWatcher.Remove(f.Name()) + + f, err = os.Open(f.Name()) + if err != nil { + logWatcher.Err <- err + return + } + if err := fileWatcher.Add(f.Name()); err != nil { + logWatcher.Err <- err + } + dec = json.NewDecoder(f) + continue + } + } + + retries = 0 // reset retries since we've succeeded + if !since.IsZero() && msg.Timestamp.Before(since) { + continue + } + select { + case logWatcher.Msg <- msg: + case <-logWatcher.WatchClose(): + logWatcher.Msg <- msg + for { + msg, err := decodeLogLine(dec, l) + if err != nil { + return + } + if !since.IsZero() && msg.Timestamp.Before(since) { + continue + } + logWatcher.Msg <- msg + } + } + } +} diff --git a/daemon/logger/jsonfilelog/jsonfilelog_test.go b/daemon/logger/jsonfilelog/jsonfilelog_test.go new file mode 100644 index 00000000..cd8a0f71 --- /dev/null +++ b/daemon/logger/jsonfilelog/jsonfilelog_test.go @@ -0,0 +1,151 @@ +package jsonfilelog + +import ( + "io/ioutil" + "os" + "path/filepath" + "strconv" + "testing" + "time" + + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/jsonlog" +) + +func TestJSONFileLogger(t *testing.T) { + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + tmp, err := ioutil.TempDir("", "docker-logger-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + filename := filepath.Join(tmp, "container.log") + l, err := New(logger.Context{ + ContainerID: cid, + LogPath: filename, + }) + if err != nil { + t.Fatal(err) + } + defer l.Close() + + if err := l.Log(&logger.Message{ContainerID: cid, Line: []byte("line1"), Source: "src1"}); err != nil 
{ + t.Fatal(err) + } + if err := l.Log(&logger.Message{ContainerID: cid, Line: []byte("line2"), Source: "src2"}); err != nil { + t.Fatal(err) + } + if err := l.Log(&logger.Message{ContainerID: cid, Line: []byte("line3"), Source: "src3"}); err != nil { + t.Fatal(err) + } + res, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatal(err) + } + expected := `{"log":"line1\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line2\n","stream":"src2","time":"0001-01-01T00:00:00Z"} +{"log":"line3\n","stream":"src3","time":"0001-01-01T00:00:00Z"} +` + + if string(res) != expected { + t.Fatalf("Wrong log content: %q, expected %q", res, expected) + } +} + +func BenchmarkJSONFileLogger(b *testing.B) { + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + tmp, err := ioutil.TempDir("", "docker-logger-") + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(tmp) + filename := filepath.Join(tmp, "container.log") + l, err := New(logger.Context{ + ContainerID: cid, + LogPath: filename, + }) + if err != nil { + b.Fatal(err) + } + defer l.Close() + + testLine := "Line that thinks that it is log line from docker\n" + msg := &logger.Message{ContainerID: cid, Line: []byte(testLine), Source: "stderr", Timestamp: time.Now().UTC()} + jsonlog, err := (&jsonlog.JSONLog{Log: string(msg.Line) + "\n", Stream: msg.Source, Created: msg.Timestamp}).MarshalJSON() + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(jsonlog)+1) * 30) + b.ResetTimer() + for i := 0; i < b.N; i++ { + for j := 0; j < 30; j++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } + } +} + +func TestJSONFileLoggerWithOpts(t *testing.T) { + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + tmp, err := ioutil.TempDir("", "docker-logger-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + filename := filepath.Join(tmp, "container.log") + config := map[string]string{"max-file": "2", "max-size": "1k"} + l, err := New(logger.Context{ + ContainerID: cid, + LogPath: filename, + Config: config, + }) + if err != nil { + t.Fatal(err) + } + defer l.Close() + for i := 0; i < 20; i++ { + if err := l.Log(&logger.Message{ContainerID: cid, Line: []byte("line" + strconv.Itoa(i)), Source: "src1"}); err != nil { + t.Fatal(err) + } + } + res, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatal(err) + } + penUlt, err := ioutil.ReadFile(filename + ".1") + if err != nil { + t.Fatal(err) + } + + expectedPenultimate := `{"log":"line0\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line1\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line2\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line3\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line4\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line5\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line6\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line7\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line8\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line9\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line10\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line11\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line12\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line13\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line14\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line15\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +` + expected := 
`{"log":"line16\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
{"log":"line17\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
{"log":"line18\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
{"log":"line19\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
`
+
+	if string(res) != expected {
+		t.Fatalf("Wrong log content: %q, expected %q", res, expected)
+	}
+	if string(penUlt) != expectedPenultimate {
+		t.Fatalf("Wrong log content: %q, expected %q", penUlt, expectedPenultimate)
+	}
+
+}
diff --git a/daemon/logger/logger.go b/daemon/logger/logger.go
new file mode 100644
index 00000000..99b4a358
--- /dev/null
+++ b/daemon/logger/logger.go
@@ -0,0 +1,79 @@
+package logger
+
+import (
+	"errors"
+	"time"
+
+	"github.com/docker/docker/pkg/timeutils"
+)
+
+// ErrReadLogsNotSupported is returned when the logger does not support reading logs
+var ErrReadLogsNotSupported = errors.New("configured logging reader does not support reading")
+
+const (
+	// TimeFormat is the time format used for timestamps sent to log readers
+	TimeFormat           = timeutils.RFC3339NanoFixed
+	logWatcherBufferSize = 4096
+)
+
+// Message is the data structure that represents one log record from a container
+type Message struct {
+	ContainerID string
+	Line        []byte
+	Source      string
+	Timestamp   time.Time
+}
+
+// Logger is the interface for docker logging drivers
+type Logger interface {
+	Log(*Message) error
+	Name() string
+	Close() error
+}
+
+// ReadConfig is the configuration passed into ReadLogs
+type ReadConfig struct {
+	Since  time.Time
+	Tail   int
+	Follow bool
+}
+
+// LogReader is the interface for reading log messages for loggers that support reading
+type LogReader interface {
+	// Read logs from underlying logging backend
+	ReadLogs(ReadConfig) *LogWatcher
+}
+
+// LogWatcher is used when consuming logs read from the LogReader interface
+type LogWatcher struct {
+	// For sending log messages to a reader
+	Msg chan *Message
+	// For sending error messages that occur while reading logs
+	Err           chan error
+	closeNotifier chan struct{}
+}
+
+// NewLogWatcher returns a new LogWatcher.
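+// Its Msg channel is buffered (logWatcherBufferSize entries) so a slow log
+// reader does not immediately block the logging driver.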
+func NewLogWatcher() *LogWatcher { + return &LogWatcher{ + Msg: make(chan *Message, logWatcherBufferSize), + Err: make(chan error, 1), + closeNotifier: make(chan struct{}), + } +} + +// Close notifies the underlying log reader to stop +func (w *LogWatcher) Close() { + // only close if not already closed + select { + case <-w.closeNotifier: + default: + close(w.closeNotifier) + } +} + +// WatchClose returns a channel receiver that receives notification when the watcher has been closed +// This should only be called from one goroutine +func (w *LogWatcher) WatchClose() <-chan struct{} { + return w.closeNotifier +} diff --git a/daemon/logger/syslog/syslog.go b/daemon/logger/syslog/syslog.go new file mode 100644 index 00000000..9c8c321b --- /dev/null +++ b/daemon/logger/syslog/syslog.go @@ -0,0 +1,163 @@ +// +build linux + +package syslog + +import ( + "errors" + "fmt" + "log/syslog" + "net" + "net/url" + "os" + "path" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/urlutil" +) + +const name = "syslog" + +var facilities = map[string]syslog.Priority{ + "kern": syslog.LOG_KERN, + "user": syslog.LOG_USER, + "mail": syslog.LOG_MAIL, + "daemon": syslog.LOG_DAEMON, + "auth": syslog.LOG_AUTH, + "syslog": syslog.LOG_SYSLOG, + "lpr": syslog.LOG_LPR, + "news": syslog.LOG_NEWS, + "uucp": syslog.LOG_UUCP, + "cron": syslog.LOG_CRON, + "authpriv": syslog.LOG_AUTHPRIV, + "ftp": syslog.LOG_FTP, + "local0": syslog.LOG_LOCAL0, + "local1": syslog.LOG_LOCAL1, + "local2": syslog.LOG_LOCAL2, + "local3": syslog.LOG_LOCAL3, + "local4": syslog.LOG_LOCAL4, + "local5": syslog.LOG_LOCAL5, + "local6": syslog.LOG_LOCAL6, + "local7": syslog.LOG_LOCAL7, +} + +type Syslog struct { + writer *syslog.Writer +} + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +func New(ctx logger.Context) (logger.Logger, error) { + tag := ctx.Config["syslog-tag"] + if tag == "" { + tag = ctx.ContainerID[:12] + } + + proto, address, err := parseAddress(ctx.Config["syslog-address"]) + if err != nil { + return nil, err + } + + facility, err := parseFacility(ctx.Config["syslog-facility"]) + if err != nil { + return nil, err + } + + log, err := syslog.Dial( + proto, + address, + facility, + path.Base(os.Args[0])+"/"+tag, + ) + if err != nil { + return nil, err + } + + return &Syslog{ + writer: log, + }, nil +} + +func (s *Syslog) Log(msg *logger.Message) error { + if msg.Source == "stderr" { + return s.writer.Err(string(msg.Line)) + } + return s.writer.Info(string(msg.Line)) +} + +func (s *Syslog) Close() error { + return s.writer.Close() +} + +func (s *Syslog) Name() string { + return name +} + +func parseAddress(address string) (string, string, error) { + if urlutil.IsTransportURL(address) { + url, err := url.Parse(address) + if err != nil { + return "", "", err + } + + // unix socket validation + if url.Scheme == "unix" { + if _, err := os.Stat(url.Path); err != nil { + return "", "", err + } + return url.Scheme, url.Path, nil + } + + // here we process tcp|udp + host := url.Host + if _, _, err := net.SplitHostPort(host); err != nil { + if !strings.Contains(err.Error(), "missing port in address") { + return "", "", err + } + host = host + ":514" + } + + return url.Scheme, host, nil + } + + return "", "", nil +} + +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + 
case "syslog-address": + case "syslog-tag": + case "syslog-facility": + default: + return fmt.Errorf("unknown log opt '%s' for syslog log driver", key) + } + } + return nil +} + +func parseFacility(facility string) (syslog.Priority, error) { + if facility == "" { + return syslog.LOG_DAEMON, nil + } + + if syslogFacility, valid := facilities[facility]; valid { + return syslogFacility, nil + } + + fInt, err := strconv.Atoi(facility) + if err == nil && 0 <= fInt && fInt <= 23 { + return syslog.Priority(fInt << 3), nil + } + + return syslog.Priority(0), errors.New("invalid syslog facility") +} diff --git a/daemon/logger/syslog/syslog_unsupported.go b/daemon/logger/syslog/syslog_unsupported.go new file mode 100644 index 00000000..50cc51b6 --- /dev/null +++ b/daemon/logger/syslog/syslog_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package syslog diff --git a/daemon/logs.go b/daemon/logs.go new file mode 100644 index 00000000..e032c571 --- /dev/null +++ b/daemon/logs.go @@ -0,0 +1,83 @@ +package daemon + +import ( + "fmt" + "io" + "strconv" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/stdcopy" +) + +type ContainerLogsConfig struct { + Follow, Timestamps bool + Tail string + Since time.Time + UseStdout, UseStderr bool + OutStream io.Writer + Stop <-chan bool +} + +func (daemon *Daemon) ContainerLogs(container *Container, config *ContainerLogsConfig) error { + if !(config.UseStdout || config.UseStderr) { + return fmt.Errorf("You must choose at least one stream") + } + + outStream := config.OutStream + errStream := outStream + if !container.Config.Tty { + errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + + cLog, err := container.getLogger() + if err != nil { + return err + } + logReader, ok := cLog.(logger.LogReader) + if !ok { + return logger.ErrReadLogsNotSupported + } + + follow := config.Follow && container.IsRunning() + tailLines, err := strconv.Atoi(config.Tail) + if err != nil { + tailLines = -1 + } + + logrus.Debug("logs: begin stream") + readConfig := logger.ReadConfig{ + Since: config.Since, + Tail: tailLines, + Follow: follow, + } + logs := logReader.ReadLogs(readConfig) + + for { + select { + case err := <-logs.Err: + logrus.Errorf("Error streaming logs: %v", err) + return nil + case <-config.Stop: + logs.Close() + return nil + case msg, ok := <-logs.Msg: + if !ok { + logrus.Debugf("logs: end stream") + return nil + } + logLine := msg.Line + if config.Timestamps { + logLine = append([]byte(msg.Timestamp.Format(logger.TimeFormat)+" "), logLine...) + } + if msg.Source == "stdout" && config.UseStdout { + outStream.Write(logLine) + } + if msg.Source == "stderr" && config.UseStderr { + errStream.Write(logLine) + } + } + } +} diff --git a/daemon/monitor.go b/daemon/monitor.go new file mode 100644 index 00000000..1f020574 --- /dev/null +++ b/daemon/monitor.go @@ -0,0 +1,339 @@ +package daemon + +import ( + "io" + "os/exec" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" +) + +const ( + defaultTimeIncrement = 100 + loggerCloseTimeout = 10 * time.Second +) + +// containerMonitor monitors the execution of a container's main process. +// If a restart policy is specified for the container the monitor will ensure that the +// process is restarted based on the rules of the policy. 
When the container is finally stopped
+// the monitor will reset and clean up any of the container's resources such as networking allocations
+// and the rootfs
+type containerMonitor struct {
+	mux sync.Mutex
+
+	// container is the container being monitored
+	container *Container
+
+	// restartPolicy is the current policy being applied to the container monitor
+	restartPolicy runconfig.RestartPolicy
+
+	// failureCount is the number of times the container has failed to
+	// start in a row
+	failureCount int
+
+	// shouldStop signals the monitor that the next time the container exits it is
+	// either because docker or the user asked for the container to be stopped
+	shouldStop bool
+
+	// startSignal is a channel that is closed after the container initially starts
+	startSignal chan struct{}
+
+	// stopChan is used to signal to the monitor whenever there is a wait for the
+	// next restart so that the timeIncrement is not honored and the user is not
+	// left waiting for nothing to happen during this time
+	stopChan chan struct{}
+
+	// timeIncrement is the amount of time to wait between restarts
+	// this is in milliseconds
+	timeIncrement int
+
+	// lastStartTime is the time at which the monitor last exec'd the container's process
+	lastStartTime time.Time
+}
+
+// newContainerMonitor returns an initialized containerMonitor for the provided container
+// honoring the provided restart policy
+func newContainerMonitor(container *Container, policy runconfig.RestartPolicy) *containerMonitor {
+	return &containerMonitor{
+		container:     container,
+		restartPolicy: policy,
+		timeIncrement: defaultTimeIncrement,
+		stopChan:      make(chan struct{}),
+		startSignal:   make(chan struct{}),
+	}
+}
+
+// ExitOnNext signals to the container monitor that it should stop monitoring the container
+// for exits the next time the process dies
+func (m *containerMonitor) ExitOnNext() {
+	m.mux.Lock()
+
+	// we need to protect against a double close of the channel when stop is called
+	// twice or else we will get a panic
+	if !m.shouldStop {
+		m.shouldStop = true
+		close(m.stopChan)
+	}
+
+	m.mux.Unlock()
+}
+
+// Close closes the container's resources such as networking allocations and
+// unmounts the container's root filesystem
+func (m *containerMonitor) Close() error {
+	// Cleanup networking and mounts
+	m.container.cleanup()
+
+	// FIXME: here is a race condition between two RUN instructions in a Dockerfile
+	// because they share the same runconfig and change the image.
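The state written to disk below can therefore be stale.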
Must be fixed
+	// in builder/builder.go
+	if err := m.container.toDisk(); err != nil {
+		logrus.Errorf("Error dumping container %s state to disk: %s", m.container.ID, err)
+
+		return err
+	}
+
+	return nil
+}
+
+// Start starts the container's process and monitors it according to the restart policy
+func (m *containerMonitor) Start() error {
+	var (
+		err        error
+		exitStatus execdriver.ExitStatus
+		// this variable indicates where we are in the execution flow:
+		// before Run or after
+		afterRun bool
+	)
+
+	// ensure that when the monitor finally exits we release the networking and unmount the rootfs
+	defer func() {
+		if afterRun {
+			m.container.Lock()
+			m.container.setStopped(&exitStatus)
+			defer m.container.Unlock()
+		}
+		m.Close()
+	}()
+
+	// reset the restart count
+	m.container.RestartCount = -1
+
+	for {
+		m.container.RestartCount++
+
+		if err := m.container.startLogging(); err != nil {
+			m.resetContainer(false)
+
+			return err
+		}
+
+		pipes := execdriver.NewPipes(m.container.stdin, m.container.stdout, m.container.stderr, m.container.Config.OpenStdin)
+
+		m.container.LogEvent("start")
+
+		m.lastStartTime = time.Now()
+
+		if exitStatus, err = m.container.daemon.Run(m.container, pipes, m.callback); err != nil {
+			// if we receive an internal error from the initial start of a container then let's
+			// return it instead of entering the restart loop
+			if m.container.RestartCount == 0 {
+				m.container.ExitCode = -1
+				m.resetContainer(false)
+
+				return err
+			}
+
+			logrus.Errorf("Error running container: %s", err)
+		}
+
+		// at this point container.Lock is no longer held
+		afterRun = true
+
+		m.resetMonitor(err == nil && exitStatus.ExitCode == 0)
+
+		if m.shouldRestart(exitStatus.ExitCode) {
+			m.container.SetRestarting(&exitStatus)
+			if exitStatus.OOMKilled {
+				m.container.LogEvent("oom")
+			}
+			m.container.LogEvent("die")
+			m.resetContainer(true)
+
+			// sleep with a small time increment between each restart to help avoid issues caused by quickly
+			// restarting the container because of some types of errors (networking cut out, etc.)
+			m.waitForNextRestart()
+
+			// we need to check this before reentering the loop because the waitForNextRestart could have
+			// been terminated by a request from a user
+			if m.shouldStop {
+				return err
+			}
+			continue
+		}
+		if exitStatus.OOMKilled {
+			m.container.LogEvent("oom")
+		}
+		m.container.LogEvent("die")
+		m.resetContainer(true)
+		return err
+	}
+}
+
+// resetMonitor resets the stateful fields on the containerMonitor based on the
+// previous run's success or failure. Regardless of success, if the container had
+// an execution time of more than 10s then reset the timer back to the default
+func (m *containerMonitor) resetMonitor(successful bool) {
+	executionTime := time.Now().Sub(m.lastStartTime).Seconds()
+
+	if executionTime > 10 {
+		m.timeIncrement = defaultTimeIncrement
+	} else {
+		// otherwise we need to increment the amount of time we wait before restarting
+		// the process.
We will build up by multiplying the increment by 2 (200ms, 400ms, 800ms, and so on)
+		m.timeIncrement *= 2
+	}
+
+	// the container exited successfully so we need to reset the failure counter
+	if successful {
+		m.failureCount = 0
+	} else {
+		m.failureCount++
+	}
+}
+
+// waitForNextRestart waits for the current time increment before restarting the container unless
+// a user or docker asks for the container to be stopped
+func (m *containerMonitor) waitForNextRestart() {
+	select {
+	case <-time.After(time.Duration(m.timeIncrement) * time.Millisecond):
+	case <-m.stopChan:
+	}
+}
+
+// shouldRestart checks the restart policy and applies the rules to determine if
+// the container's process should be restarted
+func (m *containerMonitor) shouldRestart(exitCode int) bool {
+	m.mux.Lock()
+	defer m.mux.Unlock()
+
+	// do not restart if the user or docker has requested that this container be stopped
+	if m.shouldStop {
+		return false
+	}
+
+	switch {
+	case m.restartPolicy.IsAlways():
+		return true
+	case m.restartPolicy.IsOnFailure():
+		// the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count
+		if max := m.restartPolicy.MaximumRetryCount; max != 0 && m.failureCount > max {
+			logrus.Debugf("stopping restart of container %s because maximum failure count of %d has been reached",
+				stringid.TruncateID(m.container.ID), max)
+			return false
+		}
+
+		return exitCode != 0
+	}
+
+	return false
+}
+
+// callback ensures that the container's state is properly updated after we
+// receive an ack from the execution drivers
+func (m *containerMonitor) callback(processConfig *execdriver.ProcessConfig, pid int) {
+	if processConfig.Tty {
+		// The callback is called after the process Start()
+		// so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave
+		// which we close here.
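+		// the Closer type assertion below guards against drivers whose Stdout does not implement io.Closer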
+ if c, ok := processConfig.Stdout.(io.Closer); ok { + c.Close() + } + } + + m.container.setRunning(pid) + + // signal that the process has started + // close channel only if not closed + select { + case <-m.startSignal: + default: + close(m.startSignal) + } + + if err := m.container.ToDisk(); err != nil { + logrus.Errorf("Error saving container to disk: %v", err) + } +} + +// resetContainer resets the container's IO and ensures that the command is able to be executed again +// by copying the data into a new struct +// if lock is true, then container locked during reset +func (m *containerMonitor) resetContainer(lock bool) { + container := m.container + if lock { + container.Lock() + defer container.Unlock() + } + + if container.Config.OpenStdin { + if err := container.stdin.Close(); err != nil { + logrus.Errorf("%s: Error close stdin: %s", container.ID, err) + } + } + + if err := container.stdout.Clean(); err != nil { + logrus.Errorf("%s: Error close stdout: %s", container.ID, err) + } + + if err := container.stderr.Clean(); err != nil { + logrus.Errorf("%s: Error close stderr: %s", container.ID, err) + } + + if container.command != nil && container.command.ProcessConfig.Terminal != nil { + if err := container.command.ProcessConfig.Terminal.Close(); err != nil { + logrus.Errorf("%s: Error closing terminal: %s", container.ID, err) + } + } + + // Re-create a brand new stdin pipe once the container exited + if container.Config.OpenStdin { + container.stdin, container.stdinPipe = io.Pipe() + } + + if container.logDriver != nil { + if container.logCopier != nil { + exit := make(chan struct{}) + go func() { + container.logCopier.Wait() + close(exit) + }() + select { + case <-time.After(loggerCloseTimeout): + logrus.Warnf("Logger didn't exit in time: logs may be truncated") + case <-exit: + } + } + container.logDriver.Close() + container.logCopier = nil + container.logDriver = nil + } + + c := container.command.ProcessConfig.Cmd + + container.command.ProcessConfig.Cmd = exec.Cmd{ + Stdin: c.Stdin, + Stdout: c.Stdout, + Stderr: c.Stderr, + Path: c.Path, + Env: c.Env, + ExtraFiles: c.ExtraFiles, + Args: c.Args, + Dir: c.Dir, + SysProcAttr: c.SysProcAttr, + } +} diff --git a/daemon/network/settings.go b/daemon/network/settings.go new file mode 100644 index 00000000..a2e61eb9 --- /dev/null +++ b/daemon/network/settings.go @@ -0,0 +1,31 @@ +package network + +import "github.com/docker/docker/pkg/nat" + +// Address represents an IP address +type Address struct { + Addr string + PrefixLen int +} + +// Settings stores configuration details about the daemon network config +type Settings struct { + Bridge string + EndpointID string + Gateway string + GlobalIPv6Address string + GlobalIPv6PrefixLen int + HairpinMode bool + IPAddress string + IPPrefixLen int + IPv6Gateway string + LinkLocalIPv6Address string + LinkLocalIPv6PrefixLen int + MacAddress string + NetworkID string + PortMapping map[string]map[string]string // Deprecated + Ports nat.PortMap + SandboxKey string + SecondaryIPAddresses []Address + SecondaryIPv6Addresses []Address +} diff --git a/daemon/pause.go b/daemon/pause.go new file mode 100644 index 00000000..74513864 --- /dev/null +++ b/daemon/pause.go @@ -0,0 +1,17 @@ +package daemon + +import "fmt" + +// ContainerPause pauses a container +func (daemon *Daemon) ContainerPause(name string) error { + container, err := daemon.Get(name) + if err != nil { + return err + } + + if err := container.Pause(); err != nil { + return fmt.Errorf("Cannot pause container %s: %s", name, err) + } + + return nil 
+}
diff --git a/daemon/rename.go b/daemon/rename.go
new file mode 100644
index 00000000..46642979
--- /dev/null
+++ b/daemon/rename.go
@@ -0,0 +1,45 @@
+package daemon
+
+import (
+	"fmt"
+)
+
+func (daemon *Daemon) ContainerRename(oldName, newName string) error {
+	if oldName == "" || newName == "" {
+		return fmt.Errorf("usage: docker rename OLD_NAME NEW_NAME")
+	}
+
+	container, err := daemon.Get(oldName)
+	if err != nil {
+		return err
+	}
+
+	oldName = container.Name
+
+	container.Lock()
+	defer container.Unlock()
+	if newName, err = daemon.reserveName(container.ID, newName); err != nil {
+		return fmt.Errorf("Error when allocating new name: %s", err)
+	}
+
+	container.Name = newName
+
+	undo := func() {
+		container.Name = oldName
+		daemon.reserveName(container.ID, oldName)
+		daemon.containerGraph.Delete(newName)
+	}
+
+	if err := daemon.containerGraph.Delete(oldName); err != nil {
+		undo()
+		return fmt.Errorf("Failed to delete container %q: %v", oldName, err)
+	}
+
+	if err := container.toDisk(); err != nil {
+		undo()
+		return err
+	}
+
+	container.LogEvent("rename")
+	return nil
+}
diff --git a/daemon/resize.go b/daemon/resize.go
new file mode 100644
index 00000000..f2253946
--- /dev/null
+++ b/daemon/resize.go
@@ -0,0 +1,19 @@
+package daemon
+
+func (daemon *Daemon) ContainerResize(name string, height, width int) error {
+	container, err := daemon.Get(name)
+	if err != nil {
+		return err
+	}
+
+	return container.Resize(height, width)
+}
+
+func (daemon *Daemon) ContainerExecResize(name string, height, width int) error {
+	execConfig, err := daemon.getExecConfig(name)
+	if err != nil {
+		return err
+	}
+
+	return execConfig.Resize(height, width)
+}
diff --git a/daemon/restart.go b/daemon/restart.go
new file mode 100644
index 00000000..cf7d9e18
--- /dev/null
+++ b/daemon/restart.go
@@ -0,0 +1,14 @@
+package daemon
+
+import "fmt"
+
+func (daemon *Daemon) ContainerRestart(name string, seconds int) error {
+	container, err := daemon.Get(name)
+	if err != nil {
+		return err
+	}
+	if err := container.Restart(seconds); err != nil {
+		return fmt.Errorf("Cannot restart container %s: %s\n", name, err)
+	}
+	return nil
+}
diff --git a/daemon/start.go b/daemon/start.go
new file mode 100644
index 00000000..9df56c5a
--- /dev/null
+++ b/daemon/start.go
@@ -0,0 +1,48 @@
+package daemon
+
+import (
+	"fmt"
+	"runtime"
+
+	"github.com/docker/docker/runconfig"
+)
+
+func (daemon *Daemon) ContainerStart(name string, hostConfig *runconfig.HostConfig) error {
+	container, err := daemon.Get(name)
+	if err != nil {
+		return err
+	}
+
+	if container.IsPaused() {
+		return fmt.Errorf("Cannot start a paused container, try unpause instead.")
+	}
+
+	if container.IsRunning() {
+		return fmt.Errorf("Container already started")
+	}
+
+	if _, err = daemon.verifyContainerSettings(hostConfig, nil); err != nil {
+		return err
+	}
+
+	// Windows does not have the backwards compatibility issue here.
+	if runtime.GOOS != "windows" {
+		// This is kept for backward compatibility - hostconfig should be passed when
+		// creating a container, not during start.
+		if hostConfig != nil {
+			if err := daemon.setHostConfig(container, hostConfig); err != nil {
+				return err
+			}
+		}
+	} else {
+		if hostConfig != nil {
+			return fmt.Errorf("Supplying a hostconfig on start is not supported.
It should be supplied on create")
+		}
+	}
+
+	if err := container.Start(); err != nil {
+		return fmt.Errorf("Cannot start container %s: %s", name, err)
+	}
+
+	return nil
+}
diff --git a/daemon/state.go b/daemon/state.go
new file mode 100644
index 00000000..861671d7
--- /dev/null
+++ b/daemon/state.go
@@ -0,0 +1,276 @@
+package daemon
+
+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/pkg/units"
+)
+
+type State struct {
+	sync.Mutex
+	Running           bool
+	Paused            bool
+	Restarting        bool
+	OOMKilled         bool
+	removalInProgress bool // No need for this to be persistent on disk.
+	Dead              bool
+	Pid               int
+	ExitCode          int
+	Error             string // contains last known error when starting the container
+	StartedAt         time.Time
+	FinishedAt        time.Time
+	waitChan          chan struct{}
+}
+
+func NewState() *State {
+	return &State{
+		waitChan: make(chan struct{}),
+	}
+}
+
+// String returns a human-readable description of the state
+func (s *State) String() string {
+	if s.Running {
+		if s.Paused {
+			return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
+		}
+		if s.Restarting {
+			return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
+		}
+
+		return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
+	}
+
+	if s.removalInProgress {
+		return "Removal In Progress"
+	}
+
+	if s.Dead {
+		return "Dead"
+	}
+
+	if s.StartedAt.IsZero() {
+		return "Created"
+	}
+
+	if s.FinishedAt.IsZero() {
+		return ""
+	}
+
+	return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
+}
+
+// StateString returns a single string to describe state
+func (s *State) StateString() string {
+	if s.Running {
+		if s.Paused {
+			return "paused"
+		}
+		if s.Restarting {
+			return "restarting"
+		}
+		return "running"
+	}
+
+	if s.Dead {
+		return "dead"
+	}
+
+	if s.StartedAt.IsZero() {
+		return "created"
+	}
+
+	return "exited"
+}
+
+func isValidStateString(s string) bool {
+	if s != "paused" &&
+		s != "restarting" &&
+		s != "running" &&
+		s != "dead" &&
+		s != "created" &&
+		s != "exited" {
+		return false
+	}
+	return true
+}
+
+func wait(waitChan <-chan struct{}, timeout time.Duration) error {
+	if timeout < 0 {
+		<-waitChan
+		return nil
+	}
+	select {
+	case <-time.After(timeout):
+		return fmt.Errorf("Timed out: %v", timeout)
+	case <-waitChan:
+		return nil
+	}
+}
+
+// WaitRunning waits until the state is running. If the state is already running it returns
+// immediately. If you want to wait forever you must supply a negative timeout.
+// Returns the pid that was passed to SetRunning
+func (s *State) WaitRunning(timeout time.Duration) (int, error) {
+	s.Lock()
+	if s.Running {
+		pid := s.Pid
+		s.Unlock()
+		return pid, nil
+	}
+	waitChan := s.waitChan
+	s.Unlock()
+	if err := wait(waitChan, timeout); err != nil {
+		return -1, err
+	}
+	return s.GetPid(), nil
+}
+
+// WaitStop waits until the state is stopped. If the state is already stopped it returns
+// immediately. If you want to wait forever you must supply a negative timeout.
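+// The wait is implemented with waitChan, which is closed and re-created on every state transition.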
+// Returns the exit code that was passed to SetStopped
+func (s *State) WaitStop(timeout time.Duration) (int, error) {
+	s.Lock()
+	if !s.Running {
+		exitCode := s.ExitCode
+		s.Unlock()
+		return exitCode, nil
+	}
+	waitChan := s.waitChan
+	s.Unlock()
+	if err := wait(waitChan, timeout); err != nil {
+		return -1, err
+	}
+	return s.GetExitCode(), nil
+}
+
+func (s *State) IsRunning() bool {
+	s.Lock()
+	res := s.Running
+	s.Unlock()
+	return res
+}
+
+func (s *State) GetPid() int {
+	s.Lock()
+	res := s.Pid
+	s.Unlock()
+	return res
+}
+
+func (s *State) GetExitCode() int {
+	s.Lock()
+	res := s.ExitCode
+	s.Unlock()
+	return res
+}
+
+func (s *State) SetRunning(pid int) {
+	s.Lock()
+	s.setRunning(pid)
+	s.Unlock()
+}
+
+func (s *State) setRunning(pid int) {
+	s.Error = ""
+	s.Running = true
+	s.Paused = false
+	s.Restarting = false
+	s.ExitCode = 0
+	s.Pid = pid
+	s.StartedAt = time.Now().UTC()
+	close(s.waitChan) // fire waiters for start
+	s.waitChan = make(chan struct{})
+}
+
+func (s *State) SetStopped(exitStatus *execdriver.ExitStatus) {
+	s.Lock()
+	s.setStopped(exitStatus)
+	s.Unlock()
+}
+
+func (s *State) setStopped(exitStatus *execdriver.ExitStatus) {
+	s.Running = false
+	s.Restarting = false
+	s.Pid = 0
+	s.FinishedAt = time.Now().UTC()
+	s.ExitCode = exitStatus.ExitCode
+	s.OOMKilled = exitStatus.OOMKilled
+	close(s.waitChan) // fire waiters for stop
+	s.waitChan = make(chan struct{})
+}
+
+// SetRestarting is called when docker handles the automatic restart of a container that
+// is in the middle of a stop and is about to be restarted again
+func (s *State) SetRestarting(exitStatus *execdriver.ExitStatus) {
+	s.Lock()
+	// we should consider the container running when it is restarting because of
+	// all the checks in docker around rm/stop/etc
+	s.Running = true
+	s.Restarting = true
+	s.Pid = 0
+	s.FinishedAt = time.Now().UTC()
+	s.ExitCode = exitStatus.ExitCode
+	s.OOMKilled = exitStatus.OOMKilled
+	close(s.waitChan) // fire waiters for stop
+	s.waitChan = make(chan struct{})
+	s.Unlock()
+}
+
+// setError sets the container's error state.
This is useful when we want to +// know the error that occurred when container transits to another state +// when inspecting it +func (s *State) setError(err error) { + s.Error = err.Error() +} + +func (s *State) IsRestarting() bool { + s.Lock() + res := s.Restarting + s.Unlock() + return res +} + +func (s *State) SetPaused() { + s.Lock() + s.Paused = true + s.Unlock() +} + +func (s *State) SetUnpaused() { + s.Lock() + s.Paused = false + s.Unlock() +} + +func (s *State) IsPaused() bool { + s.Lock() + res := s.Paused + s.Unlock() + return res +} + +func (s *State) SetRemovalInProgress() error { + s.Lock() + defer s.Unlock() + if s.removalInProgress { + return fmt.Errorf("Status is already RemovalInProgress") + } + s.removalInProgress = true + return nil +} + +func (s *State) ResetRemovalInProgress() { + s.Lock() + s.removalInProgress = false + s.Unlock() +} + +func (s *State) SetDead() { + s.Lock() + s.Dead = true + s.Unlock() +} diff --git a/daemon/state_test.go b/daemon/state_test.go new file mode 100644 index 00000000..861076ae --- /dev/null +++ b/daemon/state_test.go @@ -0,0 +1,104 @@ +package daemon + +import ( + "sync/atomic" + "testing" + "time" + + "github.com/docker/docker/daemon/execdriver" +) + +func TestStateRunStop(t *testing.T) { + s := NewState() + for i := 1; i < 3; i++ { // full lifecycle two times + started := make(chan struct{}) + var pid int64 + go func() { + runPid, _ := s.WaitRunning(-1 * time.Second) + atomic.StoreInt64(&pid, int64(runPid)) + close(started) + }() + s.SetRunning(i + 100) + if !s.IsRunning() { + t.Fatal("State not running") + } + if s.Pid != i+100 { + t.Fatalf("Pid %v, expected %v", s.Pid, i+100) + } + if s.ExitCode != 0 { + t.Fatalf("ExitCode %v, expected 0", s.ExitCode) + } + select { + case <-time.After(100 * time.Millisecond): + t.Fatal("Start callback doesn't fire in 100 milliseconds") + case <-started: + t.Log("Start callback fired") + } + runPid := int(atomic.LoadInt64(&pid)) + if runPid != i+100 { + t.Fatalf("Pid %v, expected %v", runPid, i+100) + } + if pid, err := s.WaitRunning(-1 * time.Second); err != nil || pid != i+100 { + t.Fatalf("WaitRunning returned pid: %v, err: %v, expected pid: %v, err: %v", pid, err, i+100, nil) + } + + stopped := make(chan struct{}) + var exit int64 + go func() { + exitCode, _ := s.WaitStop(-1 * time.Second) + atomic.StoreInt64(&exit, int64(exitCode)) + close(stopped) + }() + s.SetStopped(&execdriver.ExitStatus{ExitCode: i}) + if s.IsRunning() { + t.Fatal("State is running") + } + if s.ExitCode != i { + t.Fatalf("ExitCode %v, expected %v", s.ExitCode, i) + } + if s.Pid != 0 { + t.Fatalf("Pid %v, expected 0", s.Pid) + } + select { + case <-time.After(100 * time.Millisecond): + t.Fatal("Stop callback doesn't fire in 100 milliseconds") + case <-stopped: + t.Log("Stop callback fired") + } + exitCode := int(atomic.LoadInt64(&exit)) + if exitCode != i { + t.Fatalf("ExitCode %v, expected %v", exitCode, i) + } + if exitCode, err := s.WaitStop(-1 * time.Second); err != nil || exitCode != i { + t.Fatalf("WaitStop returned exitCode: %v, err: %v, expected exitCode: %v, err: %v", exitCode, err, i, nil) + } + } +} + +func TestStateTimeoutWait(t *testing.T) { + s := NewState() + started := make(chan struct{}) + go func() { + s.WaitRunning(100 * time.Millisecond) + close(started) + }() + select { + case <-time.After(200 * time.Millisecond): + t.Fatal("Start callback doesn't fire in 100 milliseconds") + case <-started: + t.Log("Start callback fired") + } + s.SetRunning(42) + stopped := make(chan struct{}) + go func() { + 
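// the state is already running at this point, so this wait returns immediately rather than timing out
+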
s.WaitRunning(100 * time.Millisecond) + close(stopped) + }() + select { + case <-time.After(200 * time.Millisecond): + t.Fatal("Start callback doesn't fire in 100 milliseconds") + case <-stopped: + t.Log("Start callback fired") + } + +} diff --git a/daemon/stats.go b/daemon/stats.go new file mode 100644 index 00000000..c5079624 --- /dev/null +++ b/daemon/stats.go @@ -0,0 +1,118 @@ +package daemon + +import ( + "encoding/json" + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/libnetwork/sandbox" + "github.com/opencontainers/runc/libcontainer" +) + +type ContainerStatsConfig struct { + Stream bool + OutStream io.Writer + Stop <-chan bool +} + +func (daemon *Daemon) ContainerStats(name string, config *ContainerStatsConfig) error { + updates, err := daemon.SubscribeToContainerStats(name) + if err != nil { + return err + } + + if config.Stream { + config.OutStream.Write(nil) + } + + var preCpuStats types.CpuStats + getStat := func(v interface{}) *types.Stats { + update := v.(*execdriver.ResourceStats) + // Retrieve the nw statistics from libnetwork and inject them in the Stats + if nwStats, err := daemon.getNetworkStats(name); err == nil { + update.Stats.Interfaces = nwStats + } + ss := convertStatsToAPITypes(update.Stats) + ss.PreCpuStats = preCpuStats + ss.MemoryStats.Limit = uint64(update.MemoryLimit) + ss.Read = update.Read + ss.CpuStats.SystemUsage = update.SystemUsage + preCpuStats = ss.CpuStats + return ss + } + + enc := json.NewEncoder(config.OutStream) + + defer daemon.UnsubscribeToContainerStats(name, updates) + + noStreamFirstFrame := true + for { + select { + case v, ok := <-updates: + if !ok { + return nil + } + + s := getStat(v) + if !config.Stream && noStreamFirstFrame { + // prime the cpu stats so they aren't 0 in the final output + noStreamFirstFrame = false + continue + } + + if err := enc.Encode(s); err != nil { + return err + } + + if !config.Stream { + return nil + } + case <-config.Stop: + return nil + } + } +} + +func (daemon *Daemon) getNetworkStats(name string) ([]*libcontainer.NetworkInterface, error) { + var list []*libcontainer.NetworkInterface + + c, err := daemon.Get(name) + if err != nil { + return list, err + } + + nw, err := daemon.netController.NetworkByID(c.NetworkSettings.NetworkID) + if err != nil { + return list, err + } + ep, err := nw.EndpointByID(c.NetworkSettings.EndpointID) + if err != nil { + return list, err + } + + stats, err := ep.Statistics() + if err != nil { + return list, err + } + + // Convert libnetwork nw stats into libcontainer nw stats + for ifName, ifStats := range stats { + list = append(list, convertLnNetworkStats(ifName, ifStats)) + } + + return list, nil +} + +func convertLnNetworkStats(name string, stats *sandbox.InterfaceStatistics) *libcontainer.NetworkInterface { + n := &libcontainer.NetworkInterface{Name: name} + n.RxBytes = stats.RxBytes + n.RxPackets = stats.RxPackets + n.RxErrors = stats.RxErrors + n.RxDropped = stats.RxDropped + n.TxBytes = stats.TxBytes + n.TxPackets = stats.TxPackets + n.TxErrors = stats.TxErrors + n.TxDropped = stats.TxDropped + return n +} diff --git a/daemon/stats_collector_unix.go b/daemon/stats_collector_unix.go new file mode 100644 index 00000000..73b6a872 --- /dev/null +++ b/daemon/stats_collector_unix.go @@ -0,0 +1,162 @@ +// +build !windows + +package daemon + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/execdriver" + 
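// pubsub fans each container's stats out to every subscriber
+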
"github.com/docker/docker/pkg/pubsub" + "github.com/opencontainers/runc/libcontainer/system" +) + +// newStatsCollector returns a new statsCollector that collections +// network and cgroup stats for a registered container at the specified +// interval. The collector allows non-running containers to be added +// and will start processing stats when they are started. +func newStatsCollector(interval time.Duration) *statsCollector { + s := &statsCollector{ + interval: interval, + publishers: make(map[*Container]*pubsub.Publisher), + clockTicks: uint64(system.GetClockTicks()), + bufReader: bufio.NewReaderSize(nil, 128), + } + go s.run() + return s +} + +// statsCollector manages and provides container resource stats +type statsCollector struct { + m sync.Mutex + interval time.Duration + clockTicks uint64 + publishers map[*Container]*pubsub.Publisher + bufReader *bufio.Reader +} + +// collect registers the container with the collector and adds it to +// the event loop for collection on the specified interval returning +// a channel for the subscriber to receive on. +func (s *statsCollector) collect(c *Container) chan interface{} { + s.m.Lock() + defer s.m.Unlock() + publisher, exists := s.publishers[c] + if !exists { + publisher = pubsub.NewPublisher(100*time.Millisecond, 1024) + s.publishers[c] = publisher + } + return publisher.Subscribe() +} + +// stopCollection closes the channels for all subscribers and removes +// the container from metrics collection. +func (s *statsCollector) stopCollection(c *Container) { + s.m.Lock() + if publisher, exists := s.publishers[c]; exists { + publisher.Close() + delete(s.publishers, c) + } + s.m.Unlock() +} + +// unsubscribe removes a specific subscriber from receiving updates for a container's stats. +func (s *statsCollector) unsubscribe(c *Container, ch chan interface{}) { + s.m.Lock() + publisher := s.publishers[c] + if publisher != nil { + publisher.Evict(ch) + if publisher.Len() == 0 { + delete(s.publishers, c) + } + } + s.m.Unlock() +} + +func (s *statsCollector) run() { + type publishersPair struct { + container *Container + publisher *pubsub.Publisher + } + // we cannot determine the capacity here. + // it will grow enough in first iteration + var pairs []publishersPair + + for range time.Tick(s.interval) { + systemUsage, err := s.getSystemCpuUsage() + if err != nil { + logrus.Errorf("collecting system cpu usage: %v", err) + continue + } + + // it does not make sense in the first iteration, + // but saves allocations in further iterations + pairs = pairs[:0] + + s.m.Lock() + for container, publisher := range s.publishers { + // copy pointers here to release the lock ASAP + pairs = append(pairs, publishersPair{container, publisher}) + } + s.m.Unlock() + + for _, pair := range pairs { + stats, err := pair.container.Stats() + if err != nil { + if err != execdriver.ErrNotRunning { + logrus.Errorf("collecting stats for %s: %v", pair.container.ID, err) + } + continue + } + stats.SystemUsage = systemUsage + pair.publisher.Publish(stats) + } + } +} + +const nanoSeconds = 1e9 + +// getSystemCpuUSage returns the host system's cpu usage in nanoseconds +// for the system to match the cgroup readings are returned in the same format. 
+func (s *statsCollector) getSystemCpuUsage() (uint64, error) {
+	var line string
+	f, err := os.Open("/proc/stat")
+	if err != nil {
+		return 0, err
+	}
+	defer func() {
+		s.bufReader.Reset(nil)
+		f.Close()
+	}()
+	s.bufReader.Reset(f)
+	err = nil
+	for err == nil {
+		line, err = s.bufReader.ReadString('\n')
+		if err != nil {
+			break
+		}
+		parts := strings.Fields(line)
+		switch parts[0] {
+		case "cpu":
+			if len(parts) < 8 {
+				return 0, fmt.Errorf("invalid number of cpu fields")
+			}
+			var sum uint64
+			for _, i := range parts[1:8] {
+				v, err := strconv.ParseUint(i, 10, 64)
+				if err != nil {
+					return 0, fmt.Errorf("Unable to convert value %s to int: %s", i, err)
+				}
+				sum += v
+			}
+			return (sum * nanoSeconds) / s.clockTicks, nil
+		}
+	}
+	return 0, fmt.Errorf("invalid stat format")
+}
diff --git a/daemon/stats_collector_windows.go b/daemon/stats_collector_windows.go
new file mode 100644
index 00000000..f16d7ee3
--- /dev/null
+++ b/daemon/stats_collector_windows.go
@@ -0,0 +1,31 @@
+package daemon
+
+import "time"
+
+// newStatsCollector returns a new statsCollector for collecting stats
+// for a registered container at the specified interval. The collector allows
+// non-running containers to be added and will start processing stats when
+// they are started.
+func newStatsCollector(interval time.Duration) *statsCollector {
+	return &statsCollector{}
+}
+
+// statsCollector manages and provides container resource stats
+type statsCollector struct {
+}
+
+// collect registers the container with the collector and adds it to
+// the event loop for collection on the specified interval returning
+// a channel for the subscriber to receive on.
+func (s *statsCollector) collect(c *Container) chan interface{} {
+	return nil
+}
+
+// stopCollection closes the channels for all subscribers and removes
+// the container from metrics collection.
+func (s *statsCollector) stopCollection(c *Container) {
+}
+
+// unsubscribe removes a specific subscriber from receiving updates for a container's stats.
+func (s *statsCollector) unsubscribe(c *Container, ch chan interface{}) {
+}
diff --git a/daemon/stats_linux.go b/daemon/stats_linux.go
new file mode 100644
index 00000000..8c1b0873
--- /dev/null
+++ b/daemon/stats_linux.go
@@ -0,0 +1,76 @@
+package daemon
+
+import (
+	"github.com/docker/docker/api/types"
+	"github.com/opencontainers/runc/libcontainer"
+	"github.com/opencontainers/runc/libcontainer/cgroups"
+)
+
+// convertStatsToAPITypes converts the libcontainer.Stats to the api specific
+// structs. This is done to preserve API compatibility and versioning.
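+// Network counters are summed across all interfaces, while the cgroup cpu, memory,
+// and blkio figures are copied over field by field.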
+func convertStatsToAPITypes(ls *libcontainer.Stats) *types.Stats { + s := &types.Stats{} + if ls.Interfaces != nil { + s.Network = types.Network{} + for _, iface := range ls.Interfaces { + s.Network.RxBytes += iface.RxBytes + s.Network.RxPackets += iface.RxPackets + s.Network.RxErrors += iface.RxErrors + s.Network.RxDropped += iface.RxDropped + s.Network.TxBytes += iface.TxBytes + s.Network.TxPackets += iface.TxPackets + s.Network.TxErrors += iface.TxErrors + s.Network.TxDropped += iface.TxDropped + } + } + + cs := ls.CgroupStats + if cs != nil { + s.BlkioStats = types.BlkioStats{ + IoServiceBytesRecursive: copyBlkioEntry(cs.BlkioStats.IoServiceBytesRecursive), + IoServicedRecursive: copyBlkioEntry(cs.BlkioStats.IoServicedRecursive), + IoQueuedRecursive: copyBlkioEntry(cs.BlkioStats.IoQueuedRecursive), + IoServiceTimeRecursive: copyBlkioEntry(cs.BlkioStats.IoServiceTimeRecursive), + IoWaitTimeRecursive: copyBlkioEntry(cs.BlkioStats.IoWaitTimeRecursive), + IoMergedRecursive: copyBlkioEntry(cs.BlkioStats.IoMergedRecursive), + IoTimeRecursive: copyBlkioEntry(cs.BlkioStats.IoTimeRecursive), + SectorsRecursive: copyBlkioEntry(cs.BlkioStats.SectorsRecursive), + } + cpu := cs.CpuStats + s.CpuStats = types.CpuStats{ + CpuUsage: types.CpuUsage{ + TotalUsage: cpu.CpuUsage.TotalUsage, + PercpuUsage: cpu.CpuUsage.PercpuUsage, + UsageInKernelmode: cpu.CpuUsage.UsageInKernelmode, + UsageInUsermode: cpu.CpuUsage.UsageInUsermode, + }, + ThrottlingData: types.ThrottlingData{ + Periods: cpu.ThrottlingData.Periods, + ThrottledPeriods: cpu.ThrottlingData.ThrottledPeriods, + ThrottledTime: cpu.ThrottlingData.ThrottledTime, + }, + } + mem := cs.MemoryStats + s.MemoryStats = types.MemoryStats{ + Usage: mem.Usage.Usage, + MaxUsage: mem.Usage.MaxUsage, + Stats: mem.Stats, + Failcnt: mem.Usage.Failcnt, + } + } + + return s +} + +func copyBlkioEntry(entries []cgroups.BlkioStatEntry) []types.BlkioStatEntry { + out := make([]types.BlkioStatEntry, len(entries)) + for i, re := range entries { + out[i] = types.BlkioStatEntry{ + Major: re.Major, + Minor: re.Minor, + Op: re.Op, + Value: re.Value, + } + } + return out +} diff --git a/daemon/stats_windows.go b/daemon/stats_windows.go new file mode 100644 index 00000000..c79eb640 --- /dev/null +++ b/daemon/stats_windows.go @@ -0,0 +1,14 @@ +package daemon + +import ( + "github.com/docker/docker/api/types" + "github.com/opencontainers/runc/libcontainer" +) + +// convertStatsToAPITypes converts the libcontainer.Stats to the api specific +// structs. This is done to preserve API compatibility and versioning. +func convertStatsToAPITypes(ls *libcontainer.Stats) *types.Stats { + // TODO Windows. Refactor accordingly to fill in stats. 
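+	// an empty Stats struct is returned for now so callers always receive a valid value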
+ s := &types.Stats{} + return s +} diff --git a/daemon/stop.go b/daemon/stop.go new file mode 100644 index 00000000..23253e39 --- /dev/null +++ b/daemon/stop.go @@ -0,0 +1,17 @@ +package daemon + +import "fmt" + +func (daemon *Daemon) ContainerStop(name string, seconds int) error { + container, err := daemon.Get(name) + if err != nil { + return err + } + if !container.IsRunning() { + return fmt.Errorf("Container already stopped") + } + if err := container.Stop(seconds); err != nil { + return fmt.Errorf("Cannot stop container %s: %s\n", name, err) + } + return nil +} diff --git a/daemon/top.go b/daemon/top.go new file mode 100644 index 00000000..30a7893a --- /dev/null +++ b/daemon/top.go @@ -0,0 +1,73 @@ +package daemon + +import ( + "fmt" + "os/exec" + "strconv" + "strings" + + "github.com/docker/docker/api/types" +) + +func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) { + if psArgs == "" { + psArgs = "-ef" + } + + container, err := daemon.Get(name) + if err != nil { + return nil, err + } + + if !container.IsRunning() { + return nil, fmt.Errorf("Container %s is not running", name) + } + + pids, err := daemon.ExecutionDriver().GetPidsForContainer(container.ID) + if err != nil { + return nil, err + } + + output, err := exec.Command("ps", strings.Split(psArgs, " ")...).Output() + if err != nil { + return nil, fmt.Errorf("Error running ps: %s", err) + } + + procList := &types.ContainerProcessList{} + + lines := strings.Split(string(output), "\n") + procList.Titles = strings.Fields(lines[0]) + + pidIndex := -1 + for i, name := range procList.Titles { + if name == "PID" { + pidIndex = i + } + } + if pidIndex == -1 { + return nil, fmt.Errorf("Couldn't find PID field in ps output") + } + + for _, line := range lines[1:] { + if len(line) == 0 { + continue + } + fields := strings.Fields(line) + p, err := strconv.Atoi(fields[pidIndex]) + if err != nil { + return nil, fmt.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) + } + + for _, pid := range pids { + if pid == p { + // Make sure number of fields equals number of header titles + // merging "overhanging" fields + process := fields[:len(procList.Titles)-1] + process = append(process, strings.Join(fields[len(procList.Titles)-1:], " ")) + procList.Processes = append(procList.Processes, process) + } + } + } + container.LogEvent("top") + return procList, nil +} diff --git a/daemon/unpause.go b/daemon/unpause.go new file mode 100644 index 00000000..3550b7a9 --- /dev/null +++ b/daemon/unpause.go @@ -0,0 +1,17 @@ +package daemon + +import "fmt" + +// ContainerUnpause unpauses a container +func (daemon *Daemon) ContainerUnpause(name string) error { + container, err := daemon.Get(name) + if err != nil { + return err + } + + if err := container.Unpause(); err != nil { + return fmt.Errorf("Cannot unpause container %s: %s", name, err) + } + + return nil +} diff --git a/daemon/utils_nounix.go b/daemon/utils_nounix.go new file mode 100644 index 00000000..25a56ad1 --- /dev/null +++ b/daemon/utils_nounix.go @@ -0,0 +1,13 @@ +// +build !linux + +package daemon + +func selinuxSetDisabled() { +} + +func selinuxFreeLxcContexts(label string) { +} + +func selinuxEnabled() bool { + return false +} diff --git a/daemon/utils_test.go b/daemon/utils_test.go new file mode 100644 index 00000000..99165f78 --- /dev/null +++ b/daemon/utils_test.go @@ -0,0 +1,28 @@ +// +build linux + +package daemon + +import ( + "testing" + + "github.com/docker/docker/runconfig" +) + +func TestMergeLxcConfig(t *testing.T) { + kv := 
[]runconfig.KeyValuePair{ + {"lxc.cgroups.cpuset", "1,2"}, + } + hostConfig := &runconfig.HostConfig{ + LxcConf: runconfig.NewLxcConfig(kv), + } + + out, err := mergeLxcConfIntoOptions(hostConfig) + if err != nil { + t.Fatalf("Failed to merge Lxc Config: %s", err) + } + + cpuset := out[0] + if expected := "cgroups.cpuset=1,2"; cpuset != expected { + t.Fatalf("expected %s got %s", expected, cpuset) + } +} diff --git a/daemon/utils_unix.go b/daemon/utils_unix.go new file mode 100644 index 00000000..042544e4 --- /dev/null +++ b/daemon/utils_unix.go @@ -0,0 +1,48 @@ +// +build linux + +package daemon + +import ( + "errors" + "fmt" + "strings" + + "github.com/docker/docker/runconfig" + "github.com/opencontainers/runc/libcontainer/selinux" +) + +func selinuxSetDisabled() { + selinux.SetDisabled() +} + +func selinuxFreeLxcContexts(label string) { + selinux.FreeLxcContexts(label) +} + +func selinuxEnabled() bool { + return selinux.SelinuxEnabled() +} + +func mergeLxcConfIntoOptions(hostConfig *runconfig.HostConfig) ([]string, error) { + if hostConfig == nil { + return nil, nil + } + + out := []string{} + + // merge in the lxc conf options into the generic config map + if lxcConf := hostConfig.LxcConf; lxcConf != nil { + lxSlice := lxcConf.Slice() + for _, pair := range lxSlice { + // because lxc conf gets the driver name lxc.XXXX we need to trim it off + // and let the lxc driver add it back later if needed + if !strings.Contains(pair.Key, ".") { + return nil, errors.New("Illegal Key passed into LXC Configurations") + } + parts := strings.SplitN(pair.Key, ".", 2) + out = append(out, fmt.Sprintf("%s=%s", parts[1], pair.Value)) + } + } + + return out, nil +} diff --git a/daemon/volumes.go b/daemon/volumes.go new file mode 100644 index 00000000..556e3049 --- /dev/null +++ b/daemon/volumes.go @@ -0,0 +1,366 @@ +package daemon + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" + "github.com/opencontainers/runc/libcontainer/label" +) + +// ErrVolumeReadonly is used to signal an error when trying to copy data into +// a volume mount that is not writable. +var ErrVolumeReadonly = errors.New("mounted volume is marked read-only") + +type mountPoint struct { + Name string + Destination string + Driver string + RW bool + Volume volume.Volume `json:"-"` + Source string + Relabel string +} + +func (m *mountPoint) Setup() (string, error) { + if m.Volume != nil { + return m.Volume.Mount() + } + + if len(m.Source) > 0 { + if _, err := os.Stat(m.Source); err != nil { + if !os.IsNotExist(err) { + return "", err + } + if err := system.MkdirAll(m.Source, 0755); err != nil { + return "", err + } + } + return m.Source, nil + } + + return "", fmt.Errorf("Unable to setup mount point, neither source nor volume defined") +} + +// hasResource checks whether the given absolute path for a container is in +// this mount point. If the relative path starts with `../` then the resource +// is outside of this mount point, but we can't simply check for this prefix +// because it misses `..` which is also outside of the mount, so check both. +func (m *mountPoint) hasResource(absolutePath string) bool { + relPath, err := filepath.Rel(m.Destination, absolutePath) + + return err == nil && relPath != ".." 
&& !strings.HasPrefix(relPath, fmt.Sprintf("..%c", filepath.Separator)) +} + +func (m *mountPoint) Path() string { + if m.Volume != nil { + return m.Volume.Path() + } + + return m.Source +} + +// BackwardsCompatible decides whether this mount point can be +// used in old versions of Docker or not. +// Only bind mounts and local volumes can be used in old versions of Docker. +func (m *mountPoint) BackwardsCompatible() bool { + return len(m.Source) > 0 || m.Driver == volume.DefaultDriverName +} + +func parseBindMount(spec string, mountLabel string, config *runconfig.Config) (*mountPoint, error) { + bind := &mountPoint{ + RW: true, + } + arr := strings.Split(spec, ":") + + switch len(arr) { + case 2: + bind.Destination = arr[1] + case 3: + bind.Destination = arr[1] + mode := arr[2] + isValid, isRw := volume.ValidateMountMode(mode) + if !isValid { + return nil, fmt.Errorf("invalid mode for volumes-from: %s", mode) + } + bind.RW = isRw + // Relabel will apply a SELinux label, if necessary + bind.Relabel = mode + default: + return nil, fmt.Errorf("Invalid volume specification: %s", spec) + } + + name, source, err := parseVolumeSource(arr[0]) + if err != nil { + return nil, err + } + + if len(source) == 0 { + bind.Driver = config.VolumeDriver + if len(bind.Driver) == 0 { + bind.Driver = volume.DefaultDriverName + } + } else { + bind.Source = filepath.Clean(source) + } + + bind.Name = name + bind.Destination = filepath.Clean(bind.Destination) + return bind, nil +} + +func parseVolumesFrom(spec string) (string, string, error) { + if len(spec) == 0 { + return "", "", fmt.Errorf("malformed volumes-from specification: %s", spec) + } + + specParts := strings.SplitN(spec, ":", 2) + id := specParts[0] + mode := "rw" + + if len(specParts) == 2 { + mode = specParts[1] + if isValid, _ := volume.ValidateMountMode(mode); !isValid { + return "", "", fmt.Errorf("invalid mode for volumes-from: %s", mode) + } + } + return id, mode, nil +} + +func copyExistingContents(source, destination string) error { + volList, err := ioutil.ReadDir(source) + if err != nil { + return err + } + if len(volList) > 0 { + srcList, err := ioutil.ReadDir(destination) + if err != nil { + return err + } + if len(srcList) == 0 { + // If the source volume is empty copy files from the root into the volume + if err := chrootarchive.CopyWithTar(source, destination); err != nil { + return err + } + } + } + return copyOwnership(source, destination) +} + +// registerMountPoints initializes the container mount points with the configured volumes and bind mounts. +// It follows the next sequence to decide what to mount in each final destination: +// +// 1. Select the previously configured mount points for the containers, if any. +// 2. Select the volumes mounted from another containers. Overrides previously configured mount point destination. +// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations. +func (daemon *Daemon) registerMountPoints(container *Container, hostConfig *runconfig.HostConfig) error { + binds := map[string]bool{} + mountPoints := map[string]*mountPoint{} + + // 1. Read already configured mount points. + for name, point := range container.MountPoints { + mountPoints[name] = point + } + + // 2. Read volumes from other containers. 
+ for _, v := range hostConfig.VolumesFrom { + containerID, mode, err := parseVolumesFrom(v) + if err != nil { + return err + } + + c, err := daemon.Get(containerID) + if err != nil { + return err + } + + for _, m := range c.MountPoints { + cp := &mountPoint{ + Name: m.Name, + Source: m.Source, + RW: m.RW && volume.ReadWrite(mode), + Driver: m.Driver, + Destination: m.Destination, + } + + if len(cp.Source) == 0 { + v, err := createVolume(cp.Name, cp.Driver) + if err != nil { + return err + } + cp.Volume = v + } + + mountPoints[cp.Destination] = cp + } + } + + // 3. Read bind mounts + for _, b := range hostConfig.Binds { + // #10618 + bind, err := parseBindMount(b, container.MountLabel, container.Config) + if err != nil { + return err + } + + if binds[bind.Destination] { + return fmt.Errorf("Duplicate bind mount %s", bind.Destination) + } + + if len(bind.Name) > 0 && len(bind.Driver) > 0 { + // create the volume + v, err := createVolume(bind.Name, bind.Driver) + if err != nil { + return err + } + bind.Volume = v + bind.Source = v.Path() + // Since this is just a named volume and not a typical bind, set to shared mode `z` + if bind.Relabel == "" { + bind.Relabel = "z" + } + } + + if err := label.Relabel(bind.Source, container.MountLabel, bind.Relabel); err != nil { + return err + } + binds[bind.Destination] = true + mountPoints[bind.Destination] = bind + } + + // Keep backwards compatible structures + bcVolumes := map[string]string{} + bcVolumesRW := map[string]bool{} + for _, m := range mountPoints { + if m.BackwardsCompatible() { + bcVolumes[m.Destination] = m.Path() + bcVolumesRW[m.Destination] = m.RW + } + } + + container.Lock() + container.MountPoints = mountPoints + container.Volumes = bcVolumes + container.VolumesRW = bcVolumesRW + container.Unlock() + + return nil +} + +// TODO Windows. Factor out as not relevant (as Windows daemon support not in pre-1.7) +// verifyVolumesInfo ports volumes configured for the containers pre docker 1.7. +// It reads the container configuration and creates valid mount points for the old volumes. +func (daemon *Daemon) verifyVolumesInfo(container *Container) error { + // Inspect old structures only when we're upgrading from old versions + // to versions >= 1.7 and the MountPoints has not been populated with volumes data. + if len(container.MountPoints) == 0 && len(container.Volumes) > 0 { + for destination, hostPath := range container.Volumes { + vfsPath := filepath.Join(daemon.root, "vfs", "dir") + rw := container.VolumesRW != nil && container.VolumesRW[destination] + + if strings.HasPrefix(hostPath, vfsPath) { + id := filepath.Base(hostPath) + if err := migrateVolume(id, hostPath); err != nil { + return err + } + container.addLocalMountPoint(id, destination, rw) + } else { // Bind mount + id, source, err := parseVolumeSource(hostPath) + // We should not find an error here coming + // from the old configuration, but who knows. + if err != nil { + return err + } + container.addBindMountPoint(id, source, destination, rw) + } + } + } else if len(container.MountPoints) > 0 { + // Volumes created with a Docker version >= 1.7. We verify integrity in case of data created + // with Docker 1.7 RC versions that put the information in + // DOCKER_ROOT/volumes/VOLUME_ID rather than DOCKER_ROOT/volumes/VOLUME_ID/_container_data. 
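// a valid layout is left untouched; anything else is moved into the data directory created below
+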
l, err := getVolumeDriver(volume.DefaultDriverName)
+		if err != nil {
+			return err
+		}
+
+		for _, m := range container.MountPoints {
+			if m.Driver != volume.DefaultDriverName {
+				continue
+			}
+			dataPath := l.(*local.Root).DataPath(m.Name)
+			volumePath := filepath.Dir(dataPath)
+
+			d, err := ioutil.ReadDir(volumePath)
+			if err != nil {
+				// If the volume directory doesn't exist yet it will be recreated,
+				// so we only return the error when there is a different issue.
+				if !os.IsNotExist(err) {
+					return err
+				}
+				// Do not check when the volume directory does not exist.
+				continue
+			}
+			if validVolumeLayout(d) {
+				continue
+			}
+
+			if err := os.Mkdir(dataPath, 0755); err != nil {
+				return err
+			}
+
+			// Move the data inside the data directory
+			for _, f := range d {
+				oldp := filepath.Join(volumePath, f.Name())
+				newp := filepath.Join(dataPath, f.Name())
+				if err := os.Rename(oldp, newp); err != nil {
+					logrus.Errorf("Unable to move %s to %s\n", oldp, newp)
+				}
+			}
+		}
+
+		return container.ToDisk()
+	}
+
+	return nil
+}
+
+func createVolume(name, driverName string) (volume.Volume, error) {
+	vd, err := getVolumeDriver(driverName)
+	if err != nil {
+		return nil, err
+	}
+	return vd.Create(name)
+}
+
+func removeVolume(v volume.Volume) error {
+	vd, err := getVolumeDriver(v.DriverName())
+	if err != nil {
+		// propagate the lookup error instead of silently skipping the removal
+		return err
+	}
+	return vd.Remove(v)
+}
+
+func getVolumeDriver(name string) (volume.Driver, error) {
+	if name == "" {
+		name = volume.DefaultDriverName
+	}
+	return volumedrivers.Lookup(name)
+}
+
+func parseVolumeSource(spec string) (string, string, error) {
+	if !filepath.IsAbs(spec) {
+		return spec, "", nil
+	}
+
+	return "", spec, nil
+}
diff --git a/daemon/volumes_linux.go b/daemon/volumes_linux.go
new file mode 100644
index 00000000..0155c522
--- /dev/null
+++ b/daemon/volumes_linux.go
@@ -0,0 +1,120 @@
+// +build !windows
+
+package daemon
+
+import (
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/pkg/system"
+	"github.com/docker/docker/volume"
+	"github.com/docker/docker/volume/local"
+)
+
+// copyOwnership copies the permissions and uid:gid of the source file
+// into the destination file
+func copyOwnership(source, destination string) error {
+	stat, err := system.Stat(source)
+	if err != nil {
+		return err
+	}
+
+	if err := os.Chown(destination, int(stat.Uid()), int(stat.Gid())); err != nil {
+		return err
+	}
+
+	return os.Chmod(destination, os.FileMode(stat.Mode()))
+}
+
+func (container *Container) setupMounts() ([]execdriver.Mount, error) {
+	var mounts []execdriver.Mount
+	for _, m := range container.MountPoints {
+		path, err := m.Setup()
+		if err != nil {
+			return nil, err
+		}
+		if !container.trySetNetworkMount(m.Destination, path) {
+			mounts = append(mounts, execdriver.Mount{
+				Source:      path,
+				Destination: m.Destination,
+				Writable:    m.RW,
+			})
+		}
+	}
+
+	mounts = sortMounts(mounts)
+	return append(mounts, container.networkMounts()...), nil
+}
+
+func sortMounts(m []execdriver.Mount) []execdriver.Mount {
+	sort.Sort(mounts(m))
+	return m
+}
+
+type mounts []execdriver.Mount
+
+func (m mounts) Len() int {
+	return len(m)
+}
+
+func (m mounts) Less(i, j int) bool {
+	return m.parts(i) < m.parts(j)
+}
+
+func (m mounts) Swap(i, j int) {
+	m[i], m[j] = m[j], m[i]
+}
+
+func (m mounts) parts(i int) int {
+	return len(strings.Split(filepath.Clean(m[i].Destination), string(os.PathSeparator)))
+}
+
+// migrateVolume links the contents of a volume created pre Docker 1.7
+// into the location expected by the
local driver. +// It creates a symlink from DOCKER_ROOT/vfs/dir/VOLUME_ID to DOCKER_ROOT/volumes/VOLUME_ID/_container_data. +// It preserves the volume json configuration generated pre Docker 1.7 to be able to +// downgrade from Docker 1.7 to Docker 1.6 without losing volume compatibility. +func migrateVolume(id, vfs string) error { + l, err := getVolumeDriver(volume.DefaultDriverName) + if err != nil { + return err + } + + newDataPath := l.(*local.Root).DataPath(id) + fi, err := os.Stat(newDataPath) + if err != nil && !os.IsNotExist(err) { + return err + } + + if fi != nil && fi.IsDir() { + return nil + } + + return os.Symlink(vfs, newDataPath) +} + +// validVolumeLayout checks whether the volume directory layout +// is valid to work with Docker post 1.7 or not. +func validVolumeLayout(files []os.FileInfo) bool { + if len(files) == 1 && files[0].Name() == local.VolumeDataPathName && files[0].IsDir() { + return true + } + + if len(files) != 2 { + return false + } + + for _, f := range files { + if f.Name() == "config.json" || + (f.Name() == local.VolumeDataPathName && f.Mode()&os.ModeSymlink == os.ModeSymlink) { + // Old volume configuration, we ignore it + continue + } + return false + } + + return true +} diff --git a/daemon/volumes_linux_unit_test.go b/daemon/volumes_linux_unit_test.go new file mode 100644 index 00000000..842e101e --- /dev/null +++ b/daemon/volumes_linux_unit_test.go @@ -0,0 +1,87 @@ +// +build experimental + +package daemon + +import ( + "testing" + + "github.com/docker/docker/runconfig" + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" +) + +type fakeDriver struct{} + +func (fakeDriver) Name() string { return "fake" } +func (fakeDriver) Create(name string) (volume.Volume, error) { return nil, nil } +func (fakeDriver) Remove(v volume.Volume) error { return nil } + +func TestGetVolumeDriver(t *testing.T) { + _, err := getVolumeDriver("missing") + if err == nil { + t.Fatal("Expected error, was nil") + } + + volumedrivers.Register(fakeDriver{}, "fake") + d, err := getVolumeDriver("fake") + if err != nil { + t.Fatal(err) + } + if d.Name() != "fake" { + t.Fatalf("Expected fake driver, got %s\n", d.Name()) + } +} + +func TestParseBindMount(t *testing.T) { + cases := []struct { + bind string + driver string + expDest string + expSource string + expName string + expDriver string + mountLabel string + expRW bool + fail bool + }{ + {"/tmp:/tmp", "", "/tmp", "/tmp", "", "", "", true, false}, + {"/tmp:/tmp:ro", "", "/tmp", "/tmp", "", "", "", false, false}, + {"/tmp:/tmp:rw", "", "/tmp", "/tmp", "", "", "", true, false}, + {"/tmp:/tmp:foo", "", "/tmp", "/tmp", "", "", "", false, true}, + {"name:/tmp", "", "/tmp", "", "name", "local", "", true, false}, + {"name:/tmp", "external", "/tmp", "", "name", "external", "", true, false}, + {"name:/tmp:ro", "local", "/tmp", "", "name", "local", "", false, false}, + {"local/name:/tmp:rw", "", "/tmp", "", "local/name", "local", "", true, false}, + } + + for _, c := range cases { + conf := &runconfig.Config{VolumeDriver: c.driver} + m, err := parseBindMount(c.bind, c.mountLabel, conf) + if c.fail { + if err == nil { + t.Fatalf("Expected error, was nil, for spec %s\n", c.bind) + } + continue + } + + if m.Destination != c.expDest { + t.Fatalf("Expected destination %s, was %s, for spec %s\n", c.expDest, m.Destination, c.bind) + } + + if m.Source != c.expSource { + t.Fatalf("Expected source %s, was %s, for spec %s\n", c.expSource, m.Source, c.bind) + } + + if m.Name != c.expName { + t.Fatalf("Expected name %s, was %s 
for spec %s\n", c.expName, m.Name, c.bind) + } + + if m.Driver != c.expDriver { + t.Fatalf("Expected driver %s, was %s, for spec %s\n", c.expDriver, m.Driver, c.bind) + } + + if m.RW != c.expRW { + t.Fatalf("Expected RW %v, was %v for spec %s\n", c.expRW, m.RW, c.bind) + } + } +} diff --git a/daemon/volumes_unit_test.go b/daemon/volumes_unit_test.go new file mode 100644 index 00000000..b1e7f72f --- /dev/null +++ b/daemon/volumes_unit_test.go @@ -0,0 +1,35 @@ +package daemon + +import "testing" + +func TestParseVolumeFrom(t *testing.T) { + cases := []struct { + spec string + expId string + expMode string + fail bool + }{ + {"", "", "", true}, + {"foobar", "foobar", "rw", false}, + {"foobar:rw", "foobar", "rw", false}, + {"foobar:ro", "foobar", "ro", false}, + {"foobar:baz", "", "", true}, + } + + for _, c := range cases { + id, mode, err := parseVolumesFrom(c.spec) + if c.fail { + if err == nil { + t.Fatalf("Expected error, was nil, for spec %s\n", c.spec) + } + continue + } + + if id != c.expId { + t.Fatalf("Expected id %s, was %s, for spec %s\n", c.expId, id, c.spec) + } + if mode != c.expMode { + t.Fatalf("Expected mode %s, was %s for spec %s\n", c.expMode, mode, c.spec) + } + } +} diff --git a/daemon/volumes_windows.go b/daemon/volumes_windows.go new file mode 100644 index 00000000..e75b2dc6 --- /dev/null +++ b/daemon/volumes_windows.go @@ -0,0 +1,26 @@ +// +build windows + +package daemon + +import ( + "os" + + "github.com/docker/docker/daemon/execdriver" +) + +// Not supported on Windows +func copyOwnership(source, destination string) error { + return nil +} + +func (container *Container) setupMounts() ([]execdriver.Mount, error) { + return nil, nil +} + +func migrateVolume(id, vfs string) error { + return nil +} + +func validVolumeLayout(files []os.FileInfo) bool { + return true +} diff --git a/daemon/wait.go b/daemon/wait.go new file mode 100644 index 00000000..1101b2f0 --- /dev/null +++ b/daemon/wait.go @@ -0,0 +1,12 @@ +package daemon + +import "time" + +func (daemon *Daemon) ContainerWait(name string, timeout time.Duration) (int, error) { + container, err := daemon.Get(name) + if err != nil { + return -1, err + } + + return container.WaitStop(timeout) +} diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 00000000..015bc133 --- /dev/null +++ b/docker/README.md @@ -0,0 +1,3 @@ +docker.go contains Docker's main function. + +This file provides first line CLI argument parsing and environment variable setting. 
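One detail of `setupMounts` in `daemon/volumes_linux.go` above deserves a note: mounts are sorted by the number of components in their destination paths before being handed to the exec driver, so a mount on a parent directory is always applied before a mount nested inside it (otherwise the later, shallower mount would cover the deeper mount point). A minimal, self-contained sketch of that ordering rule; the program and names below are illustrative only and not part of the tree above:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"sort"
	"strings"
)

// byDepth orders destination paths the way the daemon's mounts type does:
// by the number of path components after filepath.Clean.
type byDepth []string

func (p byDepth) Len() int           { return len(p) }
func (p byDepth) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p byDepth) Less(i, j int) bool { return depth(p[i]) < depth(p[j]) }

func depth(dest string) int {
	return len(strings.Split(filepath.Clean(dest), string(os.PathSeparator)))
}

func main() {
	dests := []string{"/var/lib/app/cache", "/var", "/var/lib"}
	sort.Sort(byDepth(dests))
	// Prints [/var /var/lib /var/lib/app/cache]: parents first, so mounting
	// /var can never hide a mount point that was already set up beneath it.
	fmt.Println(dests)
}
```

Sorting by destination depth rather than by raw string order is what keeps, say, a volume at `/var/lib` visible when a bind mount also targets `/var`.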
diff --git a/docker/client.go b/docker/client.go new file mode 100644 index 00000000..8395805b --- /dev/null +++ b/docker/client.go @@ -0,0 +1,28 @@ +package main + +import ( + "path/filepath" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cliconfig" + flag "github.com/docker/docker/pkg/mflag" +) + +var clientFlags = &cli.ClientFlags{FlagSet: new(flag.FlagSet), Common: commonFlags} + +func init() { + client := clientFlags.FlagSet + client.StringVar(&clientFlags.ConfigDir, []string{"-config"}, cliconfig.ConfigDir(), "Location of client config files") + + clientFlags.PostParse = func() { + clientFlags.Common.PostParse() + + if clientFlags.ConfigDir != "" { + cliconfig.SetConfigDir(clientFlags.ConfigDir) + } + + if clientFlags.Common.TrustKey == "" { + clientFlags.Common.TrustKey = filepath.Join(cliconfig.ConfigDir(), defaultTrustKeyFile) + } + } +} diff --git a/docker/common.go b/docker/common.go new file mode 100644 index 00000000..351257e7 --- /dev/null +++ b/docker/common.go @@ -0,0 +1,101 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/cli" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/tlsconfig" +) + +const ( + defaultTrustKeyFile = "key.json" + defaultCaFile = "ca.pem" + defaultKeyFile = "key.pem" + defaultCertFile = "cert.pem" +) + +var ( + daemonFlags *flag.FlagSet + commonFlags = &cli.CommonFlags{FlagSet: new(flag.FlagSet)} + + dockerCertPath = os.Getenv("DOCKER_CERT_PATH") + dockerTLSVerify = os.Getenv("DOCKER_TLS_VERIFY") != "" +) + +func init() { + if dockerCertPath == "" { + dockerCertPath = cliconfig.ConfigDir() + } + + commonFlags.PostParse = postParseCommon + + cmd := commonFlags.FlagSet + + cmd.BoolVar(&commonFlags.Debug, []string{"D", "-debug"}, false, "Enable debug mode") + cmd.StringVar(&commonFlags.LogLevel, []string{"l", "-log-level"}, "info", "Set the logging level") + cmd.BoolVar(&commonFlags.TLS, []string{"-tls"}, false, "Use TLS; implied by --tlsverify") + cmd.BoolVar(&commonFlags.TLSVerify, []string{"-tlsverify"}, dockerTLSVerify, "Use TLS and verify the remote") + + // TODO use flag flag.String([]string{"i", "-identity"}, "", "Path to libtrust key file") + + var tlsOptions tlsconfig.Options + commonFlags.TLSOptions = &tlsOptions + cmd.StringVar(&tlsOptions.CAFile, []string{"-tlscacert"}, filepath.Join(dockerCertPath, defaultCaFile), "Trust certs signed only by this CA") + cmd.StringVar(&tlsOptions.CertFile, []string{"-tlscert"}, filepath.Join(dockerCertPath, defaultCertFile), "Path to TLS certificate file") + cmd.StringVar(&tlsOptions.KeyFile, []string{"-tlskey"}, filepath.Join(dockerCertPath, defaultKeyFile), "Path to TLS key file") + + cmd.Var(opts.NewListOptsRef(&commonFlags.Hosts, opts.ValidateHost), []string{"H", "-host"}, "Daemon socket(s) to connect to") +} + +func postParseCommon() { + cmd := commonFlags.FlagSet + + if commonFlags.LogLevel != "" { + lvl, err := logrus.ParseLevel(commonFlags.LogLevel) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to parse logging level: %s\n", commonFlags.LogLevel) + os.Exit(1) + } + logrus.SetLevel(lvl) + } else { + logrus.SetLevel(logrus.InfoLevel) + } + + if commonFlags.Debug { + os.Setenv("DEBUG", "1") + logrus.SetLevel(logrus.DebugLevel) + } + + // Regardless of whether the user sets it to true or false, if they + // specify --tlsverify at all then we need to turn on tls + // TLSVerify can be true even if not set due to 
DOCKER_TLS_VERIFY env var, so we need to check that here as well + if cmd.IsSet("-tlsverify") || commonFlags.TLSVerify { + commonFlags.TLS = true + } + + if !commonFlags.TLS { + commonFlags.TLSOptions = nil + } else { + tlsOptions := commonFlags.TLSOptions + tlsOptions.InsecureSkipVerify = !commonFlags.TLSVerify + + // Reset CertFile and KeyFile to empty string if the user did not specify + // the respective flags and the respective default files were not found. + if !cmd.IsSet("-tlscert") { + if _, err := os.Stat(tlsOptions.CertFile); os.IsNotExist(err) { + tlsOptions.CertFile = "" + } + } + if !cmd.IsSet("-tlskey") { + if _, err := os.Stat(tlsOptions.KeyFile); os.IsNotExist(err) { + tlsOptions.KeyFile = "" + } + } + } +} diff --git a/docker/daemon.go b/docker/daemon.go new file mode 100644 index 00000000..1017fede --- /dev/null +++ b/docker/daemon.go @@ -0,0 +1,325 @@ +// +build daemon + +package main + +import ( + "crypto/tls" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strings" + "time" + + "github.com/Sirupsen/logrus" + apiserver "github.com/docker/docker/api/server" + "github.com/docker/docker/autogen/dockerversion" + "github.com/docker/docker/cli" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/daemon" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/pidfile" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/timeutils" + "github.com/docker/docker/pkg/tlsconfig" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" +) + +const daemonUsage = " docker daemon [ --help | ... ]\n" + +var ( + flDaemon = flag.Bool([]string{"#d", "#-daemon"}, false, "Enable daemon mode (deprecated; use docker daemon)") + daemonCli cli.Handler = NewDaemonCli() +) + +// TODO: remove once `-d` is retired +func handleGlobalDaemonFlag() { + // This block makes sure that if the deprecated daemon flag `--daemon` is absent, + // then all daemon-specific flags are absent as well. + if !*flDaemon && daemonFlags != nil { + flag.CommandLine.Visit(func(fl *flag.Flag) { + for _, name := range fl.Names { + name := strings.TrimPrefix(name, "#") + if daemonFlags.Lookup(name) != nil { + // daemon flag was NOT specified, but daemon-specific flags were + // so let's error out + fmt.Fprintf(os.Stderr, "docker: the daemon flag '-%s' must follow the 'docker daemon' command.\n", name) + os.Exit(1) + } + } + }) + } + + if *flDaemon { + if *flHelp { + // We do not show the help output here, instead, we tell the user about the new daemon command, + // because the help output is so long they would not see the warning anyway. + fmt.Fprintln(os.Stderr, "Please use 'docker daemon --help' instead.") + os.Exit(0) + } + daemonCli.(*DaemonCli).CmdDaemon(flag.Args()...) + os.Exit(0) + } +} + +func presentInHelp(usage string) string { return usage } +func absentFromHelp(string) string { return "" } + +// NewDaemonCli returns a pre-configured daemon CLI +func NewDaemonCli() *DaemonCli { + daemonFlags = cli.Subcmd("daemon", nil, "Enable daemon mode", true) + + // TODO(tiborvass): remove InstallFlags? 
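+	// Note: each option below is registered twice: once on the
+	// `docker daemon` subcommand's FlagSet with its usage text shown
+	// (presentInHelp), and once on the global FlagSet with its usage text
+	// hidden (absentFromHelp), so the deprecated top-level `docker -d`
+	// form keeps accepting the same daemon flags.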
+	daemonConfig := new(daemon.Config)
+	daemonConfig.LogConfig.Config = make(map[string]string)
+	daemonConfig.InstallFlags(daemonFlags, presentInHelp)
+	daemonConfig.InstallFlags(flag.CommandLine, absentFromHelp)
+	registryOptions := new(registry.Options)
+	registryOptions.InstallFlags(daemonFlags, presentInHelp)
+	registryOptions.InstallFlags(flag.CommandLine, absentFromHelp)
+	daemonFlags.Require(flag.Exact, 0)
+
+	return &DaemonCli{
+		Config:          daemonConfig,
+		registryOptions: registryOptions,
+	}
+}
+
+func migrateKey() (err error) {
+	// Migrate the trust key if it exists at ~/.docker/key.json and is owned by the current user
+	oldPath := filepath.Join(cliconfig.ConfigDir(), defaultTrustKeyFile)
+	newPath := filepath.Join(getDaemonConfDir(), defaultTrustKeyFile)
+	if _, statErr := os.Stat(newPath); os.IsNotExist(statErr) && currentUserIsOwner(oldPath) {
+		defer func() {
+			// Remove the old path only if the migration succeeded
+			if err == nil {
+				err = os.Remove(oldPath)
+			} else {
+				logrus.Warnf("Key migration failed, key file not removed at %s", oldPath)
+				os.Remove(newPath)
+			}
+		}()
+
+		// 0755 rather than 0644: a directory needs the execute bit set so
+		// it can be traversed to reach the key file inside it.
+		if err := system.MkdirAll(getDaemonConfDir(), os.FileMode(0755)); err != nil {
+			return fmt.Errorf("Unable to create daemon configuration directory: %s", err)
+		}
+
+		newFile, err := os.OpenFile(newPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
+		if err != nil {
+			return fmt.Errorf("error creating key file %q: %s", newPath, err)
+		}
+		defer newFile.Close()
+
+		oldFile, err := os.Open(oldPath)
+		if err != nil {
+			return fmt.Errorf("error opening key file %q: %s", oldPath, err)
+		}
+		defer oldFile.Close()
+
+		if _, err := io.Copy(newFile, oldFile); err != nil {
+			return fmt.Errorf("error copying key: %s", err)
+		}
+
+		logrus.Infof("Migrated key from %s to %s", oldPath, newPath)
+	}
+
+	return nil
+}
+
+// DaemonCli represents the daemon CLI.
+type DaemonCli struct {
+	*daemon.Config
+	registryOptions *registry.Options
+}
+
+// getGlobalFlag returns a global (client or common) flag that was set on the
+// command line. mflag only exposes set flags through Visit, so the visitor
+// panics with the first flag it sees and the deferred recover captures it.
+func getGlobalFlag() (globalFlag *flag.Flag) {
+	defer func() {
+		if x := recover(); x != nil {
+			switch f := x.(type) {
+			case *flag.Flag:
+				globalFlag = f
+			default:
+				panic(x)
+			}
+		}
+	}()
+	visitor := func(f *flag.Flag) { panic(f) }
+	commonFlags.FlagSet.Visit(visitor)
+	clientFlags.FlagSet.Visit(visitor)
+	return
+}
+
+// CmdDaemon is the daemon command, called with the raw arguments after `docker daemon`.
+func (cli *DaemonCli) CmdDaemon(args ...string) error { + if *flDaemon { + // allow legacy forms `docker -D -d` and `docker -d -D` + logrus.Warn("please use 'docker daemon' instead.") + } else if !commonFlags.FlagSet.IsEmpty() || !clientFlags.FlagSet.IsEmpty() { + // deny `docker -D daemon` + illegalFlag := getGlobalFlag() + fmt.Fprintf(os.Stderr, "invalid flag '-%s'.\nSee 'docker daemon --help'.\n", illegalFlag.Names[0]) + os.Exit(1) + } else { + // allow new form `docker daemon -D` + flag.Merge(daemonFlags, commonFlags.FlagSet) + } + + daemonFlags.ParseFlags(args, true) + commonFlags.PostParse() + + if len(commonFlags.Hosts) == 0 { + commonFlags.Hosts = []string{opts.DefaultHost} + } + if commonFlags.TrustKey == "" { + commonFlags.TrustKey = filepath.Join(getDaemonConfDir(), defaultTrustKeyFile) + } + + if utils.ExperimentalBuild() { + logrus.Warn("Running experimental build") + } + + logrus.SetFormatter(&logrus.TextFormatter{TimestampFormat: timeutils.RFC3339NanoFixed}) + + if err := setDefaultUmask(); err != nil { + logrus.Fatalf("Failed to set umask: %v", err) + } + + if len(cli.LogConfig.Config) > 0 { + if err := logger.ValidateLogOpts(cli.LogConfig.Type, cli.LogConfig.Config); err != nil { + logrus.Fatalf("Failed to set log opts: %v", err) + } + } + + var pfile *pidfile.PidFile + if cli.Pidfile != "" { + pf, err := pidfile.New(cli.Pidfile) + if err != nil { + logrus.Fatalf("Error starting daemon: %v", err) + } + pfile = pf + defer func() { + if err := pfile.Remove(); err != nil { + logrus.Error(err) + } + }() + } + + serverConfig := &apiserver.ServerConfig{ + Logging: true, + EnableCors: cli.EnableCors, + CorsHeaders: cli.CorsHeaders, + Version: dockerversion.VERSION, + } + serverConfig = setPlatformServerConfig(serverConfig, cli.Config) + + if commonFlags.TLSOptions != nil { + if !commonFlags.TLSOptions.InsecureSkipVerify { + // server requires and verifies client's certificate + commonFlags.TLSOptions.ClientAuth = tls.RequireAndVerifyClientCert + } + tlsConfig, err := tlsconfig.Server(*commonFlags.TLSOptions) + if err != nil { + logrus.Fatal(err) + } + serverConfig.TLSConfig = tlsConfig + } + + api := apiserver.New(serverConfig) + + // The serve API routine never exits unless an error occurs + // We need to start it as a goroutine and wait on it so + // daemon doesn't exit + serveAPIWait := make(chan error) + go func() { + if err := api.ServeApi(commonFlags.Hosts); err != nil { + logrus.Errorf("ServeAPI error: %v", err) + serveAPIWait <- err + return + } + serveAPIWait <- nil + }() + + if err := migrateKey(); err != nil { + logrus.Fatal(err) + } + cli.TrustKeyPath = commonFlags.TrustKey + + registryService := registry.NewService(cli.registryOptions) + d, err := daemon.NewDaemon(cli.Config, registryService) + if err != nil { + if pfile != nil { + if err := pfile.Remove(); err != nil { + logrus.Error(err) + } + } + logrus.Fatalf("Error starting daemon: %v", err) + } + + logrus.Info("Daemon has completed initialization") + + logrus.WithFields(logrus.Fields{ + "version": dockerversion.VERSION, + "commit": dockerversion.GITCOMMIT, + "execdriver": d.ExecutionDriver().Name(), + "graphdriver": d.GraphDriver().String(), + }).Info("Docker daemon") + + signal.Trap(func() { + api.Close() + <-serveAPIWait + shutdownDaemon(d, 15) + if pfile != nil { + if err := pfile.Remove(); err != nil { + logrus.Error(err) + } + } + }) + + // after the daemon is done setting up we can tell the api to start + // accepting connections with specified daemon + api.AcceptConnections(d) + + // Daemon is fully 
initialized and handling API traffic
+	// Wait for serve API to complete
+	errAPI := <-serveAPIWait
+	shutdownDaemon(d, 15)
+	if errAPI != nil {
+		if pfile != nil {
+			if err := pfile.Remove(); err != nil {
+				logrus.Error(err)
+			}
+		}
+		logrus.Fatalf("Shutting down due to ServeAPI error: %v", errAPI)
+	}
+	return nil
+}

+// shutdownDaemon wraps daemon.Shutdown() with a timeout in case
+// d.Shutdown() takes too long to kill a container or, worse, is
+// blocked outright
+func shutdownDaemon(d *daemon.Daemon, timeout time.Duration) {
+	ch := make(chan struct{})
+	go func() {
+		d.Shutdown()
+		close(ch)
+	}()
+	select {
+	case <-ch:
+		logrus.Debug("Clean shutdown succeeded")
+	// timeout arrives as a bare count (e.g. 15), so it is scaled to
+	// seconds here rather than at the call sites.
+	case <-time.After(timeout * time.Second):
+		logrus.Error("Forcing shutdown, daemon did not stop within the timeout")
+	}
+}
+
+func getDaemonConfDir() string {
+	// TODO: update for Windows daemon
+	if runtime.GOOS == "windows" {
+		return cliconfig.ConfigDir()
+	}
+	return "/etc/docker"
+}
diff --git a/docker/daemon_none.go b/docker/daemon_none.go
new file mode 100644
index 00000000..c829cc1c
--- /dev/null
+++ b/docker/daemon_none.go
@@ -0,0 +1,12 @@
+// +build !daemon
+
+package main
+
+import "github.com/docker/docker/cli"
+
+const daemonUsage = ""
+
+var daemonCli cli.Handler
+
+// TODO: remove once `-d` is retired
+func handleGlobalDaemonFlag() {}
diff --git a/docker/daemon_unix.go b/docker/daemon_unix.go
new file mode 100644
index 00000000..c64ae967
--- /dev/null
+++ b/docker/daemon_unix.go
@@ -0,0 +1,44 @@
+// +build daemon,!windows
+
+package main
+
+import (
+	"fmt"
+	"os"
+	"syscall"
+
+	apiserver "github.com/docker/docker/api/server"
+	"github.com/docker/docker/daemon"
+	"github.com/docker/docker/pkg/system"
+
+	_ "github.com/docker/docker/daemon/execdriver/lxc"
+	_ "github.com/docker/docker/daemon/execdriver/native"
+)
+
+func setPlatformServerConfig(serverConfig *apiserver.ServerConfig, daemonCfg *daemon.Config) *apiserver.ServerConfig {
+	serverConfig.SocketGroup = daemonCfg.SocketGroup
+	return serverConfig
+}
+
+// currentUserIsOwner checks whether the current user is the owner of the given
+// file.
+func currentUserIsOwner(f string) bool {
+	if fileInfo, err := system.Stat(f); err == nil && fileInfo != nil {
+		if int(fileInfo.Uid()) == os.Getuid() {
+			return true
+		}
+	}
+	return false
+}
+
+// setDefaultUmask sets the umask to 0022 to avoid problems caused
+// by a custom umask inherited from the calling environment
+func setDefaultUmask() error {
+	desiredUmask := 0022
+	syscall.Umask(desiredUmask)
+	// Umask returns the previous mask, so a second call both re-applies the
+	// desired value and verifies that the first call took effect.
+	if umask := syscall.Umask(desiredUmask); umask != desiredUmask {
+		return fmt.Errorf("failed to set umask: expected %#o, got %#o", desiredUmask, umask)
+	}
+
+	return nil
+}
diff --git a/docker/daemon_windows.go b/docker/daemon_windows.go
new file mode 100644
index 00000000..9a57d873
--- /dev/null
+++ b/docker/daemon_windows.go
@@ -0,0 +1,23 @@
+// +build daemon
+
+package main
+
+import (
+	apiserver "github.com/docker/docker/api/server"
+	"github.com/docker/docker/daemon"
+)
+
+func setPlatformServerConfig(serverConfig *apiserver.ServerConfig, daemonCfg *daemon.Config) *apiserver.ServerConfig {
+	return serverConfig
+}
+
+// currentUserIsOwner checks whether the current user is the owner of the given
+// file.
+func currentUserIsOwner(f string) bool { + return false +} + +// setDefaultUmask doesn't do anything on windows +func setDefaultUmask() error { + return nil +} diff --git a/docker/docker.go b/docker/docker.go new file mode 100644 index 00000000..5aa7786d --- /dev/null +++ b/docker/docker.go @@ -0,0 +1,88 @@ +package main + +import ( + "fmt" + "os" + "sort" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/client" + "github.com/docker/docker/autogen/dockerversion" + "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/utils" +) + +func main() { + if reexec.Init() { + return + } + + // Set terminal emulation based on platform as required. + stdin, stdout, stderr := term.StdStreams() + + logrus.SetOutput(stderr) + + flag.Merge(flag.CommandLine, clientFlags.FlagSet, commonFlags.FlagSet) + + flag.Usage = func() { + fmt.Fprint(os.Stdout, "Usage: docker [OPTIONS] COMMAND [arg...]\n"+daemonUsage+" docker [ --help | -v | --version ]\n\n") + fmt.Fprint(os.Stdout, "A self-sufficient runtime for containers.\n\nOptions:\n") + + flag.CommandLine.SetOutput(os.Stdout) + flag.PrintDefaults() + + help := "\nCommands:\n" + + // TODO(tiborvass): no need to sort if we ensure dockerCommands is sorted + sort.Sort(byName(dockerCommands)) + + for _, cmd := range dockerCommands { + help += fmt.Sprintf(" %-10.10s%s\n", cmd.name, cmd.description) + } + + help += "\nRun 'docker COMMAND --help' for more information on a command." + fmt.Fprintf(os.Stdout, "%s\n", help) + } + + flag.Parse() + + if *flVersion { + showVersion() + return + } + + clientCli := client.NewDockerCli(stdin, stdout, stderr, clientFlags) + // TODO: remove once `-d` is retired + handleGlobalDaemonFlag() + + if *flHelp { + // if global flag --help is present, regardless of what other options and commands there are, + // just print the usage. 
+ flag.Usage() + return + } + + c := cli.New(clientCli, daemonCli) + if err := c.Run(flag.Args()...); err != nil { + if sterr, ok := err.(cli.StatusError); ok { + if sterr.Status != "" { + fmt.Fprintln(os.Stderr, sterr.Status) + os.Exit(1) + } + os.Exit(sterr.StatusCode) + } + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func showVersion() { + if utils.ExperimentalBuild() { + fmt.Printf("Docker version %s, build %s, experimental\n", dockerversion.VERSION, dockerversion.GITCOMMIT) + } else { + fmt.Printf("Docker version %s, build %s\n", dockerversion.VERSION, dockerversion.GITCOMMIT) + } +} diff --git a/docker/docker_windows.go b/docker/docker_windows.go new file mode 100644 index 00000000..a31dffc9 --- /dev/null +++ b/docker/docker_windows.go @@ -0,0 +1,5 @@ +package main + +import ( + _ "github.com/docker/docker/autogen/winresources" +) diff --git a/docker/flags.go b/docker/flags.go new file mode 100644 index 00000000..34567bb3 --- /dev/null +++ b/docker/flags.go @@ -0,0 +1,63 @@ +package main + +import flag "github.com/docker/docker/pkg/mflag" + +var ( + flHelp = flag.Bool([]string{"h", "-help"}, false, "Print usage") + flVersion = flag.Bool([]string{"v", "-version"}, false, "Print version information and quit") +) + +type command struct { + name string + description string +} + +type byName []command + +func (a byName) Len() int { return len(a) } +func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byName) Less(i, j int) bool { return a[i].name < a[j].name } + +// TODO(tiborvass): do not show 'daemon' on client-only binaries +// and deduplicate description in dockerCommands and cli subcommands +var dockerCommands = []command{ + {"attach", "Attach to a running container"}, + {"build", "Build an image from a Dockerfile"}, + {"commit", "Create a new image from a container's changes"}, + {"cp", "Copy files/folders from a container to a HOSTDIR or to STDOUT"}, + {"create", "Create a new container"}, + {"diff", "Inspect changes on a container's filesystem"}, + {"events", "Get real time events from the server"}, + {"exec", "Run a command in a running container"}, + {"export", "Export a container's filesystem as a tar archive"}, + {"history", "Show the history of an image"}, + {"images", "List images"}, + {"import", "Import the contents from a tarball to create a filesystem image"}, + {"info", "Display system-wide information"}, + {"inspect", "Return low-level information on a container or image"}, + {"kill", "Kill a running container"}, + {"load", "Load an image from a tar archive or STDIN"}, + {"login", "Register or log in to a Docker registry"}, + {"logout", "Log out from a Docker registry"}, + {"logs", "Fetch the logs of a container"}, + {"port", "List port mappings or a specific mapping for the CONTAINER"}, + {"pause", "Pause all processes within a container"}, + {"ps", "List containers"}, + {"pull", "Pull an image or a repository from a registry"}, + {"push", "Push an image or a repository to a registry"}, + {"rename", "Rename a container"}, + {"restart", "Restart a running container"}, + {"rm", "Remove one or more containers"}, + {"rmi", "Remove one or more images"}, + {"run", "Run a command in a new container"}, + {"save", "Save an image(s) to a tar archive"}, + {"search", "Search the Docker Hub for images"}, + {"start", "Start one or more stopped containers"}, + {"stats", "Display a live stream of container(s) resource usage statistics"}, + {"stop", "Stop a running container"}, + {"tag", "Tag an image into a repository"}, + {"top", "Display the running processes 
of a container"},
+	{"unpause", "Unpause all processes within a container"},
+	{"version", "Show the Docker version information"},
+	{"wait", "Block until a container stops, then print its exit code"},
+}
diff --git a/dockerinit/dockerinit.go b/dockerinit/dockerinit.go
new file mode 100644
index 00000000..a6754b05
--- /dev/null
+++ b/dockerinit/dockerinit.go
@@ -0,0 +1,12 @@
+package main
+
+import (
+	_ "github.com/docker/docker/daemon/execdriver/lxc"
+	_ "github.com/docker/docker/daemon/execdriver/native"
+	"github.com/docker/docker/pkg/reexec"
+)
+
+func main() {
+	// Running in init mode
+	reexec.Init()
+}
diff --git a/docs/.gitignore b/docs/.gitignore
new file mode 100644
index 00000000..d22a3d70
--- /dev/null
+++ b/docs/.gitignore
@@ -0,0 +1,2 @@
+# avoid committing the awsconfig file used for releases
+awsconfig
diff --git a/docs/Dockerfile b/docs/Dockerfile
new file mode 100644
index 00000000..e008954f
--- /dev/null
+++ b/docs/Dockerfile
@@ -0,0 +1,13 @@
+FROM docs/base:latest
+MAINTAINER Mary Anthony (@moxiegirl)
+
+# To get the git info for this repo
+COPY . /src
+
+COPY . /docs/content/
+
+WORKDIR /docs/content
+
+RUN /docs/content/touch-up.sh
+
+WORKDIR /docs
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 00000000..021e8f6e
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,55 @@
+.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli test-docker-py validate
+
+# env vars passed through directly to Docker's build scripts
+# to allow things like `make DOCKER_CLIENTONLY=1 binary` easily
+# `docs/sources/contributing/devenvironment.md` and `project/PACKAGERS.md` provide limited documentation for some of these
+DOCKER_ENVS := \
+	-e BUILDFLAGS \
+	-e DOCKER_CLIENTONLY \
+	-e DOCKER_EXECDRIVER \
+	-e DOCKER_GRAPHDRIVER \
+	-e TESTDIRS \
+	-e TESTFLAGS \
+	-e TIMEOUT
+# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds
+
+# to allow `make DOCSDIR=docs docs-shell` (to create a bind mount in docs)
+DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR))
+
+# to allow `make DOCSPORT=9000 docs`
+DOCSPORT := 8000
+
+# Get the IP address of the Docker host
+DOCKER_IP=$(shell python -c "import urlparse ; print urlparse.urlparse('$(DOCKER_HOST)').hostname or ''")
+HUGO_BASE_URL=$(shell test -z "$(DOCKER_IP)" && echo localhost || echo "$(DOCKER_IP)")
+HUGO_BIND_IP=0.0.0.0
+
+GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
+DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH))
+DOCKER_DOCS_IMAGE := docs-base$(if $(GIT_BRANCH),:$(GIT_BRANCH))
+
+
+DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE
+
+# for some docs workarounds (see below in "docs-build" target)
+GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null)
+
+default: docs
+
+docs: docs-build
+	$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP)
+
+docs-draft: docs-build
+	$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --buildDrafts="true" --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP)
+
+
+docs-shell: docs-build
+	$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash
+
+
+docs-build:
+#	( git remote | grep -v 
upstream ) || git diff --name-status upstream/release..upstream/docs ./ > ./changed-files
+#	echo "$(GIT_BRANCH)" > GIT_BRANCH
+#	echo "$(AWS_S3_BUCKET)" > AWS_S3_BUCKET
+#	echo "$(GITCOMMIT)" > GITCOMMIT
+	docker build -t "$(DOCKER_DOCS_IMAGE)" .
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 00000000..fcde0691
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,299 @@
+
+
+# Docker Documentation
+
+The source for Docker documentation is in this directory. Our
+documentation uses extended Markdown, as implemented by
+[MkDocs](http://mkdocs.org). The current release of the Docker documentation
+resides on [https://docs.docker.com](https://docs.docker.com).
+
+## Understanding the documentation branches and processes
+
+Docker has two primary branches for documentation:
+
+| Branch   | Description                            | URL (published via commit-hook)                                               |
+|----------|----------------------------------------|-------------------------------------------------------------------------------|
+| `docs`   | Official release documentation         | [https://docs.docker.com](https://docs.docker.com)                            |
+| `master` | Merged but unreleased development work | [http://docs.master.dockerproject.org](http://docs.master.dockerproject.org)  |
+
+Additions and updates to upcoming releases are made in a feature branch off
+the `master` branch. The Docker maintainers also support a `docs` branch that
+contains the last release of documentation.
+
+After a release, documentation updates are continually merged into `master` as
+they occur. This work includes new documentation for forthcoming features, bug
+fixes, and other updates. Docker's CI system automatically builds and updates
+the `master` documentation after each merge and posts it to
+[http://docs.master.dockerproject.org](http://docs.master.dockerproject.org).
+
+Periodically, the Docker maintainers update `docs.docker.com` between official
+releases of Docker. They do this by cherry-picking commits from `master`,
+merging them into `docs`, and then publishing the result.
+
+In the rare case where a change is not forward-compatible, changes may be made
+on other branches by special arrangement with the Docker maintainers.
+
+### Quickstart for documentation contributors
+
+If you are a new or beginner contributor, we encourage you to read through
+[our detailed contributors
+guide](https://docs.docker.com/project/who-written-for/). The guide explains in
+detail, with examples, how to contribute. If you are an experienced contributor,
+this quickstart should be enough to get you started.
+
+The following is the essential workflow for contributing to the documentation:
+
+1. Fork the `docker/docker` repository.
+
+2. Clone the repository to your local machine.
+
+3. Select an issue from `docker/docker` to work on or submit a proposal of your
+own.
+
+4. Create a feature branch from `master` in which to work.
+
+   By basing from `master` your work is automatically included in the next
+   release. It also allows docs maintainers to easily cherry-pick your changes
+   into the `docs` release branch.
+
+5. Modify existing or add new `.md` files to the `docs` directory.
+
+   If you add a new document (`.md`) file, you must also add it to the
+   appropriate section of the `docs/mkdocs.yml` file in this repository.
+
+
+6. As you work, build the documentation site locally to see your changes.
+
+   The `docker/docker` repository contains a `Dockerfile` and a `Makefile`.
+   Together, these create a development environment in which you can build and
+   run a container running the Docker documentation website. To build the
+   documentation site, enter `make docs` at the root of your `docker/docker`
+   fork:
+
+       $ make docs
+       .... (lots of output) ....
+       docker run --rm -it -e AWS_S3_BUCKET -p 8000:8000 "docker-docs:master" mkdocs serve
+       Running at: http://0.0.0.0:8000/
+       Live reload enabled.
+       Hold ctrl+c to quit.
+
+
+   The build creates an image containing all the required tools, adds the local
+   `docs/` directory, and generates the HTML files. Then, it runs a Docker
+   container with this image.
+
+   The container exposes port 8000 on the localhost so that you can connect and
+   see your changes. If you use Docker Machine, the `docker-machine ip
+   <machine-name>` command gives you the address of your server.
+
+7. Check your writing for style and mechanical errors.
+
+   Use our [documentation style
+   guide](https://docs.docker.com/project/doc-style/) to check style. There are
+   several [good grammar and spelling online
+   checkers](http://www.hemingwayapp.com/) that can check your writing
+   mechanics.
+
+8. Squash your commits on your branch.
+
+9. Make a pull request from your fork back to Docker's `master` branch.
+
+10. Work with the reviewers until your change is approved and merged.
+
+### Debugging and testing
+
+If you have any issues you need to debug, you can use `make docs-shell` and then
+run `mkdocs serve`. You can use `make docs-test` to generate a report of missing
+links that are referenced in the documentation; there should be none.
+
+## Style guide
+
+If you have questions about how to write for Docker's documentation, please see
+the [style guide](project/doc-style.md). The style guide provides
+guidance about grammar, syntax, formatting, styling, language, and tone. If
+something isn't clear in the guide, please submit an issue to let us know or
+submit a pull request to help us improve it.
+
+
+## Publishing documentation (for Docker maintainers)
+
+To publish Docker's documentation you need to have Docker up and running on your
+machine. You'll also need a `docs/awsconfig` file containing the settings you
+need to access the AWS bucket you'll be deploying to.
+
+The process for publishing is to build first to an AWS bucket, verify the build,
+and then publish the final release.
+
+1. Have Docker installed and running on your machine.
+
+2. Ask the core maintainers for the `awsconfig` file.
+
+3. Copy the `awsconfig` file to the `docs/` directory.
+
+   The `awsconfig` file contains the profiles of the S3 buckets for our
+   documentation sites. (If needed, the release script creates an S3 bucket and
+   pushes the files to it.) Each profile has this format:
+
+       [profile dowideit-docs]
+       aws_access_key_id = IHOIUAHSIDH234rwf....
+       aws_secret_access_key = OIUYSADJHLKUHQWIUHE......
+       region = ap-southeast-2
+
+   The `profile` name must be the same as the name of the bucket you are
+   deploying to.
+
+4. Run `make` from the `docker` directory.
+
+       $ make AWS_S3_BUCKET=dowideit-docs docs-release
+
+   This publishes _only_ to the `http://bucket-url/v1.2/` version of the
+   documentation.
+
+5. 
If you're publishing the current release's documentation, you need to also
+update the root docs pages by running:
+
+       $ make AWS_S3_BUCKET=dowideit-docs BUILD_ROOT=yes docs-release
+
+### Errors publishing using a Docker Machine VM
+
+Sometimes, in a Windows or Mac environment, the publishing procedure returns this
+error:
+
+    Post http:///var/run/docker.sock/build?rm=1&t=docker-docs%3Apost-1.2.0-docs_update-2:
+    dial unix /var/run/docker.sock: no such file or directory.
+
+If this happens, set the Docker host. Run the following command to get the
+variables in your shell:
+
+        docker-machine env <machine-name>
+
+Then, set your environment accordingly.
+
+## Cherry-picking documentation changes to update an existing release
+
+Whenever the core team makes a release, they publish the documentation based on
+the `release` branch. At that time, the `release` branch is copied into the
+`docs` branch. The documentation team makes updates between Docker releases by
+cherry-picking changes from `master` into any of the documentation branches.
+Typically, we cherry-pick into the `docs` branch.
+
+For example, to update the current release's docs, do the following:
+
+1. Go to your `docker/docker` fork and get the latest from `master`.
+
+       $ git fetch upstream
+
+2. Check out a new branch based on `upstream/docs`.
+
+   You should give your new branch a descriptive name.
+
+       $ git checkout -b post-1.2.0-docs-update-1 upstream/docs
+
+3. In a browser window, open [https://github.com/docker/docker/commits/master].
+
+4. Locate the merges you want to publish.
+
+   You should only cherry-pick individual commits; do not cherry-pick merge
+   commits. To minimize merge conflicts, start with the oldest commit and work
+   your way forward in time.
+
+5. Copy the commit SHA from GitHub.
+
+6. Cherry-pick the commit.
+
+       $ git cherry-pick -x fe845c4
+
+7. Repeat until you have cherry-picked everything you want to merge.
+
+8. Push your changes to your fork.
+
+       $ git push origin post-1.2.0-docs-update-1
+
+9. Make a pull request to merge into the `docs` branch.
+
+   Do __NOT__ merge into `master`.
+
+10. Have maintainers review your pull request.
+
+11. Once the PR has the needed "LGTMs", merge it on GitHub.
+
+12. Return to your local fork and make sure you are still on the `docs` branch.
+
+       $ git checkout docs
+
+13. Fetch your merged pull request from the upstream `docs` branch.
+
+       $ git fetch upstream docs
+
+14. Ensure your branch is clean and set to the latest.
+
+       $ git reset --hard upstream/docs
+
+15. Copy the `awsconfig` file into the `docs` directory.
+
+16. Make the beta documentation:
+
+       $ make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release
+
+17. Open [the beta
+website](http://beta-docs.docker.io.s3-website-us-west-2.amazonaws.com/)
+and make sure what you published is correct.
+
+18. When you're happy with your content, publish the docs to our live site:
+
+       $ make AWS_S3_BUCKET=docs.docker.com BUILD_ROOT=yes DISTRIBUTION_ID=C2K6......FL2F docs-release
+
+19. Test the uncached version of the live docs at [http://docs.docker.com.s3-website-us-east-1.amazonaws.com/]
+
+
+### Caching and the docs
+
+New docs do not appear live on the site until the cache (a complex, distributed
+CDN system) is flushed. The `make docs-release` command flushes the cache _if_
+the `DISTRIBUTION_ID` is set to the CloudFront distribution ID. The cache flush
+can take at least 15 minutes to run, and you can check its progress with the CDN
+CloudFront Purge Tool Chrome app.
+
+## Removing files from the docs.docker.com site
+
+Sometimes it becomes necessary to remove files from the historical published documentation.
+The most reliable way to do this is to run `aws s3` commands directly in a docs container.
+
+Start the docs container as `make docs-shell` does, but bind mount your `awsconfig` into it:
+
+```
+docker run --rm -it -v $(CURDIR)/docs/awsconfig:/docs/awsconfig docker-docs:master bash
+```
+
+The following example deletes two documents from S3 and then asks CloudFront to
+invalidate the cached copies:
+
+```
+export BUCKET=docs.docker.com
+export AWS_CONFIG_FILE=$(pwd)/awsconfig
+aws s3 --profile $BUCKET ls s3://$BUCKET
+aws s3 --profile $BUCKET rm s3://$BUCKET/v1.0/reference/api/docker_io_oauth_api/index.html
+aws s3 --profile $BUCKET rm s3://$BUCKET/v1.1/reference/api/docker_io_oauth_api/index.html
+
+aws configure set preview.cloudfront true
+export DISTRIBUTION_ID=YUTIYUTIUTIUYTIUT
+aws cloudfront create-invalidation --profile docs.docker.com --distribution-id $DISTRIBUTION_ID --invalidation-batch '{"Paths":{"Quantity":1, "Items":["/v1.0/reference/api/docker_io_oauth_api/"]},"CallerReference":"6Mar2015sventest1"}'
+aws cloudfront create-invalidation --profile docs.docker.com --distribution-id $DISTRIBUTION_ID --invalidation-batch '{"Paths":{"Quantity":1, "Items":["/v1.1/reference/api/docker_io_oauth_api/"]},"CallerReference":"6Mar2015sventest1"}'
+```
+
+### Generate the man pages
+
+For information on generating man pages (short for manual page), see [the man
+page directory](https://github.com/docker/docker/tree/master/docker) in this
+project.
+
+
+
diff --git a/docs/article-img/architecture.svg b/docs/article-img/architecture.svg
new file mode 100644
index 00000000..afe563ae
--- /dev/null
+++ b/docs/article-img/architecture.svg
@@ -0,0 +1,2597 @@
+[SVG markup not recoverable from this import: Docker architecture diagram, image/svg+xml, exported 2014-04-15 00:37Z]
diff --git a/docs/article-img/ipv6_basic_host_config.gliffy b/docs/article-img/ipv6_basic_host_config.gliffy
new file mode 100644
index 00000000..8d0450fc
--- /dev/null
+++ b/docs/article-img/ipv6_basic_host_config.gliffy
@@ -0,0 +1 @@
+{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":414,"height":127,"nodeIndex":173,"autoFit":true,"exportBorder":false,"gridOn":false,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":8.5,"y":0.5},"max":{"x":413.75,"y":126.5}},"objects":[{"x":6.5,"y":106.0,"rotation":0.0,"id":9,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

docker0 fe80::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":19.5,"y":9.0,"rotation":0.0,"id":7,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":19,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":31.5,"y":23.5,"rotation":0.0,"id":4,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":16,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#a4c2f4","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":5,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Host2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":11.75,"y":0.5,"rotation":0.0,"id":60,"width":402.0,"height":126.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":2,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":146.5,"y":83.0,"rotation":0.0,"id":164,"width":249.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":44,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add 2001:db8:1::/64 dev docker0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":146.5,"y":27.5,"rotation":0.0,"id":73,"width":249.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":35,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add default via fe80::1 dev eth0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]}],"shapeStyles":{"com.gliffy.shape.basic.basic_v1.default":{"fill":"#fff2cc","stroke":"#333333","strokeWidth":2,"dashStyle":"2.0,2.0","gradient":true,"shadow":true}},"lineStyles":{"global":{"stroke":"#d9d9d9"}},"textStyles":{"global":{"size":"12px","color":"#b7b7b7"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.uml.uml_v2.class","com.gliffy.libraries.uml.uml_v2.sequence","com.gliffy.libraries.uml.uml_v2.activity","com.gliffy.libraries.erd.erd_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.images"],"autosaveDisabled":false},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/docs/article-img/ipv6_basic_host_config.svg b/docs/article-img/ipv6_basic_host_config.svg new file mode 100644 index 00000000..0095b8bd --- /dev/null +++ b/docs/article-img/ipv6_basic_host_config.svg @@ -0,0 +1 @@ +Host2eth02001:db8::1/64docker0fe80::1/64ip -6routeadddefaultviafe80::1deveth0ip -6routeadd2001:db8:1::/64devdocker0 \ No newline at end of file diff --git a/docs/article-img/ipv6_ndp_proxying.gliffy b/docs/article-img/ipv6_ndp_proxying.gliffy new file mode 100644 index 00000000..698723e1 --- /dev/null +++ b/docs/article-img/ipv6_ndp_proxying.gliffy @@ -0,0 +1 @@ +{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":616,"height":438,"nodeIndex":207,"autoFit":true,"exportBorder":false,"gridOn":false,"snapToGrid":false,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":3,"y":-7.75},"max":{"x":615.5,"y":437.5}},"objects":[{"x":173.0,"y":117.0,"rotation":0.0,"id":190,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":30,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":0,"py":1.0,"px":0.7071067811865476}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":186,"py":0.0,"px":0.2928932188134524}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":"4.0,4.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[120.21067811865476,-7.0],[335.78932188134524,57.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":195.0,"y":117.0,"rotation":0.0,"id":83,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":23,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":222.5,"y":35.0,"rotation":0.0,"id":0,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":7,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#fff2cc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":1,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Router

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":26.0,"y":109.0,"rotation":0.0,"id":33,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":6,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":0,"py":0.9999999999999998,"px":0.29289321881345254}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":2,"py":0.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[225.78932188134524,0.9999999999999858],[57.710678118654755,65.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":20.289321881345245,"y":150.0,"rotation":0.0,"id":32,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":5,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":4,"py":0.0,"px":0.2928932188134524}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":0,"py":1.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[333.5,24.5],[272.9213562373095,-40.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":271.0,"y":37.0,"rotation":0.0,"id":89,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":1,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":0,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#d9d9d9","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[1.5,-2.0],[1.5,-21.125],[1.5,-21.125],[1.5,-40.25]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[]},{"x":151.0,"y":115.0,"rotation":0.0,"id":183,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":0,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":0,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":179,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[121.5,-5.0],[62.5,59.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":455.5,"y":257.0,"rotation":0.0,"id":200,"width":150.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":200,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"ou
terPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":5,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

expected Container location

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":467.5,"y":156.0,"rotation":0.0,"id":185,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":29,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8::c00y/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":479.5,"y":174.5,"rotation":0.0,"id":186,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#e2e2e2","gradient":false,"dashStyle":"2,2","dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":187,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container x

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":151.5,"y":156.0,"rotation":0.0,"id":178,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":26,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8::b001/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":163.5,"y":174.5,"rotation":0.0,"id":179,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#a4c2f4","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":180,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Host2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":299.5,"y":257.0,"rotation":0.0,"id":9,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

docker0 fe80::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":317.5,"y":156.0,"rotation":0.0,"id":7,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":14,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8::c001/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":1.0,"y":156.0,"rotation":0.0,"id":6,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":13,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8::a001/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":324.5,"y":174.5,"rotation":0.0,"id":4,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":11,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#a4c2f4","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":5,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Host3

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":13.0,"y":174.5,"rotation":0.0,"id":2,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":9,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#a4c2f4","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":3,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Host1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":-142.5,"y":118.5,"rotation":0.0,"id":31,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":4,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":4,"py":1.0,"px":0.7071067811865476}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":25,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[537.7106781186548,131.0],[602.0,204.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":-181.5,"y":122.5,"rotation":0.0,"id":30,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":3,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":4,"py":0.9999999999999998,"px":0.29289321881345254}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":27,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[535.2893218813452,127.0],[473.0,200.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":386.0,"y":306.0,"rotation":0.0,"id":78,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":22,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8::c00a/125

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":218.0,"y":306.0,"rotation":0.0,"id":77,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":21,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8::c009/125

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":409.5,"y":323.0,"rotation":0.0,"id":25,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":18,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":26,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":241.5,"y":323.0,"rotation":0.0,"id":27,"width":99.99999999999999,"height":99.99999999999999,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":16,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":28,"width":95.99999999999999,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":207.75,"y":297.5,"rotation":0.0,"id":58,"width":339.75,"height":140.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":2,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]}],"shapeStyles":{"com.gliffy.shape.basic.basic_v1.default":{"fill":"#e2e2e2","stroke":"#333333","strokeWidth":2,"dashStyle":"2.0,2.0","gradient":false,"shadow":true}},"lineStyles":{"global":{"stroke":"#cccccc","strokeWidth":2,"dashStyle":"4.0,4.0"}},"textStyles":{"global":{"size":"12px","italic":true}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.uml.uml_v2.class","com.gliffy.libraries.uml.uml_v2.sequence","com.gliffy.libraries.uml.uml_v2.activity","com.gliffy.libraries.erd.erd_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.images"],"autosaveDisabled":false},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/docs/article-img/ipv6_ndp_proxying.svg b/docs/article-img/ipv6_ndp_proxying.svg new file mode 100644 index 00000000..49b2da9f --- /dev/null +++ b/docs/article-img/ipv6_ndp_proxying.svg @@ -0,0 +1 @@ +RouterHost1Host3eth02001:db8::a001/64eth02001:db8::c001/64docker0fe80::1/64Container1Container2eth02001:db8::c009/125eth02001:db8::c00a/125eth02001:db8::1/64Host2eth02001:db8::b001/64Containerxeth02001:db8::c00y/64expectedContainerlocation \ No newline at end of file diff --git a/docs/article-img/ipv6_routed_network_example.gliffy b/docs/article-img/ipv6_routed_network_example.gliffy new file mode 100644 index 00000000..544fd52d --- /dev/null +++ b/docs/article-img/ipv6_routed_network_example.gliffy @@ -0,0 +1 @@ +{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":893,"height":447,"nodeIndex":185,"autoFit":true,"exportBorder":false,"gridOn":false,"snapToGrid":false,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":-17.000680271168676,"y":7},"max":{"x":892.767693574114,"y":447}},"objects":[{"x":17.5,"y":205.5,"rotation":0.0,"id":167,"width":238.5,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":38,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add 2001:db8:1::/64 dev docker0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":231.28932188134524,"y":95.0,"rotation":0.0,"id":120,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":6,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":161,"py":0.0,"px":0.2928932188134524}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":131,"py":1.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[267.5,47.5],[217.9213562373095,-13.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":187.0,"y":206.5,"rotation":0.0,"id":121,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":9,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":140,"py":0.9999999999999998,"px":0.29289321881345254}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":148,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[130.28932188134524,11.0],[-79.0,91.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":174.0,"y":217.5,"rotation":0.0,"id":122,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":8,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":140,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":146,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[164.0,0.0],[120.0,81.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":33.50000000000003,"y":409.0,"rotation":0.0,"id":123,"width":346.49999999999994,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add default via fe80::1 dev eth0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":3.5000000000000284,"y":268.5,"rotation":0.0,"id":124,"width":411.00000000000006,"height":163.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":237.0,"y":54.0,"rotation":0.0,"id":125,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":7,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":131,"py":0.9999999999999998,"px":0.29289321881345254}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":140,"py":0.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[170.78932188134524,27.999999999999986],[121.71067811865476,88.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":378.5,"y":7.0,"rotation":0.0,"id":131,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#e2e2e2","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":132,"width":96.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Layer 2 Switch

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":785.0,"y":195.0,"rotation":0.0,"id":136,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":32,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":143,"py":0.6187943262411347,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":"8.0,8.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[78.75000000000011,-0.25],[-798.0006802711687,-3.410605131648481E-13]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":262.0,"y":224.0,"rotation":0.0,"id":138,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":19,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

docker0 fe80::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":278.0,"y":126.0,"rotation":0.0,"id":139,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":16,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:0::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":288.0,"y":142.5,"rotation":0.0,"id":140,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":12,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#a4c2f4","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":141,"width":96.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Host1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":3.4999999999999716,"y":107.5,"rotation":0.0,"id":142,"width":411.0,"height":141.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":221.0,"y":283.0,"rotation":0.0,"id":144,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":34,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:1::2/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":34.000000000000014,"y":283.0,"rotation":0.0,"id":145,"width":149.99999999999997,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:1::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":244.0,"y":299.0,"rotation":0.0,"id":146,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":22,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":147,"width":96.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container1-2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":58.0,"y":298.0,"rotation":0.0,"id":148,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":20,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":149,"width":96.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container1-1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":317.0,"y":436.5,"rotation":0.0,"id":158,"width":223.00000000000003,"height":11.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

containers' link-local addresses are not displayed

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":17.5,"y":148.0,"rotation":0.0,"id":137,"width":291.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":29,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add 2001:db8:0::/64 dev eth0

ip -6 route add 2001:db8:2::/64 via 2001:db8:0::2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":901.7500000000001,"y":195.0,"rotation":0.0,"id":172,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":43,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-12.982306425886122,0.0],[-41.25,0.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":670.0,"y":284.0,"rotation":0.0,"id":155,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":36,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:2::2/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":479.0,"y":284.0,"rotation":0.0,"id":150,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":35,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:2::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":488.75,"y":408.0,"rotation":0.0,"id":152,"width":339.75,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":30,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add default via fe80::1 dev eth0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":694.5,"y":298.0,"rotation":0.0,"id":156,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":27,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":157,"width":96.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container2-2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":501.5,"y":298.0,"rotation":0.0,"id":153,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":25,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":154,"width":96.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container2-1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":444.5,"y":223.0,"rotation":0.0,"id":160,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":18,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

docker0 fe80::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":460.5,"y":128.0,"rotation":0.0,"id":159,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":17,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:0::2/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":469.5,"y":142.5,"rotation":0.0,"id":161,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":14,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#a4c2f4","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":162,"width":96.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Host2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":139.5,"y":86.5,"rotation":0.0,"id":126,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":5,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":161,"py":1.0,"px":0.7071067811865476}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":156,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[400.71067811865476,131.0],[605.0,211.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":100.5,"y":90.5,"rotation":0.0,"id":127,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":4,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":161,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":153,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[419.0,127.0],[451.0,207.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":447.75,"y":268.5,"rotation":0.0,"id":151,"width":416.0000000000001,"height":163.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":2,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":447.75,"y":107.5,"rotation":0.0,"id":143,"width":416.0000000000001,"height":141.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":795.7500000000001,"y":307.5,"rotation":270.0,"id":173,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":41,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

managed by Docker

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":879.7500000000001,"y":417.0,"rotation":0.0,"id":174,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":40,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":2,"endArrow":2,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[0.0,14.008510484195028],[0.0,-221.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":898.7500000000001,"y":432.0,"rotation":0.0,"id":171,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":42,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-13.981657549458532,0.0],[-41.25,0.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":582.5,"y":151.0,"rotation":0.0,"id":135,"width":285.25000000000017,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":33,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add 2001:db8:0::/64 dev eth0

ip -6 route add 2001:db8:1::/64 via 2001:db8:0::1 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":583.0,"y":204.0,"rotation":0.0,"id":168,"width":272.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":39,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add 2001:db8:2::/64 dev docker0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]}],"shapeStyles":{"com.gliffy.shape.basic.basic_v1.default":{"fill":"#e2e2e2","stroke":"#333333","strokeWidth":2,"dashStyle":"2.0,2.0","gradient":true,"shadow":true}},"lineStyles":{"global":{"stroke":"#000000","strokeWidth":1,"dashStyle":"8.0,8.0"}},"textStyles":{}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.uml.uml_v2.class","com.gliffy.libraries.uml.uml_v2.sequence","com.gliffy.libraries.uml.uml_v2.activity","com.gliffy.libraries.erd.erd_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.images"],"autosaveDisabled":false},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/docs/article-img/ipv6_routed_network_example.svg b/docs/article-img/ipv6_routed_network_example.svg new file mode 100644 index 00000000..c97b02c2 --- /dev/null +++ b/docs/article-img/ipv6_routed_network_example.svg @@ -0,0 +1 @@ +Layer 2 SwitchHost1Host2eth0 2001:db8:0::1/64eth0 2001:db8:0::2/64docker0 fe80::1/64docker0 fe80::1/64Container1-1Container1-2eth0 2001:db8:1::1/64Container2-1Container2-2ip -6 route add 2001:db8:0::/64 dev eth0ip -6 route add 2001:db8:2::/64 via 2001:db8:0::2ip -6 route add default via fe80::1 dev eth0ip -6 route add default via fe80::1 dev eth0ip -6 route add 2001:db8:0::/64 dev eth0ip -6 route add 2001:db8:1::/64 via 2001:db8:0::1 eth0 2001:db8:1::2/64eth0 2001:db8:2::1/64eth0 2001:db8:2::2/64containers' link-local addresses are not displayedip -6 route add 2001:db8:1::/64 dev docker0ip -6 route add 2001:db8:2::/64 dev docker0managed by Docker \ No newline at end of file diff --git a/docs/article-img/ipv6_slash64_subnet_config.gliffy b/docs/article-img/ipv6_slash64_subnet_config.gliffy new file mode 100644 index 00000000..6914fd07 --- /dev/null +++ b/docs/article-img/ipv6_slash64_subnet_config.gliffy @@ -0,0 +1 @@ +{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":550,"height":341,"nodeIndex":88,"autoFit":true,"exportBorder":false,"gridOn":false,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":2.5,"y":2.5},"max":{"x":550,"y":341}},"objects":[{"x":10.5,"y":53.5,"rotation":0.0,"id":74,"width":150.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":26,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

fe80::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":37.0,"y":2.5,"rotation":0.0,"id":72,"width":100.0,"height":46.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#d9d9d9","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":73,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Router

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":89.5,"y":83.5,"rotation":0.0,"id":59,"width":150.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":17,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Routed Network:
2001:db8:23:42::/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":313.0,"y":314.0,"rotation":0.0,"id":39,"width":235.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":16,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add default via fe80::1 dev eth0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":352.0,"y":185.5,"rotation":0.0,"id":36,"width":169.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:23:42:1::2/80

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":351.0,"y":49.5,"rotation":0.0,"id":29,"width":171.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":14,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:23:42:1::1/80

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":382.1250000000001,"y":202.5,"rotation":0.0,"id":30,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":12,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":31,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container1-2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":382.0,"y":65.5,"rotation":0.0,"id":32,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":10,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":33,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container1-1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":15.125000000000057,"y":264.0,"rotation":0.0,"id":20,"width":273.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":9,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add default via fe80::1 dev eth0

ip -6 route add 2001:db8:23:42:1::/80 dev docker0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":120.0,"y":178.5,"rotation":0.0,"id":21,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":8,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

docker0 fe80::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":13.0,"y":132.5,"rotation":0.0,"id":22,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":7,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:23:42::1/80

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":38.0,"y":149.0,"rotation":0.0,"id":23,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":5,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#a4c2f4","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":24,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

host1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":-118.0,"y":123.0,"rotation":0.0,"id":44,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":4,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":23,"py":0.7071067811865475,"px":0.9999999999999998}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":30,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[255.99999999999997,79.03300858899107],[500.1250000000001,129.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":-138.0,"y":129.0,"rotation":0.0,"id":43,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":3,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":23,"py":0.29289321881345237,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":32,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[276.0,41.966991411008934],[520.0,-13.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":313.0,"y":40.0,"rotation":0.0,"id":34,"width":237.00000000000003,"height":301.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":2,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":87.0,"y":150.0,"rotation":0.0,"id":58,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":1,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":23,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":72,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.0,-1.0],[0.0,-101.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":2.5,"y":118.50000000000001,"rotation":0.0,"id":25,"width":292.0,"height":178.99999999999997,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]}],"shapeStyles":{},"lineStyles":{"global":{"stroke":"#cccccc"}},"textStyles":{"global"
:{"bold":true,"italic":true}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.uml.uml_v1.default","com.gliffy.libraries.erd.erd_v1.default","com.gliffy.libraries.ui.ui_v2.forms_components","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.images"],"autosaveDisabled":false},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/docs/article-img/ipv6_slash64_subnet_config.svg b/docs/article-img/ipv6_slash64_subnet_config.svg new file mode 100644 index 00000000..70b140e2 --- /dev/null +++ b/docs/article-img/ipv6_slash64_subnet_config.svg @@ -0,0 +1 @@ +host1eth02001:db8:23:42::1/80docker0fe80::1/64ip -6routeadddefaultviafe80::1deveth0ip-6routeadd2001:db8:23:42:1::/80devdocker0container1-1container1-2eth02001:db8:23:42:1::1/80eth02001:db8:23:42:1::2/80ip-6routeadddefaultviafe80::1deveth0RoutedNetwork:2001:db8:23:42::/64Routerfe80::1/64 \ No newline at end of file diff --git a/docs/article-img/ipv6_switched_network_example.gliffy b/docs/article-img/ipv6_switched_network_example.gliffy new file mode 100644 index 00000000..75cbfcaa --- /dev/null +++ b/docs/article-img/ipv6_switched_network_example.gliffy @@ -0,0 +1 @@ +{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":893,"height":448,"nodeIndex":185,"autoFit":true,"exportBorder":false,"gridOn":false,"snapToGrid":false,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":-17.000680271168676,"y":7},"max":{"x":892.767693574114,"y":447.5}},"objects":[{"x":17.5,"y":205.5,"rotation":0.0,"id":167,"width":238.5,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":38,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add 2001:db8:1::/64 dev docker0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":231.28932188134524,"y":95.0,"rotation":0.0,"id":120,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":6,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":161,"py":0.0,"px":0.2928932188134524}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":131,"py":1.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[267.5,47.5],[217.9213562373095,-13.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":187.0,"y":206.5,"rotation":0.0,"id":121,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":9,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":140,"py":0.9999999999999998,"px":0.29289321881345254}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":148,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[130.28932188134524,11.0],[-79.0,91.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":174.0,"y":217.5,"rotation":0.0,"id":122,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":8,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":140,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":146,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[164.0,0.0],[120.0,81.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":33.50000000000003,"y":409.0,"rotation":0.0,"id":123,"width":346.49999999999994,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add default via fe80::1 dev eth0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":3.5000000000000284,"y":268.5,"rotation":0.0,"id":124,"width":411.00000000000006,"height":163.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":237.0,"y":54.0,"rotation":0.0,"id":125,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":7,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":131,"py":0.9999999999999998,"px":0.29289321881345254}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":140,"py":0.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[170.78932188134524,27.999999999999986],[121.71067811865476,88.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":378.5,"y":7.0,"rotation":0.0,"id":131,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#e2e2e2","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":132,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Level 2 Switch

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":785.0,"y":195.0,"rotation":0.0,"id":136,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":32,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":143,"py":0.6187943262411347,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":"8.0,8.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[78.75000000000011,-0.25],[-798.0006802711687,-3.410605131648481E-13]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":262.0,"y":224.0,"rotation":0.0,"id":138,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":19,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

docker0 fe80::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":278.0,"y":126.0,"rotation":0.0,"id":139,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":16,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:0::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":288.0,"y":142.5,"rotation":0.0,"id":140,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":12,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#a4c2f4","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":141,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Host1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":3.4999999999999716,"y":107.5,"rotation":0.0,"id":142,"width":411.0,"height":141.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":221.0,"y":283.0,"rotation":0.0,"id":144,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":34,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:1::2/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":34.000000000000014,"y":283.0,"rotation":0.0,"id":145,"width":149.99999999999997,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:1::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":244.0,"y":299.0,"rotation":0.0,"id":146,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":22,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":147,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container1-2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":58.0,"y":298.0,"rotation":0.0,"id":148,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":20,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":149,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container1-1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":317.0,"y":436.5,"rotation":0.0,"id":158,"width":223.00000000000003,"height":11.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

containers' link-local addresses are not displayed

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":17.5,"y":148.0,"rotation":0.0,"id":137,"width":291.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":29,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add 2001:db8:0::/64 dev eth0

ip -6 route add 2001:db8:2::/64 via 2001:db8:0::2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":901.7500000000001,"y":195.0,"rotation":0.0,"id":172,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":43,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-12.982306425886122,0.0],[-41.25,0.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":670.0,"y":284.0,"rotation":0.0,"id":155,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":36,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:2::2/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":479.0,"y":284.0,"rotation":0.0,"id":150,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":35,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:2::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":488.75,"y":408.0,"rotation":0.0,"id":152,"width":339.75,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":30,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add default via fe80::1 dev eth0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":694.5,"y":298.0,"rotation":0.0,"id":156,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":27,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":157,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container2-2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":501.5,"y":298.0,"rotation":0.0,"id":153,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":25,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":154,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container2-1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":444.5,"y":223.0,"rotation":0.0,"id":160,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":18,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

docker0 fe80::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":460.5,"y":128.0,"rotation":0.0,"id":159,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":17,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:0::2/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":469.5,"y":142.5,"rotation":0.0,"id":161,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":14,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#a4c2f4","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":162,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Host2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":139.5,"y":86.5,"rotation":0.0,"id":126,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":5,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":161,"py":1.0,"px":0.7071067811865476}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":156,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[400.71067811865476,131.0],[605.0,211.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":100.5,"y":90.5,"rotation":0.0,"id":127,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":4,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":161,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":153,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[419.0,127.0],[451.0,207.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":447.75,"y":268.5,"rotation":0.0,"id":151,"width":416.0000000000001,"height":163.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":2,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":447.75,"y":107.5,"rotation":0.0,"id":143,"width":416.0000000000001,"height":141.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":795.7500000000001,"y":307.5,"rotation":270.0,"id":173,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":41,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

managed by Docker

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":879.7500000000001,"y":417.0,"rotation":0.0,"id":174,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":40,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":2,"endArrow":2,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[0.0,14.008510484195028],[0.0,-221.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":898.7500000000001,"y":432.0,"rotation":0.0,"id":171,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":42,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-13.981657549458532,0.0],[-41.25,0.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":582.5,"y":151.0,"rotation":0.0,"id":135,"width":285.25000000000017,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":33,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add 2001:db8:0::/64 dev eth0

ip -6 route add 2001:db8:1::/64 via 2001:db8:0::1 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":583.0,"y":204.0,"rotation":0.0,"id":168,"width":272.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":39,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add 2001:db8:2::/64 dev docker0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]}],"shapeStyles":{"com.gliffy.shape.basic.basic_v1.default":{"fill":"#e2e2e2","stroke":"#333333","strokeWidth":2,"dashStyle":"2.0,2.0","gradient":true,"shadow":true}},"lineStyles":{"global":{"stroke":"#000000","strokeWidth":1,"dashStyle":"8.0,8.0"}},"textStyles":{}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.uml.uml_v2.class","com.gliffy.libraries.uml.uml_v2.sequence","com.gliffy.libraries.uml.uml_v2.activity","com.gliffy.libraries.erd.erd_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.images"],"autosaveDisabled":false},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/docs/article-img/ipv6_switched_network_example.svg b/docs/article-img/ipv6_switched_network_example.svg new file mode 100644 index 00000000..4cbf709b --- /dev/null +++ b/docs/article-img/ipv6_switched_network_example.svg @@ -0,0 +1 @@ +Level2SwitchHost1Host2eth02001:db8:0::1/64eth02001:db8:0::2/64docker0fe80::1/64docker0fe80::1/64Container1-1Container1-2eth02001:db8:1::1/64Container2-1Container2-2ip -6routeadd2001:db8:0::/64deveth0ip -6routeadd2001:db8:2::/64via2001:db8:0::2ip-6routeadddefaultviafe80::1deveth0ip-6routeadddefaultviafe80::1deveth0ip -6routeadd2001:db8:0::/64deveth0ip -6routeadd2001:db8:1::/64via2001:db8:0::1eth02001:db8:1::2/64eth02001:db8:2::1/64eth02001:db8:2::2/64containers'link-localaddressesarenotdisplayedip -6routeadd2001:db8:1::/64devdocker0ip -6routeadd2001:db8:2::/64devdocker0managedbyDocker \ No newline at end of file diff --git a/docs/articles/ambassador_pattern_linking.md b/docs/articles/ambassador_pattern_linking.md new file mode 100644 index 00000000..5a045403 --- /dev/null +++ b/docs/articles/ambassador_pattern_linking.md @@ -0,0 +1,157 @@ + + +# Link via an ambassador container + +## Introduction + +Rather than hardcoding network links between a service consumer and +provider, Docker encourages service portability, for example instead of: + + (consumer) --> (redis) + +Requiring you to restart the `consumer` to attach it to a different +`redis` service, you can add ambassadors: + + (consumer) --> (redis-ambassador) --> (redis) + +Or + + (consumer) --> (redis-ambassador) ---network---> (redis-ambassador) --> (redis) + +When you need to rewire your consumer to talk to a different Redis +server, you can just restart the `redis-ambassador` container that the +consumer is connected to. + +This pattern also allows you to transparently move the Redis server to a +different docker host from the consumer. + +Using the `svendowideit/ambassador` container, the link wiring is +controlled entirely from the `docker run` parameters. 
+ +## Two host example + +Start actual Redis server on one Docker host + + big-server $ docker run -d --name redis crosbymichael/redis + +Then add an ambassador linked to the Redis server, mapping a port to the +outside world + + big-server $ docker run -d --link redis:redis --name redis_ambassador -p 6379:6379 svendowideit/ambassador + +On the other host, you can set up another ambassador setting environment +variables for each remote port we want to proxy to the `big-server` + + client-server $ docker run -d --name redis_ambassador --expose 6379 -e REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379 svendowideit/ambassador + +Then on the `client-server` host, you can use a Redis client container +to talk to the remote Redis server, just by linking to the local Redis +ambassador. + + client-server $ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli + redis 172.17.0.160:6379> ping + PONG + +## How it works + +The following example shows what the `svendowideit/ambassador` container +does automatically (with a tiny amount of `sed`) + +On the Docker host (192.168.1.52) that Redis will run on: + + # start actual redis server + $ docker run -d --name redis crosbymichael/redis + + # get a redis-cli container for connection testing + $ docker pull relateiq/redis-cli + + # test the redis server by talking to it directly + $ docker run -t -i --rm --link redis:redis relateiq/redis-cli + redis 172.17.0.136:6379> ping + PONG + ^D + + # add redis ambassador + $ docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 busybox sh + +In the `redis_ambassador` container, you can see the linked Redis +containers `env`: + + $ env + REDIS_PORT=tcp://172.17.0.136:6379 + REDIS_PORT_6379_TCP_ADDR=172.17.0.136 + REDIS_NAME=/redis_ambassador/redis + HOSTNAME=19d7adf4705e + REDIS_PORT_6379_TCP_PORT=6379 + HOME=/ + REDIS_PORT_6379_TCP_PROTO=tcp + container=lxc + REDIS_PORT_6379_TCP=tcp://172.17.0.136:6379 + TERM=xterm + PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin + PWD=/ + +This environment is used by the ambassador `socat` script to expose Redis +to the world (via the `-p 6379:6379` port mapping): + + $ docker rm redis_ambassador + $ sudo ./contrib/mkimage-unittest.sh + $ docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 docker-ut sh + + $ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:172.17.0.136:6379 + +Now ping the Redis server via the ambassador: + +Now go to a different server: + + $ sudo ./contrib/mkimage-unittest.sh + $ docker run -t -i --expose 6379 --name redis_ambassador docker-ut sh + + $ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:192.168.1.52:6379 + +And get the `redis-cli` image so we can talk over the ambassador bridge. + + $ docker pull relateiq/redis-cli + $ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli + redis 172.17.0.160:6379> ping + PONG + +## The svendowideit/ambassador Dockerfile + +The `svendowideit/ambassador` image is a small `busybox` image with +`socat` built in. When you start the container, it uses a small `sed` +script to parse out the (possibly multiple) link environment variables +to set up the port forwarding. On the remote host, you need to set the +variable using the `-e` command line option. + + --expose 1234 -e REDIS_PORT_1234_TCP=tcp://192.168.1.52:6379 + +Will forward the local `1234` port to the remote IP and port, in this +case `192.168.1.52:6379`. 
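Before looking at the `Dockerfile` itself, you can preview the `socat` command its script would generate by running the same `sed` expression by hand. This is only a sketch: the `REDIS_PORT_6379_TCP` value below is illustrative, and a real container's environment may contain several `_TCP` variables, producing one `socat` line each.

    # Illustrative value; a linked container would set this automatically
    $ export REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379
    $ env | grep _TCP= | sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/'
    socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:192.168.1.52:6379 &

The `Dockerfile` below wires exactly this pipeline into the image's `CMD`: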
+ + # + # + # first you need to build the docker-ut image + # using ./contrib/mkimage-unittest.sh + # then + # docker build -t SvenDowideit/ambassador . + # docker tag SvenDowideit/ambassador ambassador + # then to run it (on the host that has the real backend on it) + # docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 ambassador + # on the remote host, you can set up another ambassador + # docker run -t -i --name redis_ambassador --expose 6379 sh + + FROM docker-ut + MAINTAINER SvenDowideit@home.org.au + + + CMD env | grep _TCP= | sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' | sh && top diff --git a/docs/articles/b2d_volume_images/add_cd.png b/docs/articles/b2d_volume_images/add_cd.png new file mode 100644 index 00000000..50d7c4e7 Binary files /dev/null and b/docs/articles/b2d_volume_images/add_cd.png differ diff --git a/docs/articles/b2d_volume_images/add_new_controller.png b/docs/articles/b2d_volume_images/add_new_controller.png new file mode 100644 index 00000000..791a3105 Binary files /dev/null and b/docs/articles/b2d_volume_images/add_new_controller.png differ diff --git a/docs/articles/b2d_volume_images/add_volume.png b/docs/articles/b2d_volume_images/add_volume.png new file mode 100644 index 00000000..ea2d6f6e Binary files /dev/null and b/docs/articles/b2d_volume_images/add_volume.png differ diff --git a/docs/articles/b2d_volume_images/boot_order.png b/docs/articles/b2d_volume_images/boot_order.png new file mode 100644 index 00000000..a62cc95e Binary files /dev/null and b/docs/articles/b2d_volume_images/boot_order.png differ diff --git a/docs/articles/b2d_volume_images/gparted.png b/docs/articles/b2d_volume_images/gparted.png new file mode 100644 index 00000000..1a50155c Binary files /dev/null and b/docs/articles/b2d_volume_images/gparted.png differ diff --git a/docs/articles/b2d_volume_images/gparted2.png b/docs/articles/b2d_volume_images/gparted2.png new file mode 100644 index 00000000..e48e9f15 Binary files /dev/null and b/docs/articles/b2d_volume_images/gparted2.png differ diff --git a/docs/articles/b2d_volume_images/verify.png b/docs/articles/b2d_volume_images/verify.png new file mode 100644 index 00000000..843fa126 Binary files /dev/null and b/docs/articles/b2d_volume_images/verify.png differ diff --git a/docs/articles/b2d_volume_resize.md b/docs/articles/b2d_volume_resize.md new file mode 100644 index 00000000..23bd6a6e --- /dev/null +++ b/docs/articles/b2d_volume_resize.md @@ -0,0 +1,164 @@ + + +# Getting “no space left on device” errors with Boot2Docker? + +If you're using Boot2Docker with a large number of images, or the images you're +working with are very large, your pulls might start failing with "no space left +on device" errors when the Boot2Docker volume fills up. There are two solutions +you can try. + +## Solution 1: Add the `DiskImage` property in boot2docker profile + +The `boot2docker` command reads its configuration from the `$BOOT2DOCKER_PROFILE` if set, or `$BOOT2DOCKER_DIR/profile` or `$HOME/.boot2docker/profile` (on Windows this is `%USERPROFILE%/.boot2docker/profile`). + +1. View the existing configuration, use the `boot2docker config` command. 
+ + $ boot2docker config + # boot2docker profile filename: /Users/mary/.boot2docker/profile + Init = false + Verbose = false + Driver = "virtualbox" + Clobber = true + ForceUpgradeDownload = false + SSH = "ssh" + SSHGen = "ssh-keygen" + SSHKey = "/Users/mary/.ssh/id_boot2docker" + VM = "boot2docker-vm" + Dir = "/Users/mary/.boot2docker" + ISOURL = "https://api.github.com/repos/boot2docker/boot2docker/releases" + ISO = "/Users/mary/.boot2docker/boot2docker.iso" + DiskSize = 20000 + Memory = 2048 + CPUs = 8 + SSHPort = 2022 + DockerPort = 0 + HostIP = "192.168.59.3" + DHCPIP = "192.168.59.99" + NetMask = [255, 255, 255, 0] + LowerIP = "192.168.59.103" + UpperIP = "192.168.59.254" + DHCPEnabled = true + Serial = false + SerialFile = "/Users/mary/.boot2docker/boot2docker-vm.sock" + Waittime = 300 + Retries = 75 + + The configuration shows you where `boot2docker` is looking for the `profile` file. It also output the settings that are in use. + + +2. Initialise a default file to customize using `boot2docker config > ~/.boot2docker/profile` command. + +3. Add the following lines to `$HOME/.boot2docker/profile`: + + # Disk image size in MB + DiskSize = 50000 + +4. Run the following sequence of commands to restart Boot2Docker with the new settings. + + $ boot2docker poweroff + $ boot2docker destroy + $ boot2docker init + $ boot2docker up + +## Solution 2: Increase the size of boot2docker volume + +This solution increases the volume size by first cloning it, then resizing it +using a disk partitioning tool. We recommend +[GParted](http://gparted.sourceforge.net/download.php/index.php). The tool comes +as a bootable ISO, is a free download, and works well with VirtualBox. + +1. Stop Boot2Docker + + Issue the command to stop the Boot2Docker VM on the command line: + + $ boot2docker stop + +2. Clone the VMDK image to a VDI image + + Boot2Docker ships with a VMDK image, which can't be resized by VirtualBox's + native tools. We will instead create a VDI volume and clone the VMDK volume to + it. + +3. Using the command line VirtualBox tools, clone the VMDK image to a VDI image: + + $ vboxmanage clonehd /full/path/to/boot2docker-hd.vmdk /full/path/to/.vdi --format VDI --variant Standard + +4. Resize the VDI volume + + Choose a size that will be appropriate for your needs. If you're spinning up a + lot of containers, or your containers are particularly large, larger will be + better: + + $ vboxmanage modifyhd /full/path/to/.vdi --resize + +5. Download a disk partitioning tool ISO + + To resize the volume, we'll use [GParted](http://gparted.sourceforge.net/download.php/). + Once you've downloaded the tool, add the ISO to the Boot2Docker VM IDE bus. + You might need to create the bus before you can add the ISO. + + > **Note:** + > It's important that you choose a partitioning tool that is available as an ISO so + > that the Boot2Docker VM can be booted with it. + + + + + + + + +
+ +6. Add the new VDI image + + In the settings for the Boot2Docker image in VirtualBox, remove the VMDK image + from the SATA controller and add the VDI image. + + + +7. Verify the boot order + + In the **System** settings for the Boot2Docker VM, make sure that **CD/DVD** is + at the top of the **Boot Order** list. + + + +8. Boot to the disk partitioning ISO + + Manually start the Boot2Docker VM in VirtualBox, and the disk partitioning ISO + should start up. Using GParted, choose the **GParted Live (default settings)** + option. Choose the default keyboard, language, and XWindows settings, and the + GParted tool will start up and display the VDI volume you created. Right click + on the VDI and choose **Resize/Move**. + + + +9. Drag the slider representing the volume to the maximum available size. + +10. Click **Resize/Move** followed by **Apply**. + + + +11. Quit GParted and shut down the VM. + +12. Remove the GParted ISO from the IDE controller for the Boot2Docker VM in +VirtualBox. + +13. Start the Boot2Docker VM + + Fire up the Boot2Docker VM manually in VirtualBox. The VM should log in + automatically, but if it doesn't, the credentials are `docker/tcuser`. Using + the `df -h` command, verify that your changes took effect. + + + +You're done! diff --git a/docs/articles/baseimages.md b/docs/articles/baseimages.md new file mode 100644 index 00000000..b3875aa5 --- /dev/null +++ b/docs/articles/baseimages.md @@ -0,0 +1,71 @@ + + +# Create a base image + +So you want to create your own [*Base Image*]( +/terms/image/#base-image)? Great! + +The specific process will depend heavily on the Linux distribution you +want to package. We have some examples below, and you are encouraged to +submit pull requests to contribute new ones. + +## Create a full image using tar + +In general, you'll want to start with a working machine that is running +the distribution you'd like to package as a base image, though that is +not required for some tools like Debian's +[Debootstrap](https://wiki.debian.org/Debootstrap), which you can also +use to build Ubuntu images. + +It can be as simple as this to create an Ubuntu base image: + + $ sudo debootstrap raring raring > /dev/null + $ sudo tar -C raring -c . | docker import - raring + a29c15f1bf7a + $ docker run raring cat /etc/lsb-release + DISTRIB_ID=Ubuntu + DISTRIB_RELEASE=13.04 + DISTRIB_CODENAME=raring + DISTRIB_DESCRIPTION="Ubuntu 13.04" + +There are more example scripts for creating base images in the Docker +GitHub Repo: + + - [BusyBox](https://github.com/docker/docker/blob/master/contrib/mkimage-busybox.sh) + - CentOS / Scientific Linux CERN (SLC) [on Debian/Ubuntu]( + https://github.com/docker/docker/blob/master/contrib/mkimage-rinse.sh) or + [on CentOS/RHEL/SLC/etc.]( + https://github.com/docker/docker/blob/master/contrib/mkimage-yum.sh) + - [Debian / Ubuntu]( + https://github.com/docker/docker/blob/master/contrib/mkimage-debootstrap.sh) + +## Creating a simple base image using scratch + +You can use Docker's reserved, minimal image, `scratch`, as a starting point for building containers. Using the `scratch` "image" signals to the build process that you want the next command in the `Dockerfile` to be the first filesystem layer in your image. + +While `scratch` appears in Docker's repository on the hub, you can't pull it, run it, or tag any image with the name `scratch`. Instead, you can refer to it in your `Dockerfile`. 
For example, to create a minimal container using `scratch`: + + FROM scratch + ADD hello / + CMD ["/hello"] + +This example creates the hello-world image used in the tutorials. +If you want to test it out, you can clone [the image repo](https://github.com/docker-library/hello-world) + + +## More resources + +There are lots more resources available to help you write your 'Dockerfile`. + +* There's a [complete guide to all the instructions](/reference/builder/) available for use in a `Dockerfile` in the reference section. +* To help you write a clear, readable, maintainable `Dockerfile`, we've also +written a [`Dockerfile` Best Practices guide](/articles/dockerfile_best-practices). +* If your goal is to create a new Official Repository, be sure to read up on Docker's [Official Repositories](/docker-hub/official_repos/). diff --git a/docs/articles/basics.md b/docs/articles/basics.md new file mode 100644 index 00000000..905266d0 --- /dev/null +++ b/docs/articles/basics.md @@ -0,0 +1,187 @@ + + +# Get started with containers + +This guide assumes you have a working installation of Docker. To verify Docker is +installed, use the following command: + + # Check that you have a working install + $ docker info + +If you get `docker: command not found` or something like +`/var/lib/docker/repositories: permission denied` you may have an +incomplete Docker installation or insufficient privileges to access +Docker on your machine. Please + +Additionally, depending on your Docker system configuration, you may be required +to preface each `docker` command with `sudo`. To avoid having to use `sudo` with +the `docker` command, your system administrator can create a Unix group called +`docker` and add users to it. + +For more information about installing Docker or `sudo` configuration, refer to +the [installation](/installation) instructions for your operating system. + + +## Download a pre-built image + + # Download an ubuntu image + $ docker pull ubuntu + +This will find the `ubuntu` image by name on +[*Docker Hub*](/userguide/dockerrepos/#searching-for-images) +and download it from [Docker Hub](https://hub.docker.com) to a local +image cache. + +> **Note**: +> When the image is successfully downloaded, you see a 12 character +> hash `539c0211cd76: Download complete` which is the +> short form of the image ID. These short image IDs are the first 12 +> characters of the full image ID - which can be found using +> `docker inspect` or `docker images --no-trunc=true`. + +## Running an interactive shell + +To run an interactive shell in the Ubuntu image: + + $ docker run -i -t ubuntu /bin/bash + +The `-i` flag starts an interactive container. The `-t` flag creates a pseudo-TTY that attaches `stdin` and `stdout`. + +To detach the `tty` without exiting the shell, use the escape sequence `Ctrl-p` + `Ctrl-q`. The container will continue to exist in a stopped state once exited. To list all containers, stopped and running use the `docker ps -a` command. + +## Bind Docker to another host/port or a Unix socket + +> **Warning**: +> Changing the default `docker` daemon binding to a +> TCP port or Unix *docker* user group will increase your security risks +> by allowing non-root users to gain *root* access on the host. Make sure +> you control access to `docker`. If you are binding +> to a TCP port, anyone with access to that port has full Docker access; +> so it is not advisable on an open network. + +With `-H` it is possible to make the Docker daemon to listen on a +specific IP and port. 
By default, it will listen on +`unix:///var/run/docker.sock` to allow only local connections by the +*root* user. You *could* set it to `0.0.0.0:2375` or a specific host IP +to give access to everybody, but that is **not recommended** because +then it is trivial for someone to gain root access to the host where the +daemon is running. + +Similarly, the Docker client can use `-H` to connect to a custom port. + +`-H` accepts host and port assignment in the following format: + + tcp://[host][:port][path] or unix://path + +For example: + +- `tcp://host:2375` -> TCP connection on + host:2375 +- `tcp://host:2375/path` -> TCP connection on + host:2375 and prepend path to all requests +- `unix://path/to/socket` -> Unix socket located + at `path/to/socket` + +`-H`, when empty, will default to the same value as +when no `-H` was passed in. + +`-H` also accepts short form for TCP bindings: + + host[:port] or :port + +Run Docker in daemon mode: + + $ sudo /docker daemon -H 0.0.0.0:5555 & + +Download an `ubuntu` image: + + $ docker -H :5555 pull ubuntu + +You can use multiple `-H`, for example, if you want to listen on both +TCP and a Unix socket + + # Run docker in daemon mode + $ sudo /docker daemon -H tcp://127.0.0.1:2375 -H unix:///var/run/docker.sock & + # Download an ubuntu image, use default Unix socket + $ docker pull ubuntu + # OR use the TCP port + $ docker -H tcp://127.0.0.1:2375 pull ubuntu + +## Starting a long-running worker process + + # Start a very useful long-running process + $ JOB=$(docker run -d ubuntu /bin/sh -c "while true; do echo Hello world; sleep 1; done") + + # Collect the output of the job so far + $ docker logs $JOB + + # Kill the job + $ docker kill $JOB + +## Listing containers + + $ docker ps # Lists only running containers + $ docker ps -a # Lists all containers + +## Controlling containers + + # Start a new container + $ JOB=$(docker run -d ubuntu /bin/sh -c "while true; do echo Hello world; sleep 1; done") + + # Stop the container + $ docker stop $JOB + + # Start the container + $ docker start $JOB + + # Restart the container + $ docker restart $JOB + + # SIGKILL a container + $ docker kill $JOB + + # Remove a container + $ docker stop $JOB # Container must be stopped to remove it + $ docker rm $JOB + +## Bind a service on a TCP port + + # Bind port 4444 of this container, and tell netcat to listen on it + $ JOB=$(docker run -d -p 4444 ubuntu:12.10 /bin/nc -l 4444) + + # Which public port is NATed to my container? + $ PORT=$(docker port $JOB 4444 | awk -F: '{ print $2 }') + + # Connect to the public port + $ echo hello world | nc 127.0.0.1 $PORT + + # Verify that the network connection worked + $ echo "Daemon received: $(docker logs $JOB)" + +## Committing (saving) a container state + +Save your containers state to an image, so the state can be +re-used. + +When you commit your container, Docker only stores the diff (difference) between the source image and the current state of the container's image. To list images you already have, use the `docker images` command. + + # Commit your container to a new named image + $ docker commit + + # List your images + $ docker images + +You now have an image state from which you can create new instances. 
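For example, a complete commit round-trip looks like this (a minimal sketch; the container command and the `my/ubuntu-greeting` image name are illustrative):

    # Run a container and change something in its filesystem
    $ JOB=$(docker run -d ubuntu /bin/sh -c "echo hello > /greeting")

    # Commit the container's state to a new named image
    $ docker commit $JOB my/ubuntu-greeting

    # Start a fresh container from the committed image
    $ docker run --rm my/ubuntu-greeting cat /greeting
    hello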
+ +Read more about [*Share Images via +Repositories*](/userguide/dockerrepos) or +continue to the complete [*Command +Line*](/reference/commandline/cli) diff --git a/docs/articles/certificates.md b/docs/articles/certificates.md new file mode 100644 index 00000000..da2ffcc9 --- /dev/null +++ b/docs/articles/certificates.md @@ -0,0 +1,17 @@ + + +# Using certificates for repository client verification + +The orginal content was deprecated. For information about configuring +cerficates, see [deploying a registry +server](http://docs.docker.com/registry/deploying/). To reach an older version +of this content, refer to an older version of the documentation. diff --git a/docs/articles/cfengine_process_management.md b/docs/articles/cfengine_process_management.md new file mode 100644 index 00000000..b2e57373 --- /dev/null +++ b/docs/articles/cfengine_process_management.md @@ -0,0 +1,149 @@ + + +# Process management with CFEngine + +Create Docker containers with managed processes. + +Docker monitors one process in each running container and the container +lives or dies with that process. By introducing CFEngine inside Docker +containers, we can alleviate a few of the issues that may arise: + + - It is possible to easily start multiple processes within a + container, all of which will be managed automatically, with the + normal `docker run` command. + - If a managed process dies or crashes, CFEngine will start it again + within 1 minute. + - The container itself will live as long as the CFEngine scheduling + daemon (cf-execd) lives. With CFEngine, we are able to decouple the + life of the container from the uptime of the service it provides. + +## How it works + +CFEngine, together with the cfe-docker integration policies, are +installed as part of the Dockerfile. This builds CFEngine into our +Docker image. + +The Dockerfile's `ENTRYPOINT` takes an arbitrary +amount of commands (with any desired arguments) as parameters. When we +run the Docker container these parameters get written to CFEngine +policies and CFEngine takes over to ensure that the desired processes +are running in the container. + +CFEngine scans the process table for the `basename` of the commands given +to the `ENTRYPOINT` and runs the command to start the process if the `basename` +is not found. For example, if we start the container with +`docker run "/path/to/my/application parameters"`, CFEngine will look for a +process named `application` and run the command. If an entry for `application` +is not found in the process table at any point in time, CFEngine will execute +`/path/to/my/application parameters` to start the application once again. The +check on the process table happens every minute. + +Note that it is therefore important that the command to start your +application leaves a process with the basename of the command. This can +be made more flexible by making some minor adjustments to the CFEngine +policies, if desired. + +## Usage + +This example assumes you have Docker installed and working. We will +install and manage `apache2` and `sshd` +in a single container. + +There are three steps: + +1. Install CFEngine into the container. +2. Copy the CFEngine Docker process management policy into the + containerized CFEngine installation. +3. Start your application processes as part of the `docker run` command. + +### Building the image + +The first two steps can be done as part of a Dockerfile, as follows. 
+ + FROM ubuntu + MAINTAINER Eystein Måløy Stenberg + + RUN apt-get update && apt-get install -y wget lsb-release unzip ca-certificates + + # install latest CFEngine + RUN wget -qO- http://cfengine.com/pub/gpg.key | apt-key add - + RUN echo "deb http://cfengine.com/pub/apt $(lsb_release -cs) main" > /etc/apt/sources.list.d/cfengine-community.list + RUN apt-get update && apt-get install -y cfengine-community + + # install cfe-docker process management policy + RUN wget https://github.com/estenberg/cfe-docker/archive/master.zip -P /tmp/ && unzip /tmp/master.zip -d /tmp/ + RUN cp /tmp/cfe-docker-master/cfengine/bin/* /var/cfengine/bin/ + RUN cp /tmp/cfe-docker-master/cfengine/inputs/* /var/cfengine/inputs/ + RUN rm -rf /tmp/cfe-docker-master /tmp/master.zip + + # apache2 and openssh are just for testing purposes, install your own apps here + RUN apt-get update && apt-get install -y openssh-server apache2 + RUN mkdir -p /var/run/sshd + RUN echo "root:password" | chpasswd # need a password for ssh + + ENTRYPOINT ["/var/cfengine/bin/docker_processes_run.sh"] + +By saving this file as Dockerfile to a working directory, you can then build +your image with the docker build command, e.g., +`docker build -t managed_image`. + +### Testing the container + +Start the container with `apache2` and `sshd` running and managed, forwarding +a port to our SSH instance: + + $ docker run -p 127.0.0.1:222:22 -d managed_image "/usr/sbin/sshd" "/etc/init.d/apache2 start" + +We now clearly see one of the benefits of the cfe-docker integration: it +allows to start several processes as part of a normal `docker run` command. + +We can now log in to our new container and see that both `apache2` and `sshd` +are running. We have set the root password to "password" in the Dockerfile +above and can use that to log in with ssh: + + ssh -p222 root@127.0.0.1 + + ps -ef + UID PID PPID C STIME TTY TIME CMD + root 1 0 0 07:48 ? 00:00:00 /bin/bash /var/cfengine/bin/docker_processes_run.sh /usr/sbin/sshd /etc/init.d/apache2 start + root 18 1 0 07:48 ? 00:00:00 /var/cfengine/bin/cf-execd -F + root 20 1 0 07:48 ? 00:00:00 /usr/sbin/sshd + root 32 1 0 07:48 ? 00:00:00 /usr/sbin/apache2 -k start + www-data 34 32 0 07:48 ? 00:00:00 /usr/sbin/apache2 -k start + www-data 35 32 0 07:48 ? 00:00:00 /usr/sbin/apache2 -k start + www-data 36 32 0 07:48 ? 00:00:00 /usr/sbin/apache2 -k start + root 93 20 0 07:48 ? 00:00:00 sshd: root@pts/0 + root 105 93 0 07:48 pts/0 00:00:00 -bash + root 112 105 0 07:49 pts/0 00:00:00 ps -ef + +If we stop apache2, it will be started again within a minute by +CFEngine. + + service apache2 status + Apache2 is running (pid 32). + service apache2 stop + * Stopping web server apache2 ... waiting [ OK ] + service apache2 status + Apache2 is NOT running. + # ... wait up to 1 minute... + service apache2 status + Apache2 is running (pid 173). + +## Adapting to your applications + +To make sure your applications get managed in the same manner, there are +just two things you need to adjust from the above example: + + - In the Dockerfile used above, install your applications instead of + `apache2` and `sshd`. + - When you start the container with `docker run`, + specify the command line arguments to your applications rather than + `apache2` and `sshd`. diff --git a/docs/articles/chef.md b/docs/articles/chef.md new file mode 100644 index 00000000..6a30ccb7 --- /dev/null +++ b/docs/articles/chef.md @@ -0,0 +1,80 @@ + + +# Using Chef + +> **Note**: +> Please note this is a community contributed installation path. 
The only +> `official` installation is using the +> [*Ubuntu*](/installation/ubuntulinux) installation +> path. This version may sometimes be out of date. + +## Requirements + +To use this guide you'll need a working installation of +[Chef](http://www.getchef.com/). This cookbook supports a variety of +operating systems. + +## Installation + +The cookbook is available on the [Chef Community +Site](http://community.opscode.com/cookbooks/docker) and can be +installed using your favorite cookbook dependency manager. + +The source can be found on +[GitHub](https://github.com/bflad/chef-docker). + +## Usage + +The cookbook provides recipes for installing Docker, configuring init +for Docker, and resources for managing images and containers. It +supports almost all Docker functionality. + +### Installation + + include_recipe 'docker' + +### Images + +The next step is to pull a Docker image. For this, we have a resource: + + docker_image 'samalba/docker-registry' + +This is equivalent to running: + + $ docker pull samalba/docker-registry + +There are attributes available to control how long the cookbook will +allow for downloading (5 minute default). + +To remove images you no longer need: + + docker_image 'samalba/docker-registry' do + action :remove + end + +### Containers + +Now you have an image where you can run commands within a container +managed by Docker. + + docker_container 'samalba/docker-registry' do + detach true + port '5000:5000' + env 'SETTINGS_FLAVOR=local' + volume '/mnt/docker:/docker-storage' + end + +This is equivalent to running the following command, but under upstart: + + $ docker run --detach=true --publish='5000:5000' --env='SETTINGS_FLAVOR=local' --volume='/mnt/docker:/docker-storage' samalba/docker-registry + +The resources will accept a single string or an array of values for any +Docker flags that allow multiple values. diff --git a/docs/articles/configuring.md b/docs/articles/configuring.md new file mode 100644 index 00000000..d7ae834d --- /dev/null +++ b/docs/articles/configuring.md @@ -0,0 +1,244 @@ + + +# Configuring and running Docker on various distributions + +After successfully installing Docker, the `docker` daemon runs with its default +configuration. + +In a production environment, system administrators typically configure the +`docker` daemon to start and stop according to an organization's requirements. In most +cases, the system administrator configures a process manager such as `SysVinit`, `Upstart`, +or `systemd` to manage the `docker` daemon's start and stop. + +### Running the docker daemon directly + +The `docker` daemon can be run directly using the `-d` option. By default it listens on +the Unix socket `unix:///var/run/docker.sock` + + $ docker daemon + + INFO[0000] +job init_networkdriver() + INFO[0000] +job serveapi(unix:///var/run/docker.sock) + INFO[0000] Listening for HTTP on unix (/var/run/docker.sock) + ... + ... + +### Configuring the docker daemon directly + +If you're running the `docker` daemon directly by running `docker daemon` instead +of using a process manager, you can append the configuration options to the `docker` run +command directly. Other options can be passed to the `docker` daemon to configure it. + +Some of the daemon's options are: + +| Flag | Description | +|-----------------------|-----------------------------------------------------------| +| `-D`, `--debug=false` | Enable or disable debug mode. By default, this is false. | +| `-H`,`--host=[]` | Daemon socket(s) to connect to. 
| +| `--tls=false` | Enable or disable TLS. By default, this is false. | + + +Here is a an example of running the `docker` daemon with configuration options: + + $ docker daemon -D --tls=true --tlscert=/var/docker/server.pem --tlskey=/var/docker/serverkey.pem -H tcp://192.168.59.3:2376 + +These options : + +- Enable `-D` (debug) mode +- Set `tls` to true with the server certificate and key specified using `--tlscert` and `--tlskey` respectively +- Listen for connections on `tcp://192.168.59.3:2376` + +The command line reference has the [complete list of daemon flags](/reference/commandline/cli/#daemon) +with explanations. + +## Ubuntu + +As of `14.04`, Ubuntu uses Upstart as a process manager. By default, Upstart jobs +are located in `/etc/init` and the `docker` Upstart job can be found at `/etc/init/docker.conf`. + +After successfully [installing Docker for Ubuntu](/installation/ubuntulinux/), +you can check the running status using Upstart in this way: + + $ sudo status docker + + docker start/running, process 989 + +### Running Docker + +You can start/stop/restart the `docker` daemon using + + $ sudo start docker + + $ sudo stop docker + + $ sudo restart docker + + +### Configuring Docker + +You configure the `docker` daemon in the `/etc/default/docker` file on your +system. You do this by specifying values in a `DOCKER_OPTS` variable. + +To configure Docker options: + +1. Log into your host as a user with `sudo` or `root` privileges. + +2. If you don't have one, create the `/etc/default/docker` file on your host. Depending on how +you installed Docker, you may already have this file. + +3. Open the file with your favorite editor. + + ``` + $ sudo vi /etc/default/docker + ``` + +4. Add a `DOCKER_OPTS` variable with the following options. These options are appended to the +`docker` daemon's run command. + +``` + DOCKER_OPTS="-D --tls=true --tlscert=/var/docker/server.pem --tlskey=/var/docker/serverkey.pem -H tcp://192.168.59.3:2376" +``` + +These options : + +- Enable `-D` (debug) mode +- Set `tls` to true with the server certificate and key specified using `--tlscert` and `--tlskey` respectively +- Listen for connections on `tcp://192.168.59.3:2376` + +The command line reference has the [complete list of daemon flags](/reference/commandline/cli/#daemon) +with explanations. + + +5. Save and close the file. + +6. Restart the `docker` daemon. + + ``` + $ sudo restart docker + ``` + +7. Verify that the `docker` daemon is running as specified with the `ps` command. + + ``` + $ ps aux | grep docker | grep -v grep + ``` + +### Logs + +By default logs for Upstart jobs are located in `/var/log/upstart` and the logs for `docker` daemon +can be located at `/var/log/upstart/docker.log` + + $ tail -f /var/log/upstart/docker.log + INFO[0000] Loading containers: done. + INFO[0000] docker daemon: 1.6.0 4749651; execdriver: native-0.2; graphdriver: aufs + INFO[0000] +job acceptconnections() + INFO[0000] -job acceptconnections() = OK (0) + INFO[0000] Daemon has completed initialization + + +## CentOS / Red Hat Enterprise Linux / Fedora + +As of `7.x`, CentOS and RHEL use `systemd` as the process manager. As of `21`, Fedora uses +`systemd` as its process manager. 
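If you're not sure which process manager a host is using, one rough check is to look at what is running as PID 1 (a sketch; the output shown is what you would expect on a `systemd` host):

    $ ps -p 1 -o comm=
    systemd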
+ +After successfully installing Docker for [CentOS](/installation/centos/)/[Red Hat Enterprise Linux] +(/installation/rhel/)/[Fedora](/installation/fedora), you can check the running status in this way: + + $ sudo systemctl status docker + +### Running Docker + +You can start/stop/restart the `docker` daemon using + + $ sudo systemctl start docker + + $ sudo systemctl stop docker + + $ sudo systemctl restart docker + +If you want Docker to start at boot, you should also: + + $ sudo systemctl enable docker + +### Configuring Docker + +You configure the `docker` daemon in the `/etc/sysconfig/docker` file on your +host. You do this by specifying values in a variable. For CentOS 7.x and RHEL 7.x, the name +of the variable is `OPTIONS` and for CentOS 6.x and RHEL 6.x, the name of the variable is +`other_args`. For this section, we will use CentOS 7.x as an example to configure the `docker` +daemon. + +By default, systemd services are located either in `/etc/systemd/service`, `/lib/systemd/system` +or `/usr/lib/systemd/system`. The `docker.service` file can be found in either of these three +directories depending on your host. + +To configure Docker options: + +1. Log into your host as a user with `sudo` or `root` privileges. + +2. If you don't have one, create the `/etc/sysconfig/docker` file on your host. Depending on how +you installed Docker, you may already have this file. + +3. Open the file with your favorite editor. + + ``` + $ sudo vi /etc/sysconfig/docker + ``` + +4. Add a `OPTIONS` variable with the following options. These options are appended to the +command that starts the `docker` daemon. + +``` + OPTIONS="-D --tls=true --tlscert=/var/docker/server.pem --tlskey=/var/docker/serverkey.pem -H tcp://192.168.59.3:2376" +``` + +These options : + +- Enable `-D` (debug) mode +- Set `tls` to true with the server certificate and key specified using `--tlscert` and `--tlskey` respectively +- Listen for connections on `tcp://192.168.59.3:2376` + +The command line reference has the [complete list of daemon flags](/reference/commandline/cli/#daemon) +with explanations. + +5. Save and close the file. + +6. Restart the `docker` daemon. + + ``` + $ sudo service docker restart + ``` + +7. Verify that the `docker` daemon is running as specified with the `ps` command. + + ``` + $ ps aux | grep docker | grep -v grep + ``` + +### Logs + +systemd has its own logging system called the journal. The logs for the `docker` daemon can +be viewed using `journalctl -u docker` + + $ sudo journalctl -u docker + May 06 00:22:05 localhost.localdomain systemd[1]: Starting Docker Application Container Engine... + May 06 00:22:05 localhost.localdomain docker[2495]: time="2015-05-06T00:22:05Z" level="info" msg="+job serveapi(unix:///var/run/docker.sock)" + May 06 00:22:05 localhost.localdomain docker[2495]: time="2015-05-06T00:22:05Z" level="info" msg="Listening for HTTP on unix (/var/run/docker.sock)" + May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="+job init_networkdriver()" + May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="-job init_networkdriver() = OK (0)" + May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="Loading containers: start." + May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="Loading containers: done." 
+ May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="docker daemon: 1.5.0-dev fc0329b/1.5.0; execdriver: native-0.2; graphdriver: devicemapper" + May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="+job acceptconnections()" + May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="-job acceptconnections() = OK (0)" + +_Note: Using and configuring journal is an advanced topic and is beyond the scope of this article._ diff --git a/docs/articles/dockerfile_best-practices.md b/docs/articles/dockerfile_best-practices.md new file mode 100644 index 00000000..1193eff8 --- /dev/null +++ b/docs/articles/dockerfile_best-practices.md @@ -0,0 +1,471 @@ + + +# Best practices for writing Dockerfiles + +## Overview + +Docker can build images automatically by reading the instructions from a +`Dockerfile`, a text file that contains all the commands, in order, needed to +build a given image. `Dockerfile`s adhere to a specific format and use a +specific set of instructions. You can learn the basics on the +[Dockerfile Reference](https://docs.docker.com/reference/builder/) page. If +you’re new to writing `Dockerfile`s, you should start there. + +This document covers the best practices and methods recommended by Docker, +Inc. and the Docker community for creating easy-to-use, effective +`Dockerfile`s. We strongly suggest you follow these recommendations (in fact, +if you’re creating an Official Image, you *must* adhere to these practices). + +You can see many of these practices and recommendations in action in the [buildpack-deps `Dockerfile`](https://github.com/docker-library/buildpack-deps/blob/master/jessie/Dockerfile). + +> Note: for more detailed explanations of any of the Dockerfile commands +>mentioned here, visit the [Dockerfile Reference](https://docs.docker.com/reference/builder/) page. + +## General guidelines and recommendations + +### Containers should be ephemeral + +The container produced by the image your `Dockerfile` defines should be as +ephemeral as possible. By “ephemeral,” we mean that it can be stopped and +destroyed and a new one built and put in place with an absolute minimum of +set-up and configuration. + +### Use a .dockerignore file + +In most cases, it's best to put each Dockerfile in an empty directory. Then, +add to that directory only the files needed for building the Dockerfile. To +increase the build's performance, you can exclude files and directories by +adding a `.dockerignore` file to that directory as well. This file supports +exclusion patterns similar to `.gitignore` files. For information on creating one, +see the [.dockerignore file](../../reference/builder/#dockerignore-file). + +### Avoid installing unnecessary packages + +In order to reduce complexity, dependencies, file sizes, and build times, you +should avoid installing extra or unnecessary packages just because they +might be “nice to have.” For example, you don’t need to include a text editor +in a database image. + +### Run only one process per container + +In almost all cases, you should only run a single process in a single +container. Decoupling applications into multiple containers makes it much +easier to scale horizontally and reuse containers. If that service depends on +another service, make use of [container linking](https://docs.docker.com/userguide/dockerlinks/). 
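For example, rather than running a web application and its database in one
container, you might run them as two containers and link them (the image name
`mywebapp` is purely illustrative):

    $ docker run -d --name db postgres
    $ docker run -d --name web --link db:db mywebapp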
+ +### Minimize the number of layers + +You need to find the balance between readability (and thus long-term +maintainability) of the `Dockerfile` and minimizing the number of layers it +uses. Be strategic and cautious about the number of layers you use. + +### Sort multi-line arguments + +Whenever possible, ease later changes by sorting multi-line arguments +alphanumerically. This will help you avoid duplication of packages and make the +list much easier to update. This also makes PRs a lot easier to read and +review. Adding a space before a backslash (`\`) helps as well. + +Here’s an example from the [`buildpack-deps` image](https://github.com/docker-library/buildpack-deps): + + RUN apt-get update && apt-get install -y \ + bzr \ + cvs \ + git \ + mercurial \ + subversion + +### Build cache + +During the process of building an image Docker will step through the +instructions in your `Dockerfile` executing each in the order specified. +As each instruction is examined Docker will look for an existing image in its +cache that it can reuse, rather than creating a new (duplicate) image. +If you do not want to use the cache at all you can use the ` --no-cache=true` +option on the `docker build` command. + +However, if you do let Docker use its cache then it is very important to +understand when it will, and will not, find a matching image. The basic rules +that Docker will follow are outlined below: + +* Starting with a base image that is already in the cache, the next +instruction is compared against all child images derived from that base +image to see if one of them was built using the exact same instruction. If +not, the cache is invalidated. + +* In most cases simply comparing the instruction in the `Dockerfile` with one +of the child images is sufficient. However, certain instructions require +a little more examination and explanation. + +* For the `ADD` and `COPY` instructions, the contents of the file(s) +in the image are examined and a checksum is calculated for each file. +The last-modified and last-accessed times of the file(s) are not considered in +these checksums. During the cache lookup, the checksum is compared against the +checksum in the existing images. If anything has changed in the file(s), such +as the contents and metadata, then the cache is invalidated. + +* Aside from the `ADD` and `COPY` commands cache checking will not look at the +files in the container to determine a cache match. For example, when processing +a `RUN apt-get -y update` command the files updated in the container +will not be examined to determine if a cache hit exists. In that case just +the command string itself will be used to find a match. + +Once the cache is invalidated, all subsequent `Dockerfile` commands will +generate new images and the cache will not be used. + +## The Dockerfile instructions + +Below you'll find recommendations for the best way to write the +various instructions available for use in a `Dockerfile`. + +### FROM + +[Dockerfile reference for the FROM instruction](https://docs.docker.com/reference/builder/#from) + +Whenever possible, use current Official Repositories as the basis for your +image. We recommend the [Debian image](https://registry.hub.docker.com/_/debian/) +since it’s very tightly controlled and kept extremely minimal (currently under +100 mb), while still being a full distribution. 
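Following that recommendation, a `Dockerfile` might begin like this (a minimal
sketch; `jessie` is just one example of pinning a specific release tag):

    FROM debian:jessie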
+ +### RUN + +[Dockerfile reference for the RUN instruction](https://docs.docker.com/reference/builder/#run) + +As always, to make your `Dockerfile` more readable, understandable, and +maintainable, put long or complex `RUN` statements on multiple lines separated +with backslashes. + +Probably the most common use-case for `RUN` is an application of `apt-get`. +When using `apt-get`, here are a few things to keep in mind: + +* Don’t do `RUN apt-get update` on a single line. This will cause +caching issues if the referenced archive gets updated, which will make your +subsequent `apt-get install` fail without comment. + +* Avoid `RUN apt-get upgrade` or `dist-upgrade`, since many of the “essential” +packages from the base images will fail to upgrade inside an unprivileged +container. If a base package is out of date, you should contact its +maintainers. If you know there’s a particular package, `foo`, that needs to be +updated, use `apt-get install -y foo` and it will update automatically. + +* Do write instructions like: + + RUN apt-get update && apt-get install -y \ + package-bar \ + package-baz \ + package-foo + +Writing the instruction this way not only makes it easier to read +and maintain, but also, by including `apt-get update`, ensures that the cache +will naturally be busted and the latest versions will be installed with no +further coding or manual intervention required. + +* Further natural cache-busting can be realized by version-pinning packages +(e.g., `package-foo=1.3.*`). This will force retrieval of that version +regardless of what’s in the cache. +Writing your `apt-get` code this way will greatly ease maintenance and reduce +failures due to unanticipated changes in required packages. + +#### Example + +Below is a well-formed `RUN` instruction that demonstrates the above +recommendations. Note that the last package, `s3cmd`, specifies a version +`1.1.0*`. If the image previously used an older version, specifying the new one +will cause a cache bust of `apt-get update` and ensure the installation of +the new version (which in this case had a new, required feature). + + RUN apt-get update && apt-get install -y \ + aufs-tools \ + automake \ + btrfs-tools \ + build-essential \ + curl \ + dpkg-sig \ + git \ + iptables \ + libapparmor-dev \ + libcap-dev \ + libsqlite3-dev \ + lxc=1.0* \ + mercurial \ + parallel \ + reprepro \ + ruby1.9.1 \ + ruby1.9.1-dev \ + s3cmd=1.1.0* + +Writing the instruction this way also helps you avoid potential duplication of +a given package because it is much easier to read than an instruction like: + + RUN apt-get install -y package-foo && apt-get install -y package-bar + +### CMD + +[Dockerfile reference for the CMD instruction](https://docs.docker.com/reference/builder/#cmd) + +The `CMD` instruction should be used to run the software contained by your +image, along with any arguments. `CMD` should almost always be used in the +form of `CMD [“executable”, “param1”, “param2”…]`. Thus, if the image is for a +service (Apache, Rails, etc.), you would run something like +`CMD ["apache2","-DFOREGROUND"]`. Indeed, this form of the instruction is +recommended for any service-based image. + +In most other cases, `CMD` should be given an interactive shell (bash, python, +perl, etc), for example, `CMD ["perl", "-de0"]`, `CMD ["python"]`, or +`CMD [“php”, “-a”]`. Using this form means that when you execute something like +`docker run -it python`, you’ll get dropped into a usable shell, ready to go. 
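For instance, assuming a hypothetical image named `my-python-app` whose
`Dockerfile` ends with `CMD ["python"]`, running it interactively drops you
straight into the interpreter:

    $ docker run -it my-python-app
    >>>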
`CMD` should rarely be used in the manner of `CMD [“param”, “param”]` in
conjunction with [`ENTRYPOINT`](https://docs.docker.com/reference/builder/#entrypoint), unless
you and your expected users are already quite familiar with how `ENTRYPOINT`
works.

### EXPOSE

[Dockerfile reference for the EXPOSE instruction](https://docs.docker.com/reference/builder/#expose)

The `EXPOSE` instruction indicates the ports on which a container will listen
for connections. Consequently, you should use the common, traditional port for
your application. For example, an image containing the Apache web server would
use `EXPOSE 80`, while an image containing MongoDB would use `EXPOSE 27017` and
so on.

For external access, your users can execute `docker run` with a flag indicating
how to map the specified port to the port of their choice.
For container linking, Docker provides environment variables for the path from
the recipient container back to the source (e.g., `MYSQL_PORT_3306_TCP`).

### ENV

[Dockerfile reference for the ENV instruction](https://docs.docker.com/reference/builder/#env)

In order to make new software easier to run, you can use `ENV` to update the
`PATH` environment variable for the software your container installs. For
example, `ENV PATH /usr/local/nginx/bin:$PATH` will ensure that `CMD [“nginx”]`
just works.

The `ENV` instruction is also useful for providing required environment
variables specific to services you wish to containerize, such as Postgres’s
`PGDATA`.

Lastly, `ENV` can also be used to set commonly used version numbers so that
version bumps are easier to maintain, as seen in the following example:

    ENV PG_MAJOR 9.3
    ENV PG_VERSION 9.3.4
    RUN curl -SL http://example.com/postgres-$PG_VERSION.tar.xz | tar -xJC /usr/src/postgres && …
    ENV PATH /usr/local/postgres-$PG_MAJOR/bin:$PATH

Similar to having constant variables in a program (as opposed to hard-coding
values), this approach lets you change a single `ENV` instruction to
auto-magically bump the version of the software in your container.

### ADD or COPY

[Dockerfile reference for the ADD instruction](https://docs.docker.com/reference/builder/#add)
+[Dockerfile reference for the COPY instruction](https://docs.docker.com/reference/builder/#copy) + +Although `ADD` and `COPY` are functionally similar, generally speaking, `COPY` +is preferred. That’s because it’s more transparent than `ADD`. `COPY` only +supports the basic copying of local files into the container, while `ADD` has +some features (like local-only tar extraction and remote URL support) that are +not immediately obvious. Consequently, the best use for `ADD` is local tar file +auto-extraction into the image, as in `ADD rootfs.tar.xz /`. + +If you have multiple `Dockerfile` steps that use different files from your +context, `COPY` them individually, rather than all at once. This will ensure that +each step's build cache is only invalidated (forcing the step to be re-run) if the +specifically required files change. + +For example: + + COPY requirements.txt /tmp/ + RUN pip install /tmp/requirements.txt + COPY . /tmp/ + +Results in fewer cache invalidations for the `RUN` step, than if you put the +`COPY . /tmp/` before it. + +Because image size matters, using `ADD` to fetch packages from remote URLs is +strongly discouraged; you should use `curl` or `wget` instead. That way you can +delete the files you no longer need after they've been extracted and you won't +have to add another layer in your image. For example, you should avoid doing +things like: + + ADD http://example.com/big.tar.xz /usr/src/things/ + RUN tar -xJf /usr/src/things/big.tar.xz -C /usr/src/things + RUN make -C /usr/src/things all + +And instead, do something like: + + RUN mkdir -p /usr/src/things \ + && curl -SL http://example.com/big.tar.xz \ + | tar -xJC /usr/src/things \ + && make -C /usr/src/things all + +For other items (files, directories) that do not require `ADD`’s tar +auto-extraction capability, you should always use `COPY`. + +### ENTRYPOINT + +[Dockerfile reference for the ENTRYPOINT instruction](https://docs.docker.com/reference/builder/#entrypoint) + +The best use for `ENTRYPOINT` is to set the image's main command, allowing that +image to be run as though it was that command (and then use `CMD` as the +default flags). + +Let's start with an example of an image for the command line tool `s3cmd`: + + ENTRYPOINT ["s3cmd"] + CMD ["--help"] + +Now the image can be run like this to show the command's help: + + $ docker run s3cmd + +Or using the right parameters to execute a command: + + $ docker run s3cmd ls s3://mybucket + +This is useful because the image name can double as a reference to the binary as +shown in the command above. + +The `ENTRYPOINT` instruction can also be used in combination with a helper +script, allowing it to function in a similar way to the command above, even +when starting the tool may require more than one step. + +For example, the [Postgres Official Image](https://registry.hub.docker.com/_/postgres/) +uses the following script as its `ENTRYPOINT`: + +```bash +#!/bin/bash +set -e + +if [ "$1" = 'postgres' ]; then + chown -R postgres "$PGDATA" + + if [ -z "$(ls -A "$PGDATA")" ]; then + gosu postgres initdb + fi + + exec gosu postgres "$@" +fi + +exec "$@" +``` + +> **Note**: +> This script uses [the `exec` Bash command](http://wiki.bash-hackers.org/commands/builtin/exec) +> so that the final running application becomes the container's PID 1. This allows +> the application to receive any Unix signals sent to the container. +> See the [`ENTRYPOINT`](https://docs.docker.com/reference/builder/#entrypoint) +> help for more details. 
+ + +The helper script is copied into the container and run via `ENTRYPOINT` on +container start: + + COPY ./docker-entrypoint.sh / + ENTRYPOINT ["/docker-entrypoint.sh"] + +This script allows the user to interact with Postgres in several ways. + +It can simply start Postgres: + + $ docker run postgres + +Or, it can be used to run Postgres and pass parameters to the server: + + $ docker run postgres postgres --help + +Lastly, it could also be used to start a totally different tool, such as Bash: + + $ docker run --rm -it postgres bash + +### VOLUME + +[Dockerfile reference for the VOLUME instruction](https://docs.docker.com/reference/builder/#volume) + +The `VOLUME` instruction should be used to expose any database storage area, +configuration storage, or files/folders created by your docker container. You +are strongly encouraged to use `VOLUME` for any mutable and/or user-serviceable +parts of your image. + +### USER + +[Dockerfile reference for the USER instruction](https://docs.docker.com/reference/builder/#user) + +If a service can run without privileges, use `USER` to change to a non-root +user. Start by creating the user and group in the `Dockerfile` with something +like `RUN groupadd -r postgres && useradd -r -g postgres postgres`. + +> **Note:** Users and groups in an image get a non-deterministic +> UID/GID in that the “next” UID/GID gets assigned regardless of image +> rebuilds. So, if it’s critical, you should assign an explicit UID/GID. + +You should avoid installing or using `sudo` since it has unpredictable TTY and +signal-forwarding behavior that can cause more problems than it solves. If +you absolutely need functionality similar to `sudo` (e.g., initializing the +daemon as root but running it as non-root), you may be able to use +[“gosu”](https://github.com/tianon/gosu). + +Lastly, to reduce layers and complexity, avoid switching `USER` back +and forth frequently. + +### WORKDIR + +[Dockerfile reference for the WORKDIR instruction](https://docs.docker.com/reference/builder/#workdir) + +For clarity and reliability, you should always use absolute paths for your +`WORKDIR`. Also, you should use `WORKDIR` instead of proliferating +instructions like `RUN cd … && do-something`, which are hard to read, +troubleshoot, and maintain. + +### ONBUILD + +[Dockerfile reference for the ONBUILD instruction](https://docs.docker.com/reference/builder/#onbuild) + +An `ONBUILD` command executes after the current `Dockerfile` build completes. +`ONBUILD` executes in any child image derived `FROM` the current image. Think +of the `ONBUILD` command as an instruction the parent `Dockerfile` gives +to the child `Dockerfile`. + +A Docker build executes `ONBUILD` commands before any command in a child +`Dockerfile`. + +`ONBUILD` is useful for images that are going to be built `FROM` a given +image. For example, you would use `ONBUILD` for a language stack image that +builds arbitrary user software written in that language within the +`Dockerfile`, as you can see in [Ruby’s `ONBUILD` variants](https://github.com/docker-library/ruby/blob/master/2.1/onbuild/Dockerfile). + +Images built from `ONBUILD` should get a separate tag, for example: +`ruby:1.9-onbuild` or `ruby:2.0-onbuild`. + +Be careful when putting `ADD` or `COPY` in `ONBUILD`. The “onbuild” image will +fail catastrophically if the new build's context is missing the resource being +added. Adding a separate tag, as recommended above, will help mitigate this by +allowing the `Dockerfile` author to make a choice. 
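As a sketch of the pattern (the path is illustrative and assumes the parent
image has already set `WORKDIR /usr/src/app`, loosely modeled on the Ruby
variants linked above):

    ONBUILD COPY . /usr/src/app
    ONBUILD RUN bundle install

Any image built `FROM` an image containing these instructions gets its build
context copied in and its dependencies installed automatically.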
+ +## Examples for Official Repositories + +These Official Repositories have exemplary `Dockerfile`s: + +* [Go](https://registry.hub.docker.com/_/golang/) +* [Perl](https://registry.hub.docker.com/_/perl/) +* [Hy](https://registry.hub.docker.com/_/hylang/) +* [Rails](https://registry.hub.docker.com/_/rails) + +## Additional resources: + +* [Dockerfile Reference](https://docs.docker.com/reference/builder/) +* [More about Base Images](https://docs.docker.com/articles/baseimages/) +* [More about Automated Builds](https://docs.docker.com/docker-hub/builds/) +* [Guidelines for Creating Official +Repositories](https://docs.docker.com/docker-hub/official_repos/) diff --git a/docs/articles/dsc.md b/docs/articles/dsc.md new file mode 100644 index 00000000..2fe7553b --- /dev/null +++ b/docs/articles/dsc.md @@ -0,0 +1,173 @@ + + +# Using PowerShell DSC + +Windows PowerShell Desired State Configuration (DSC) is a configuration +management tool that extends the existing functionality of Windows PowerShell. +DSC uses a declarative syntax to define the state in which a target should be +configured. More information about PowerShell DSC can be found at +[http://technet.microsoft.com/en-us/library/dn249912.aspx](http://technet.microsoft.com/en-us/library/dn249912.aspx). + +## Requirements + +To use this guide you'll need a Windows host with PowerShell v4.0 or newer. + +The included DSC configuration script also uses the official PPA so +only an Ubuntu target is supported. The Ubuntu target must already have the +required OMI Server and PowerShell DSC for Linux providers installed. More +information can be found at [https://github.com/MSFTOSSMgmt/WPSDSCLinux](https://github.com/MSFTOSSMgmt/WPSDSCLinux). +The source repository listed below also includes PowerShell DSC for Linux +installation and init scripts along with more detailed installation information. + +## Installation + +The DSC configuration example source is available in the following repository: +[https://github.com/anweiss/DockerClientDSC](https://github.com/anweiss/DockerClientDSC). It can be cloned with: + + $ git clone https://github.com/anweiss/DockerClientDSC.git + +## Usage + +The DSC configuration utilizes a set of shell scripts to determine whether or +not the specified Docker components are configured on the target node(s). The +source repository also includes a script (`RunDockerClientConfig.ps1`) that can +be used to establish the required CIM session(s) and execute the +`Set-DscConfiguration` cmdlet. + +More detailed usage information can be found at +[https://github.com/anweiss/DockerClientDSC](https://github.com/anweiss/DockerClientDSC). + +### Install Docker +The Docker installation configuration is equivalent to running: + +``` +apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys\ +36A1D7869245C8950F966E92D8576A8BA88D21E9 +sh -c "echo deb https://apt.dockerproject.org/repo ubuntu-trusty main\ +> /etc/apt/sources.list.d/docker.list" +apt-get update +apt-get install docker-engine +``` + +Ensure that your current working directory is set to the `DockerClientDSC` +source and load the DockerClient configuration into the current PowerShell +session + +```powershell +. 
.\DockerClient.ps1 +``` + +Generate the required DSC configuration .mof file for the targeted node + +```powershell +DockerClient -Hostname "myhost" +``` + +A sample DSC configuration data file has also been included and can be modified +and used in conjunction with or in place of the `Hostname` parameter: + +```powershell +DockerClient -ConfigurationData .\DockerConfigData.psd1 +``` + +Start the configuration application process on the targeted node + +```powershell +.\RunDockerClientConfig.ps1 -Hostname "myhost" +``` + +The `RunDockerClientConfig.ps1` script can also parse a DSC configuration data +file and execute configurations against multiple nodes as such: + +```powershell +.\RunDockerClientConfig.ps1 -ConfigurationData .\DockerConfigData.psd1 +``` + +### Images +Image configuration is equivalent to running: `docker pull [image]` or +`docker rmi -f [IMAGE]`. + +Using the same steps defined above, execute `DockerClient` with the `Image` +parameter and apply the configuration: + +```powershell +DockerClient -Hostname "myhost" -Image "node" +.\RunDockerClientConfig.ps1 -Hostname "myhost" +``` + +You can also configure the host to pull multiple images: + +```powershell +DockerClient -Hostname "myhost" -Image "node","mongo" +.\RunDockerClientConfig.ps1 -Hostname "myhost" +``` + +To remove images, use a hashtable as follows: + +```powershell +DockerClient -Hostname "myhost" -Image @{Name="node"; Remove=$true} +.\RunDockerClientConfig.ps1 -Hostname $hostname +``` + +### Containers +Container configuration is equivalent to running: + +``` +docker run -d --name="[containername]" -p '[port]' -e '[env]' --link '[link]'\ +'[image]' '[command]' +``` +or + +``` +docker rm -f [containername] +``` + +To create or remove containers, you can use the `Container` parameter with one +or more hashtables. The hashtable(s) passed to this parameter can have the +following properties: + +- Name (required) +- Image (required unless Remove property is set to `$true`) +- Port +- Env +- Link +- Command +- Remove + +For example, create a hashtable with the settings for your container: + +```powershell +$webContainer = @{Name="web"; Image="anweiss/docker-platynem"; Port="80:80"} +``` + +Then, using the same steps defined above, execute +`DockerClient` with the `-Image` and `-Container` parameters: + +```powershell +DockerClient -Hostname "myhost" -Image node -Container $webContainer +.\RunDockerClientConfig.ps1 -Hostname "myhost" +``` + +Existing containers can also be removed as follows: + +```powershell +$containerToRemove = @{Name="web"; Remove=$true} +DockerClient -Hostname "myhost" -Container $containerToRemove +.\RunDockerClientConfig.ps1 -Hostname "myhost" +``` + +Here is a hashtable with all of the properties that can be used to create a +container: + +```powershell +$containerProps = @{Name="web"; Image="node:latest"; Port="80:80"; ` +Env="PORT=80"; Link="db:db"; Command="grunt"} +``` diff --git a/docs/articles/host_integration.md b/docs/articles/host_integration.md new file mode 100644 index 00000000..67377217 --- /dev/null +++ b/docs/articles/host_integration.md @@ -0,0 +1,93 @@ + + +# Automatically start containers + +As of Docker 1.2, +[restart policies](/reference/run/#restart-policies-restart) are the +built-in Docker mechanism for restarting containers when they exit. If set, +restart policies will be used when the Docker daemon starts up, as typically +happens after a system boot. Restart policies will ensure that linked containers +are started in the correct order. 
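For example, to have Docker itself keep a container running across exits and
daemon restarts (the image and container name are illustrative):

    $ docker run -d --restart=always --name my_redis redis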
If restart policies don't suit your needs (i.e., you have non-Docker processes
that depend on Docker containers), you can use a process manager like
[upstart](http://upstart.ubuntu.com/),
[systemd](http://freedesktop.org/wiki/Software/systemd/) or
[supervisor](http://supervisord.org/) instead.


## Using a process manager

Docker does not set any restart policies by default, but be aware that they will
conflict with most process managers. So don't set restart policies if you are
using a process manager.

*Note:* Prior to Docker 1.2, restarting of Docker containers had to be
explicitly disabled. Refer to the
[previous version](/v1.1/articles/host_integration/) of this article for the
details on how to do that.

When you have finished setting up your image and are happy with your
running container, you can then attach a process manager to manage it.
When you run `docker start -a`, Docker will automatically attach to the
running container, or start it if needed, and forward all signals so that
the process manager can detect when a container stops and correctly
restart it.

Here are a few sample scripts for systemd and upstart to integrate with
Docker.


## Examples

The examples below show configuration files for two popular process managers,
upstart and systemd. In these examples, we'll assume that we have already
created a container to run Redis with `--name=redis_server`. These files define
a new service that will be started after the docker daemon service has started.


### upstart

    description "Redis container"
    author "Me"
    start on filesystem and started docker
    stop on runlevel [!2345]
    respawn
    script
      /usr/bin/docker start -a redis_server
    end script

### systemd

    [Unit]
    Description=Redis container
    Requires=docker.service
    After=docker.service

    [Service]
    Restart=always
    ExecStart=/usr/bin/docker start -a redis_server
    ExecStop=/usr/bin/docker stop -t 2 redis_server

    [Install]
    WantedBy=multi-user.target

If you need to pass options to the redis container (such as `--env`),
then you'll need to use `docker run` rather than `docker start`. This will
create a new container every time the service is started, which will be stopped
and removed when the service is stopped.

    [Service]
    ...
    ExecStart=/usr/bin/docker run --env foo=bar --name redis_server redis
    ExecStop=/usr/bin/docker stop -t 2 redis_server ; /usr/bin/docker rm -f redis_server
    ...

diff --git a/docs/articles/https.md b/docs/articles/https.md
new file mode 100644
index 00000000..d7f016d8
--- /dev/null
+++ b/docs/articles/https.md
@@ -0,0 +1,211 @@

# Protect the Docker daemon socket

By default, Docker runs via a non-networked Unix socket. It can also
optionally communicate using an HTTP socket.

If you need Docker to be reachable via the network in a safe manner, you can
enable TLS by specifying the `tlsverify` flag and pointing Docker's
`tlscacert` flag to a trusted CA certificate.

In daemon mode, Docker will then only allow connections from clients
authenticated by a certificate signed by that CA. In client mode, it will
only connect to servers with a certificate signed by that CA.

> **Warning**:
> Using TLS and managing a CA is an advanced topic. Please familiarize yourself
> with OpenSSL, x509 and TLS before using it in production.

> **Warning**:
> These TLS commands will only generate a working set of certificates on Linux.
+> Mac OS X comes with a version of OpenSSL that is incompatible with the +> certificates that Docker requires. + +## Create a CA, server and client keys with OpenSSL + +> **Note**: replace all instances of `$HOST` in the following example with the +> DNS name of your Docker daemon's host. + +First generate CA private and public keys: + + $ openssl genrsa -aes256 -out ca-key.pem 4096 + Generating RSA private key, 4096 bit long modulus + ............................................................................................................................................................................................++ + ........++ + e is 65537 (0x10001) + Enter pass phrase for ca-key.pem: + Verifying - Enter pass phrase for ca-key.pem: + $ openssl req -new -x509 -days 365 -key ca-key.pem -sha256 -out ca.pem + Enter pass phrase for ca-key.pem: + You are about to be asked to enter information that will be incorporated + into your certificate request. + What you are about to enter is what is called a Distinguished Name or a DN. + There are quite a few fields but you can leave some blank + For some fields there will be a default value, + If you enter '.', the field will be left blank. + ----- + Country Name (2 letter code) [AU]: + State or Province Name (full name) [Some-State]:Queensland + Locality Name (eg, city) []:Brisbane + Organization Name (eg, company) [Internet Widgits Pty Ltd]:Docker Inc + Organizational Unit Name (eg, section) []:Sales + Common Name (e.g. server FQDN or YOUR name) []:$HOST + Email Address []:Sven@home.org.au + +Now that we have a CA, you can create a server key and certificate +signing request (CSR). Make sure that "Common Name" (i.e., server FQDN or YOUR +name) matches the hostname you will use to connect to Docker: + +> **Note**: replace all instances of `$HOST` in the following example with the +> DNS name of your Docker daemon's host. + + $ openssl genrsa -out server-key.pem 4096 + Generating RSA private key, 4096 bit long modulus + .....................................................................++ + .................................................................................................++ + e is 65537 (0x10001) + $ openssl req -subj "/CN=$HOST" -sha256 -new -key server-key.pem -out server.csr + +Next, we're going to sign the public key with our CA: + +Since TLS connections can be made via IP address as well as DNS name, they need +to be specified when creating the certificate. 
For example, to allow connections +using `10.10.10.20` and `127.0.0.1`: + + $ echo subjectAltName = IP:10.10.10.20,IP:127.0.0.1 > extfile.cnf + + $ openssl x509 -req -days 365 -sha256 -in server.csr -CA ca.pem -CAkey ca-key.pem \ + -CAcreateserial -out server-cert.pem -extfile extfile.cnf + Signature ok + subject=/CN=your.host.com + Getting CA Private Key + Enter pass phrase for ca-key.pem: + +For client authentication, create a client key and certificate signing +request: + + $ openssl genrsa -out key.pem 4096 + Generating RSA private key, 4096 bit long modulus + .........................................................++ + ................++ + e is 65537 (0x10001) + $ openssl req -subj '/CN=client' -new -key key.pem -out client.csr + +To make the key suitable for client authentication, create an extensions +config file: + + $ echo extendedKeyUsage = clientAuth > extfile.cnf + +Now sign the public key: + + $ openssl x509 -req -days 365 -sha256 -in client.csr -CA ca.pem -CAkey ca-key.pem \ + -CAcreateserial -out cert.pem -extfile extfile.cnf + Signature ok + subject=/CN=client + Getting CA Private Key + Enter pass phrase for ca-key.pem: + +After generating `cert.pem` and `server-cert.pem` you can safely remove the +two certificate signing requests: + + $ rm -v client.csr server.csr + +With a default `umask` of 022, your secret keys will be *world-readable* and +writable for you and your group. + +In order to protect your keys from accidental damage, you will want to remove their +write permissions. To make them only readable by you, change file modes as follows: + + $ chmod -v 0400 ca-key.pem key.pem server-key.pem + +Certificates can be world-readable, but you might want to remove write access to +prevent accidental damage: + + $ chmod -v 0444 ca.pem server-cert.pem cert.pem + +Now you can make the Docker daemon only accept connections from clients +providing a certificate trusted by our CA: + + $ docker daemon --tlsverify --tlscacert=ca.pem --tlscert=server-cert.pem --tlskey=server-key.pem \ + -H=0.0.0.0:2376 + +To be able to connect to Docker and validate its certificate, you now +need to provide your client keys, certificates and trusted CA: + +> **Note**: replace all instances of `$HOST` in the following example with the +> DNS name of your Docker daemon's host. + + $ docker --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem \ + -H=$HOST:2376 version + +> **Note**: +> Docker over TLS should run on TCP port 2376. + +> **Warning**: +> As shown in the example above, you don't have to run the `docker` client +> with `sudo` or the `docker` group when you use certificate authentication. +> That means anyone with the keys can give any instructions to your Docker +> daemon, giving them root access to the machine hosting the daemon. Guard +> these keys as you would a root password! + +## Secure by default + +If you want to secure your Docker client connections by default, you can move +the files to the `.docker` directory in your home directory -- and set the +`DOCKER_HOST` and `DOCKER_TLS_VERIFY` variables as well (instead of passing +`-H=tcp://$HOST:2376` and `--tlsverify` on every call). + + $ mkdir -pv ~/.docker + $ cp -v {ca,cert,key}.pem ~/.docker + $ export DOCKER_HOST=tcp://$HOST:2376 DOCKER_TLS_VERIFY=1 + +Docker will now connect securely by default: + + $ docker ps + +## Other modes + +If you don't want to have complete two-way authentication, you can run +Docker in various other modes by mixing the flags. 
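For example, the second daemon mode listed below (encrypt traffic, but do not
authenticate clients) could be started with the certificates generated earlier:

    $ docker daemon --tls --tlscert=server-cert.pem --tlskey=server-key.pem -H=0.0.0.0:2376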
+ +### Daemon modes + + - `tlsverify`, `tlscacert`, `tlscert`, `tlskey` set: Authenticate clients + - `tls`, `tlscert`, `tlskey`: Do not authenticate clients + +### Client modes + + - `tls`: Authenticate server based on public/default CA pool + - `tlsverify`, `tlscacert`: Authenticate server based on given CA + - `tls`, `tlscert`, `tlskey`: Authenticate with client certificate, do not + authenticate server based on given CA + - `tlsverify`, `tlscacert`, `tlscert`, `tlskey`: Authenticate with client + certificate and authenticate server based on given CA + +If found, the client will send its client certificate, so you just need +to drop your keys into `~/.docker/{ca,cert,key}.pem`. Alternatively, +if you want to store your keys in another location, you can specify that +location using the environment variable `DOCKER_CERT_PATH`. + + $ export DOCKER_CERT_PATH=~/.docker/zone1/ + $ docker --tlsverify ps + +### Connecting to the secure Docker port using `curl` + +To use `curl` to make test API requests, you need to use three extra command line +flags: + + $ curl https://$HOST:2376/images/json \ + --cert ~/.docker/cert.pem \ + --key ~/.docker/key.pem \ + --cacert ~/.docker/ca.pem diff --git a/docs/articles/https/Dockerfile b/docs/articles/https/Dockerfile new file mode 100644 index 00000000..a3cc132c --- /dev/null +++ b/docs/articles/https/Dockerfile @@ -0,0 +1,10 @@ +FROM debian + +RUN apt-get update && apt-get install -yq openssl + +ADD make_certs.sh / + + +WORKDIR /data +VOLUME ["/data"] +CMD /make_certs.sh diff --git a/docs/articles/https/Makefile b/docs/articles/https/Makefile new file mode 100644 index 00000000..f06e0d25 --- /dev/null +++ b/docs/articles/https/Makefile @@ -0,0 +1,24 @@ + +HOST:=boot2docker + +makescript: + ./parsedocs.sh > make_certs.sh + +build: clean makescript + docker build -t makecerts . 
+ +cert: build + docker run --rm -it -v $(CURDIR):/data -e HOST=$(HOST) -e YOUR_PUBLIC_IP=$(shell ip a | grep "inet " | sed "s/.*inet \([0-9.]*\)\/.*/\1/" | xargs echo | sed "s/ /,IP:/g") makecerts + +certs: cert + +run: + sudo docker -d -D --tlsverify --tlscacert=ca.pem --tlscert=server-cert.pem --tlskey=server-key.pem -H=0.0.0.0:6666 --pidfile=$(pwd)/docker.pid --graph=$(pwd)/graph + +client: + sudo docker --tls --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem -H=$(HOST):6666 version + sudo docker --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem -H=$(HOST):6666 info + sudo curl https://$(HOST):6666/images/json --cert ./cert.pem --key ./key.pem --cacert ./ca.pem + +clean: + rm -f ca-key.pem ca.pem ca.srl cert.pem client.csr extfile.cnf key.pem server-cert.pem server-key.pem server.csr extfile.cnf diff --git a/docs/articles/https/README.md b/docs/articles/https/README.md new file mode 100644 index 00000000..1faca04a --- /dev/null +++ b/docs/articles/https/README.md @@ -0,0 +1,26 @@ + + +This is an initial attempt to make it easier to test the examples in the https.md +doc + +at this point, it has to be a manual thing, and I've been running it in boot2docker + +so my process is + +$ boot2docker ssh +$$ git clone https://github.com/docker/docker +$$ cd docker/docs/articles/https +$$ make cert +lots of things to see and manually answer, as openssl wants to be interactive +**NOTE:** make sure you enter the hostname (`boot2docker` in my case) when prompted for `Computer Name`) +$$ sudo make run + +start another terminal + +$ boot2docker ssh +$$ cd docker/docs/articles/https +$$ make client + +the last will connect first with `--tls` and then with `--tlsverify` + +both should succeed diff --git a/docs/articles/https/make_certs.sh b/docs/articles/https/make_certs.sh new file mode 100755 index 00000000..39001fdb --- /dev/null +++ b/docs/articles/https/make_certs.sh @@ -0,0 +1,23 @@ +#!/bin/sh +openssl genrsa -aes256 -out ca-key.pem 2048 +openssl req -new -x509 -days 365 -key ca-key.pem -sha256 -out ca.pem +openssl genrsa -out server-key.pem 2048 +openssl req -subj "/CN=$HOST" -new -key server-key.pem -out server.csr +echo subjectAltName = IP:$YOUR_PUBLIC_IP > extfile.cnf +openssl x509 -req -days 365 -in server.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out server-cert.pem -extfile extfile.cnf +openssl genrsa -out key.pem 2048 +openssl req -subj '/CN=client' -new -key key.pem -out client.csr +echo extendedKeyUsage = clientAuth > extfile.cnf +openssl x509 -req -days 365 -in client.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out cert.pem -extfile extfile.cnf +rm -v client.csr server.csr +chmod -v 0400 ca-key.pem key.pem server-key.pem +chmod -v 0444 ca.pem server-cert.pem cert.pem +# docker -d --tlsverify --tlscacert=ca.pem --tlscert=server-cert.pem --tlskey=server-key.pem -H=0.0.0.0:7778 +# docker --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem -H=$HOST:7778 version +mkdir -pv ~/.docker +cp -v {ca,cert,key}.pem ~/.docker +export DOCKER_HOST=tcp://$HOST:7778 DOCKER_TLS_VERIFY=1 +# docker ps +export DOCKER_CERT_PATH=~/.docker/zone1/ +# docker --tlsverify ps +# curl https://$HOST:7778/images/json --cert ~/.docker/cert.pem --key ~/.docker/key.pem --cacert ~/.docker/ca.pem diff --git a/docs/articles/https/parsedocs.sh b/docs/articles/https/parsedocs.sh new file mode 100755 index 00000000..f9df33c3 --- /dev/null +++ b/docs/articles/https/parsedocs.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +echo "#!/bin/sh" +cat ../https.md | awk '{if (sub(/\\$/,"")) printf 
"%s", $0; else print $0}' \ + | grep ' $ ' \ + | sed 's/ $ //g' \ + | sed 's/2375/7777/g' \ + | sed 's/2376/7778/g' \ + | sed 's/^docker/# docker/g' \ + | sed 's/^curl/# curl/g' diff --git a/docs/articles/networking.md b/docs/articles/networking.md new file mode 100644 index 00000000..94b698a1 --- /dev/null +++ b/docs/articles/networking.md @@ -0,0 +1,1127 @@ + + +# Network configuration + +## Summary + +When Docker starts, it creates a virtual interface named `docker0` on +the host machine. It randomly chooses an address and subnet from the +private range defined by [RFC 1918](http://tools.ietf.org/html/rfc1918) +that are not in use on the host machine, and assigns it to `docker0`. +Docker made the choice `172.17.42.1/16` when I started it a few minutes +ago, for example — a 16-bit netmask providing 65,534 addresses for the +host machine and its containers. The MAC address is generated using the +IP address allocated to the container to avoid ARP collisions, using a +range from `02:42:ac:11:00:00` to `02:42:ac:11:ff:ff`. + +> **Note:** +> This document discusses advanced networking configuration +> and options for Docker. In most cases you won't need this information. +> If you're looking to get started with a simpler explanation of Docker +> networking and an introduction to the concept of container linking see +> the [Docker User Guide](/userguide/dockerlinks/). + +But `docker0` is no ordinary interface. It is a virtual *Ethernet +bridge* that automatically forwards packets between any other network +interfaces that are attached to it. This lets containers communicate +both with the host machine and with each other. Every time Docker +creates a container, it creates a pair of “peer” interfaces that are +like opposite ends of a pipe — a packet sent on one will be received on +the other. It gives one of the peers to the container to become its +`eth0` interface and keeps the other peer, with a unique name like +`vethAQI2QT`, out in the namespace of the host machine. By binding +every `veth*` interface to the `docker0` bridge, Docker creates a +virtual subnet shared between the host machine and every Docker +container. + +The remaining sections of this document explain all of the ways that you +can use Docker options and — in advanced cases — raw Linux networking +commands to tweak, supplement, or entirely replace Docker's default +networking configuration. + +## Quick guide to the options + +Here is a quick list of the networking-related Docker command-line +options, in case it helps you find the section below that you are +looking for. 
+ +Some networking command-line options can only be supplied to the Docker +server when it starts up, and cannot be changed once it is running: + + * `-b BRIDGE` or `--bridge=BRIDGE` — see + [Building your own bridge](#bridge-building) + + * `--bip=CIDR` — see + [Customizing docker0](#docker0) + + * `--default-gateway=IP_ADDRESS` — see + [How Docker networks a container](#container-networking) + + * `--default-gateway-v6=IP_ADDRESS` — see + [IPv6](#ipv6) + + * `--fixed-cidr` — see + [Customizing docker0](#docker0) + + * `--fixed-cidr-v6` — see + [IPv6](#ipv6) + + * `-H SOCKET...` or `--host=SOCKET...` — + This might sound like it would affect container networking, + but it actually faces in the other direction: + it tells the Docker server over what channels + it should be willing to receive commands + like “run container” and “stop container.” + + * `--icc=true|false` — see + [Communication between containers](#between-containers) + + * `--ip=IP_ADDRESS` — see + [Binding container ports](#binding-ports) + + * `--ipv6=true|false` — see + [IPv6](#ipv6) + + * `--ip-forward=true|false` — see + [Communication between containers and the wider world](#the-world) + + * `--iptables=true|false` — see + [Communication between containers](#between-containers) + + * `--mtu=BYTES` — see + [Customizing docker0](#docker0) + + * `--userland-proxy=true|false` — see + [Binding container ports](#binding-ports) + +There are two networking options that can be supplied either at startup +or when `docker run` is invoked. When provided at startup, set the +default value that `docker run` will later use if the options are not +specified: + + * `--dns=IP_ADDRESS...` — see + [Configuring DNS](#dns) + + * `--dns-search=DOMAIN...` — see + [Configuring DNS](#dns) + +Finally, several networking options can only be provided when calling +`docker run` because they specify something specific to one container: + + * `-h HOSTNAME` or `--hostname=HOSTNAME` — see + [Configuring DNS](#dns) and + [How Docker networks a container](#container-networking) + + * `--link=CONTAINER_NAME_or_ID:ALIAS` — see + [Configuring DNS](#dns) and + [Communication between containers](#between-containers) + + * `--net=bridge|none|container:NAME_or_ID|host` — see + [How Docker networks a container](#container-networking) + + * `--mac-address=MACADDRESS...` — see + [How Docker networks a container](#container-networking) + + * `-p SPEC` or `--publish=SPEC` — see + [Binding container ports](#binding-ports) + + * `-P` or `--publish-all=true|false` — see + [Binding container ports](#binding-ports) + +To supply networking options to the Docker server at startup, use the +`DOCKER_OPTS` variable in the Docker upstart configuration file. For Ubuntu, edit the +variable in `/etc/default/docker` or `/etc/sysconfig/docker` for CentOS. + +The following example illustrates how to configure Docker on Ubuntu to recognize a +newly built bridge. + +Edit the `/etc/default/docker` file: + + $ echo 'DOCKER_OPTS="-b=bridge0"' >> /etc/default/docker + +Then restart the Docker server. + + $ sudo service docker start + +For additional information on bridges, see [building your own +bridge](#building-your-own-bridge) later on this page. + +The following sections tackle all of the above topics in an order that we can move roughly from simplest to most complex. + +## Configuring DNS + + + +How can Docker supply each container with a hostname and DNS +configuration, without having to build a custom image with the hostname +written inside? 
Its trick is to overlay three crucial `/etc` files
inside the container with virtual files where it can write fresh
information. You can see this by running `mount` inside a container:

    $$ mount
    ...
    /dev/disk/by-uuid/1fec...ebdf on /etc/hostname type ext4 ...
    /dev/disk/by-uuid/1fec...ebdf on /etc/hosts type ext4 ...
    /dev/disk/by-uuid/1fec...ebdf on /etc/resolv.conf type ext4 ...
    ...

This arrangement allows Docker to do clever things like keep
`resolv.conf` up to date across all containers when the host machine
receives new configuration over DHCP later. The exact details of how
Docker maintains these files inside the container can change from one
Docker version to the next, so you should leave the files themselves
alone and use the following Docker options instead.

Four different options affect container domain name services.

 *  `-h HOSTNAME` or `--hostname=HOSTNAME` — sets the hostname by which
    the container knows itself. This is written into `/etc/hostname`,
    into `/etc/hosts` as the name of the container's host-facing IP
    address, and is the name that `/bin/bash` inside the container will
    display inside its prompt. But the hostname is not easy to see from
    outside the container. It will not appear in `docker ps` nor in the
    `/etc/hosts` file of any other container.

 *  `--link=CONTAINER_NAME_or_ID:ALIAS` — using this option as you `run` a
    container gives the new container's `/etc/hosts` an extra entry
    named `ALIAS` that points to the IP address of the container identified by
    `CONTAINER_NAME_or_ID`. This lets processes inside the new container
    connect to the hostname `ALIAS` without having to know its IP. The
    `--link=` option is discussed in more detail below, in the section
    [Communication between containers](#between-containers). Because
    Docker may assign a different IP address to the linked containers
    on restart, Docker updates the `ALIAS` entry in the `/etc/hosts` file
    of the recipient containers.

 *  `--dns=IP_ADDRESS...` — sets the IP addresses added as `nameserver`
    lines to the container's `/etc/resolv.conf` file. Processes in the
    container, when confronted with a hostname not in `/etc/hosts`, will
    connect to these IP addresses on port 53 looking for name resolution
    services.

 *  `--dns-search=DOMAIN...` — sets the domain names that are searched
    when a bare unqualified hostname is used inside of the container, by
    writing `search` lines into the container's `/etc/resolv.conf`.
    When a container process attempts to access `host` and the search
    domain `example.com` is set, for instance, the DNS logic will not
    only look up `host` but also `host.example.com`.
    Use `--dns-search=.` if you don't wish to set the search domain.

Regarding DNS settings, in the absence of either the `--dns=IP_ADDRESS...`
or the `--dns-search=DOMAIN...` option, Docker makes each container's
`/etc/resolv.conf` look like the `/etc/resolv.conf` of the host machine (where
the `docker` daemon runs). When creating the container's `/etc/resolv.conf`,
the daemon filters out all localhost IP address `nameserver` entries from
the host's original file.

Filtering is necessary because all localhost addresses on the host are
unreachable from the container's network. After this filtering, if there
are no more `nameserver` entries left in the container's `/etc/resolv.conf`
file, the daemon adds public Google DNS nameservers
(8.8.8.8 and 8.8.4.4) to the container's DNS configuration.
If IPv6 is +enabled on the daemon, the public IPv6 Google DNS nameservers will also +be added (2001:4860:4860::8888 and 2001:4860:4860::8844). + +> **Note**: +> If you need access to a host's localhost resolver, you must modify your +> DNS service on the host to listen on a non-localhost address that is +> reachable from within the container. + +You might wonder what happens when the host machine's +`/etc/resolv.conf` file changes. The `docker` daemon has a file change +notifier active which will watch for changes to the host DNS configuration. + +> **Note**: +> The file change notifier relies on the Linux kernel's inotify feature. +> Because this feature is currently incompatible with the overlay filesystem +> driver, a Docker daemon using "overlay" will not be able to take advantage +> of the `/etc/resolv.conf` auto-update feature. + +When the host file changes, all stopped containers which have a matching +`resolv.conf` to the host will be updated immediately to this newest host +configuration. Containers which are running when the host configuration +changes will need to stop and start to pick up the host changes due to lack +of a facility to ensure atomic writes of the `resolv.conf` file while the +container is running. If the container's `resolv.conf` has been edited since +it was started with the default configuration, no replacement will be +attempted as it would overwrite the changes performed by the container. +If the options (`--dns` or `--dns-search`) have been used to modify the +default host configuration, then the replacement with an updated host's +`/etc/resolv.conf` will not happen as well. + +> **Note**: +> For containers which were created prior to the implementation of +> the `/etc/resolv.conf` update feature in Docker 1.5.0: those +> containers will **not** receive updates when the host `resolv.conf` +> file changes. Only containers created with Docker 1.5.0 and above +> will utilize this auto-update feature. + +## Communication between containers and the wider world + + + +Whether a container can talk to the world is governed by two factors. + +1. Is the host machine willing to forward IP packets? This is governed + by the `ip_forward` system parameter. Packets can only pass between + containers if this parameter is `1`. Usually you will simply leave + the Docker server at its default setting `--ip-forward=true` and + Docker will go set `ip_forward` to `1` for you when the server + starts up. If you set `--ip-forward=false` and your system's kernel + has it enabled, the `--ip-forward=false` option has no effect. + To check the setting on your kernel or to turn it on manually: + + $ sysctl net.ipv4.conf.all.forwarding + net.ipv4.conf.all.forwarding = 0 + $ sysctl net.ipv4.conf.all.forwarding=1 + $ sysctl net.ipv4.conf.all.forwarding + net.ipv4.conf.all.forwarding = 1 + + Many using Docker will want `ip_forward` to be on, to at + least make communication *possible* between containers and + the wider world. + + May also be needed for inter-container communication if you are + in a multiple bridge setup. + +2. Do your `iptables` allow this particular connection? Docker will + never make changes to your system `iptables` rules if you set + `--iptables=false` when the daemon starts. Otherwise the Docker + server will append forwarding rules to the `DOCKER` filter chain. + +Docker will not delete or modify any pre-existing rules from the `DOCKER` +filter chain. This allows the user to create in advance any rules required +to further restrict access to the containers. 
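You can review the chain's current contents before adding rules of your own:

    $ sudo iptables -L DOCKER -n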
+ +Docker's forward rules permit all external source IPs by default. To allow +only a specific IP or network to access the containers, insert a negated +rule at the top of the `DOCKER` filter chain. For example, to restrict +external access such that *only* source IP 8.8.8.8 can access the +containers, the following rule could be added: + + $ iptables -I DOCKER -i ext_if ! -s 8.8.8.8 -j DROP + +## Communication between containers + + + +Whether two containers can communicate is governed, at the operating +system level, by two factors. + +1. Does the network topology even connect the containers' network + interfaces? By default Docker will attach all containers to a + single `docker0` bridge, providing a path for packets to travel + between them. See the later sections of this document for other + possible topologies. + +2. Do your `iptables` allow this particular connection? Docker will never + make changes to your system `iptables` rules if you set + `--iptables=false` when the daemon starts. Otherwise the Docker server + will add a default rule to the `FORWARD` chain with a blanket `ACCEPT` + policy if you retain the default `--icc=true`, or else will set the + policy to `DROP` if `--icc=false`. + +It is a strategic question whether to leave `--icc=true` or change it to +`--icc=false` so that +`iptables` will protect other containers — and the main host — from +having arbitrary ports probed or accessed by a container that gets +compromised. + +If you choose the most secure setting of `--icc=false`, then how can +containers communicate in those cases where you *want* them to provide +each other services? + +The answer is the `--link=CONTAINER_NAME_or_ID:ALIAS` option, which was +mentioned in the previous section because of its effect upon name +services. If the Docker daemon is running with both `--icc=false` and +`--iptables=true` then, when it sees `docker run` invoked with the +`--link=` option, the Docker server will insert a pair of `iptables` +`ACCEPT` rules so that the new container can connect to the ports +exposed by the other container — the ports that it mentioned in the +`EXPOSE` lines of its `Dockerfile`. Docker has more documentation on +this subject — see the [linking Docker containers](/userguide/dockerlinks) +page for further details. + +> **Note**: +> The value `CONTAINER_NAME` in `--link=` must either be an +> auto-assigned Docker name like `stupefied_pare` or else the name you +> assigned with `--name=` when you ran `docker run`. It cannot be a +> hostname, which Docker will not recognize in the context of the +> `--link=` option. + +You can run the `iptables` command on your Docker host to see whether +the `FORWARD` chain has a default policy of `ACCEPT` or `DROP`: + + # When --icc=false, you should see a DROP rule: + + $ sudo iptables -L -n + ... + Chain FORWARD (policy ACCEPT) + target prot opt source destination + DOCKER all -- 0.0.0.0/0 0.0.0.0/0 + DROP all -- 0.0.0.0/0 0.0.0.0/0 + ... + + # When a --link= has been created under --icc=false, + # you should see port-specific ACCEPT rules overriding + # the subsequent DROP policy for all other packets: + + $ sudo iptables -L -n + ... 
    Chain FORWARD (policy ACCEPT)
    target     prot opt source               destination
    DOCKER     all  --  0.0.0.0/0            0.0.0.0/0
    DROP       all  --  0.0.0.0/0            0.0.0.0/0

    Chain DOCKER (1 references)
    target     prot opt source               destination
    ACCEPT     tcp  --  172.17.0.2           172.17.0.3           tcp spt:80
    ACCEPT     tcp  --  172.17.0.3           172.17.0.2           tcp dpt:80

> **Note**:
> Docker is careful that its host-wide `iptables` rules fully expose
> containers to each other's raw IP addresses, so connections from one
> container to another should always appear to be originating from the
> first container's own IP address.

## Binding container ports to the host

By default Docker containers can make connections to the outside world,
but the outside world cannot connect to containers. Each outgoing
connection will appear to originate from one of the host machine's own
IP addresses thanks to an `iptables` masquerading rule on the host
machine that the Docker server creates when it starts:

    # You can see that the Docker server creates a
    # masquerade rule that lets containers connect
    # to IP addresses in the outside world:

    $ sudo iptables -t nat -L -n
    ...
    Chain POSTROUTING (policy ACCEPT)
    target     prot opt source               destination
    MASQUERADE  all  --  172.17.0.0/16       0.0.0.0/0
    ...

But if you want containers to accept incoming connections, you will need
to provide special options when invoking `docker run`. These options
are covered in more detail in the [Docker User Guide](/userguide/dockerlinks)
page. There are two approaches.

First, you can supply `-P` or `--publish-all=true|false` to `docker run`,
which is a blanket operation that identifies every port with an `EXPOSE`
line in the image's `Dockerfile` or an `--expose <port>` command-line flag
and maps it to a host port somewhere within an *ephemeral port range*. The
`docker port` command then needs to be used to inspect the created mapping.
The *ephemeral port range* is configured by the
`/proc/sys/net/ipv4/ip_local_port_range` kernel parameter, and typically
runs from 32768 to 61000.

Second, a mapping can be specified explicitly with the `-p SPEC` or
`--publish=SPEC` option. This lets you particularize which port on the
Docker server (which can be any port at all, not just one within the
*ephemeral port range*) you want mapped to which port in the container.

Either way, you should be able to peek at what Docker has accomplished
in your network stack by examining your NAT tables.

    # What your NAT rules might look like when Docker
    # is finished setting up a -P forward:

    $ iptables -t nat -L -n
    ...
    Chain DOCKER (2 references)
    target     prot opt source               destination
    DNAT       tcp  --  0.0.0.0/0            0.0.0.0/0            tcp dpt:49153 to:172.17.0.2:80

    # What your NAT rules might look like when Docker
    # is finished setting up a -p 80:80 forward:

    Chain DOCKER (2 references)
    target     prot opt source               destination
    DNAT       tcp  --  0.0.0.0/0            0.0.0.0/0            tcp dpt:80 to:172.17.0.2:80

You can see that Docker has exposed these container ports on `0.0.0.0`,
the wildcard IP address that will match any possible incoming port on
the host machine. If you want to be more restrictive and only allow
container services to be contacted through a specific external interface
on the host machine, you have two choices. When you invoke `docker run`
you can use either `-p IP:host_port:container_port` or `-p IP::port` to
specify the external interface for one particular binding.
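For example, a run that publishes a container's port 80 only on the host's
loopback interface might look like the following sketch (the image name is
just a placeholder):

    $ docker run -d -p 127.0.0.1:8080:80 example/webserver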
Or if you always want Docker port forwards to bind to one specific IP
address, you can edit your system-wide Docker server settings and add the
option `--ip=IP_ADDRESS`. Remember to restart your Docker server after
editing this setting.

> **Note**:
> With hairpin NAT enabled (`--userland-proxy=false`), container port
> exposure is achieved purely through iptables rules, and no attempt to
> bind the exposed port is ever made. This means that nothing prevents
> shadowing a previously listening service outside of Docker through
> exposing the same port for a container. In such a conflicting situation,
> the iptables rules created by Docker will take precedence and route
> traffic to the container.

The `--userland-proxy` parameter, true by default, provides a userland
implementation for inter-container and outside-to-container communication.
When disabled, Docker uses both an additional `MASQUERADE` iptables rule
and the `net.ipv4.route_localnet` kernel parameter, which together allow
the host machine to connect to a local container's exposed port through the
commonly used loopback address: this alternative is preferred for
performance reasons.

Again, this topic is covered without all of these low-level networking
details in the [Docker User Guide](/userguide/dockerlinks/) document if you
would like to use that as your port redirection reference instead.

## IPv6

As we are [running out of IPv4 addresses](http://en.wikipedia.org/wiki/IPv4_address_exhaustion)
the IETF has standardized an IPv4 successor, [Internet Protocol Version 6](http://en.wikipedia.org/wiki/IPv6),
in [RFC 2460](https://www.ietf.org/rfc/rfc2460.txt). Both protocols, IPv4 and
IPv6, reside on layer 3 of the [OSI model](http://en.wikipedia.org/wiki/OSI_model).

### IPv6 with Docker

By default, the Docker server configures the container network for IPv4
only. You can enable IPv4/IPv6 dualstack support by running the Docker
daemon with the `--ipv6` flag. Docker will set up the bridge `docker0` with
the IPv6 [link-local address](http://en.wikipedia.org/wiki/Link-local_address)
`fe80::1`.

By default, containers that are created will only get a link-local IPv6
address. To assign globally routable IPv6 addresses to your containers you
have to specify an IPv6 subnet to pick the addresses from. Set the IPv6
subnet via the `--fixed-cidr-v6` parameter when starting the Docker daemon:

    docker daemon --ipv6 --fixed-cidr-v6="2001:db8:1::/64"

The subnet for Docker containers should at least have a size of `/80`. This
way an IPv6 address can end with the container's MAC address and you
prevent NDP neighbor cache invalidation issues in the Docker layer.

With the `--fixed-cidr-v6` parameter set, Docker will add a new route to
the routing table, and IPv6 forwarding will be enabled (you may prevent
this by starting the Docker daemon with `--ip-forward=false`):

    $ ip -6 route add 2001:db8:1::/64 dev docker0
    $ sysctl net.ipv6.conf.default.forwarding=1
    $ sysctl net.ipv6.conf.all.forwarding=1

All traffic to the subnet `2001:db8:1::/64` will now be routed
via the `docker0` interface.

Be aware that IPv6 forwarding may interfere with your existing IPv6
configuration: if you are using Router Advertisements to get IPv6 settings
for your host's interfaces, you should set `accept_ra` to `2`; otherwise,
enabling IPv6 forwarding will result in Router Advertisements being
rejected.
E.g., if you want to configure `eth0` via Router Advertisements you should
set:

    $ sysctl net.ipv6.conf.eth0.accept_ra=2

![](/article-img/ipv6_basic_host_config.svg)

Every new container will get an IPv6 address from the defined subnet.
Furthermore, a default route will be added on `eth0` in the container via
the address specified by the daemon option `--default-gateway-v6` if
present, otherwise via `fe80::1`:

    docker run -it ubuntu bash -c "ip -6 addr show dev eth0; ip -6 route show"

    15: eth0:  mtu 1500
        inet6 2001:db8:1:0:0:242:ac11:3/64 scope global
           valid_lft forever preferred_lft forever
        inet6 fe80::42:acff:fe11:3/64 scope link
           valid_lft forever preferred_lft forever

    2001:db8:1::/64 dev eth0  proto kernel  metric 256
    fe80::/64 dev eth0  proto kernel  metric 256
    default via fe80::1 dev eth0  metric 1024

In this example the Docker container is assigned a link-local address with
the network suffix `/64` (here: `fe80::42:acff:fe11:3/64`) and a globally
routable IPv6 address (here: `2001:db8:1:0:0:242:ac11:3/64`). The container
will create connections to addresses outside of the `2001:db8:1::/64`
network via the link-local gateway at `fe80::1` on `eth0`.

Often servers or virtual machines get a `/64` IPv6 subnet assigned (e.g.,
`2001:db8:23:42::/64`). In this case you can split it up further and
provide Docker a `/80` subnet while using a separate `/80` subnet for other
applications on the host:

![](/article-img/ipv6_slash64_subnet_config.svg)

In this setup the subnet `2001:db8:23:42::/80` with a range from
`2001:db8:23:42:0:0:0:0` to `2001:db8:23:42:0:ffff:ffff:ffff` is attached
to `eth0`, with the host listening at `2001:db8:23:42::1`. The subnet
`2001:db8:23:42:1::/80` with an address range from `2001:db8:23:42:1:0:0:0`
to `2001:db8:23:42:1:ffff:ffff:ffff` is attached to `docker0` and will be
used by containers.

#### Using NDP proxying

If your Docker host is only part of an IPv6 subnet but has not been
assigned an IPv6 subnet of its own, you can use NDP proxying to connect
your containers to the internet via IPv6.
For example, your host has the IPv6 address `2001:db8::c001`, is part of
the subnet `2001:db8::/64`, and your IaaS provider allows you to configure
the IPv6 addresses `2001:db8::c000` to `2001:db8::c00f`:

    $ ip -6 addr show
    1: lo:  mtu 65536
        inet6 ::1/128 scope host
           valid_lft forever preferred_lft forever
    2: eth0:  mtu 1500 qlen 1000
        inet6 2001:db8::c001/64 scope global
           valid_lft forever preferred_lft forever
        inet6 fe80::601:3fff:fea1:9c01/64 scope link
           valid_lft forever preferred_lft forever

Let's split up the configurable address range into two subnets
`2001:db8::c000/125` and `2001:db8::c008/125`. The first one can be used by
the host itself, the latter by Docker:

    docker daemon --ipv6 --fixed-cidr-v6 2001:db8::c008/125

You will notice that the Docker subnet is within the subnet managed by your
router that is connected to `eth0`. This means all devices (containers)
with addresses from the Docker subnet are expected to be found within the
router subnet. Therefore the router thinks it can talk to these containers
directly.

![](/article-img/ipv6_ndp_proxying.svg)

As soon as the router wants to send an IPv6 packet to the first container,
it will transmit a neighbor solicitation request, asking "Who has
`2001:db8::c009`?" But it will get no answer, because no one on this subnet
has this address: the container with this address is hidden behind the
Docker host.
The Docker host has to listen to neighbor solicitation requests for the
container address and answer that it is the device responsible for the
address. This is done by a kernel feature called `NDP Proxy`. You can
enable it by executing:

    $ sysctl net.ipv6.conf.eth0.proxy_ndp=1

Now you can add the container's IPv6 address to the NDP proxy table:

    $ ip -6 neigh add proxy 2001:db8::c009 dev eth0

This command tells the kernel to answer incoming neighbor solicitation
requests regarding the IPv6 address `2001:db8::c009` on the device `eth0`.
As a consequence, all traffic to this IPv6 address will go to the Docker
host, which will forward it according to its routing table via the
`docker0` device to the container network:

    $ ip -6 route show
    2001:db8::c008/125 dev docker0  metric 1
    2001:db8::/64 dev eth0  proto kernel  metric 256

You have to execute the `ip -6 neigh add proxy ...` command for every IPv6
address in your Docker subnet. Unfortunately there is no functionality for
adding a whole subnet with one command. An alternative approach is to use
an NDP proxy daemon such as [ndppd](https://github.com/DanielAdolfsson/ndppd).

### Docker IPv6 cluster

#### Switched network environment

Using routable IPv6 addresses allows you to realize communication between
containers on different hosts. Let's have a look at a simple Docker IPv6
cluster example:

![](/article-img/ipv6_switched_network_example.svg)

The Docker hosts are in the `2001:db8:0::/64` subnet. Host1 is configured
to provide addresses from the `2001:db8:1::/64` subnet to its containers.
It has three routes configured:

- Route all traffic to `2001:db8:0::/64` via `eth0`
- Route all traffic to `2001:db8:1::/64` via `docker0`
- Route all traffic to `2001:db8:2::/64` via Host2 with IP `2001:db8::2`

Host1 also acts as a router on OSI layer 3. When one of the network clients
tries to contact a target that is specified in Host1's routing table, Host1
will forward the traffic accordingly. It acts as a router for all networks
it knows: `2001:db8::/64`, `2001:db8:1::/64`, and `2001:db8:2::/64`.

On Host2 we have nearly the same configuration. Host2's containers will get
IPv6 addresses from `2001:db8:2::/64`. Host2 has three routes configured:

- Route all traffic to `2001:db8:0::/64` via `eth0`
- Route all traffic to `2001:db8:2::/64` via `docker0`
- Route all traffic to `2001:db8:1::/64` via Host1 with IP `2001:db8:0::1`

The difference from Host1 is that the network `2001:db8:2::/64` is directly
attached to Host2 via its `docker0` interface, whereas Host2 reaches
`2001:db8:1::/64` via Host1's IPv6 address `2001:db8::1`.

This way every container is able to contact every other container. The
containers `Container1-*` share the same subnet and contact each other
directly. The traffic between `Container1-*` and `Container2-*` will be
routed via Host1 and Host2 because those containers do not share the same
subnet.

In a switched environment every host has to know all routes to every
subnet. You always have to update the hosts' routing tables once you add or
remove a host from the cluster.

Every configuration in the diagram that is shown below the dashed line is
handled by Docker: the `docker0` bridge IP address configuration, the route
to the Docker subnet on the host, the container IP addresses and the routes
on the containers. The configuration above the line is up to the user and
can be adapted to the individual environment.
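As a sketch, the cross-host routes above (the only ones not created
automatically by the kernel or by Docker) could be added with `ip -6
route`, using the addresses from the diagram:

    # On Host1: reach Host2's container subnet via Host2
    $ ip -6 route add 2001:db8:2::/64 via 2001:db8::2

    # On Host2: reach Host1's container subnet via Host1
    $ ip -6 route add 2001:db8:1::/64 via 2001:db8::1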
#### Routed network environment

In a routed network environment you replace the layer 2 switch with a layer
3 router. Now the hosts just have to know their default gateway (the
router) and the route to their own containers (managed by Docker). The
router holds all routing information about the Docker subnets. When you add
or remove a host in this environment, you just have to update the routing
table in the router instead of on every host.

![](/article-img/ipv6_routed_network_example.svg)

In this scenario containers of the same host can communicate directly with
each other. The traffic between containers on different hosts will be
routed via their hosts and the router. For example, a packet from
`Container1-1` to `Container2-1` will be routed through `Host1`, `Router`,
and `Host2` until it arrives at `Container2-1`.

To keep the IPv6 addresses short in this example a `/48` network is
assigned to every host. The hosts use a `/64` subnet of this for their own
services and one for Docker. When adding a third host you would add a route
for the subnet `2001:db8:3::/48` in the router and configure Docker on
Host3 with `--fixed-cidr-v6=2001:db8:3:1::/64`.

Remember that the subnet for Docker containers should at least have a size
of `/80`. This way an IPv6 address can end with the container's MAC address
and you prevent NDP neighbor cache invalidation issues in the Docker layer.
So if you have a `/64` for your whole environment, use `/76` subnets for
the hosts and `/80` for the containers. This way you can use 4096 hosts
with 16 `/80` subnets each.

Every configuration in the diagram that is visualized below the dashed line
is handled by Docker: the `docker0` bridge IP address configuration, the
route to the Docker subnet on the host, the container IP addresses and the
routes on the containers. The configuration above the line is up to the
user and can be adapted to the individual environment.

## Customizing docker0

By default, the Docker server creates and configures the host system's
`docker0` interface as an *Ethernet bridge* inside the Linux kernel that
can pass packets back and forth between other physical or virtual
network interfaces so that they behave as a single Ethernet network.

Docker configures `docker0` with an IP address and netmask so that the host
machine can both receive and send packets to containers connected to the
bridge, and gives it an MTU — the *maximum transmission unit* or largest
packet length that the interface will allow — of either 1,500 bytes or a
more specific value copied from the Docker host's interface that supports
its default route. These options are configurable at server startup:

 *  `--bip=CIDR` — supply a specific IP address and netmask for the
    `docker0` bridge, using standard CIDR notation like
    `192.168.1.5/24`.

 *  `--fixed-cidr=CIDR` — restrict the IP range from the `docker0` subnet,
    using standard CIDR notation like `172.16.1.0/28`. This range must
    be an IPv4 range for fixed IPs (e.g., 10.20.0.0/16) and must be a
    subset of the bridge IP range (`docker0` or set using `--bridge`). For
    example, with `--fixed-cidr=192.168.1.0/25`, IPs for your containers
    will be chosen from the first half of the `192.168.1.0/24` subnet.

 *  `--mtu=BYTES` — override the maximum packet length on `docker0`.
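For example, a hypothetical daemon invocation combining all three options,
using the sample values above, could look like this:

    docker daemon --bip=192.168.1.5/24 --fixed-cidr=192.168.1.0/25 --mtu=1500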
Once you have one or more containers up and running, you can confirm
that Docker has properly connected them to the `docker0` bridge by
running the `brctl` command on the host machine and looking at the
`interfaces` column of the output. Here is a host with two different
containers connected:

    # Display bridge info

    $ sudo brctl show
    bridge name     bridge id               STP enabled     interfaces
    docker0         8000.3a1d7362b4ee       no              veth65f9
                                                            vethdda6

If the `brctl` command is not installed on your Docker host, then on
Ubuntu you should be able to run `sudo apt-get install bridge-utils` to
install it.

Finally, the `docker0` Ethernet bridge settings are used every time you
create a new container. Docker selects a free IP address from the range
available on the bridge each time you `docker run` a new container, and
configures the container's `eth0` interface with that IP address and the
bridge's netmask. The Docker host's own IP address on the bridge is
used as the default gateway by which each container reaches the rest of
the Internet.

    # The network, as seen from a container

    $ docker run -i -t --rm base /bin/bash

    $$ ip addr show eth0
    24: eth0:  mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
        link/ether 32:6f:e0:35:57:91 brd ff:ff:ff:ff:ff:ff
        inet 172.17.0.3/16 scope global eth0
           valid_lft forever preferred_lft forever
        inet6 fe80::306f:e0ff:fe35:5791/64 scope link
           valid_lft forever preferred_lft forever

    $$ ip route
    default via 172.17.42.1 dev eth0
    172.17.0.0/16 dev eth0  proto kernel  scope link  src 172.17.0.3

    $$ exit

Remember that the Docker host will not be willing to forward container
packets out on to the Internet unless its `ip_forward` system setting is
`1` — see the section above on [Communication between
containers](#between-containers) for details.

## Building your own bridge

If you want to take Docker out of the business of creating its own
Ethernet bridge entirely, you can set up your own bridge before starting
Docker and use `-b BRIDGE` or `--bridge=BRIDGE` to tell Docker to use
your bridge instead. If you already have Docker up and running with its
old `docker0` still configured, you will probably want to begin by
stopping the service and removing the interface:

    # Stopping Docker and removing docker0

    $ sudo service docker stop
    $ sudo ip link set dev docker0 down
    $ sudo brctl delbr docker0
    $ sudo iptables -t nat -F POSTROUTING

Then, before starting the Docker service, create your own bridge and
give it whatever configuration you want. Here we will create a simple
enough bridge that we really could just have used the options in the
previous section to customize `docker0`, but it will be enough to
illustrate the technique.

    # Create our own bridge

    $ sudo brctl addbr bridge0
    $ sudo ip addr add 192.168.5.1/24 dev bridge0
    $ sudo ip link set dev bridge0 up

    # Confirming that our bridge is up and running

    $ ip addr show bridge0
    4: bridge0:  mtu 1500 qdisc noop state UP group default
        link/ether 66:38:d0:0d:76:18 brd ff:ff:ff:ff:ff:ff
        inet 192.168.5.1/24 scope global bridge0
           valid_lft forever preferred_lft forever

    # Tell Docker about it and restart (on Ubuntu)

    $ echo 'DOCKER_OPTS="-b=bridge0"' >> /etc/default/docker
    $ sudo service docker start

    # Confirming new outgoing NAT masquerade is set up

    $ sudo iptables -t nat -L -n
    ...
    Chain POSTROUTING (policy ACCEPT)
    target     prot opt source               destination
    MASQUERADE  all  --  192.168.5.0/24      0.0.0.0/0

The result should be that the Docker server starts successfully and is
now prepared to bind containers to the new bridge. After pausing to
verify the bridge's configuration, try creating a container — you will
see that its IP address is in your new IP address range, which Docker
will have auto-detected.

Just as we learned in the previous section, you can use the `brctl show`
command to see Docker add and remove interfaces from the bridge as you
start and stop containers, and can run `ip addr` and `ip route` inside a
container to see that it has been given an address in the bridge's IP
address range and has been told to use the Docker host's IP address on
the bridge as its default gateway to the rest of the Internet.

## How Docker networks a container

While Docker is under active development and continues to tweak and
improve its network configuration logic, the shell commands in this
section are rough equivalents to the steps that Docker takes when
configuring networking for each new container.

Let's review a few basics.

To communicate using the Internet Protocol (IP), a machine needs access
to at least one network interface at which packets can be sent and
received, and a routing table that defines the range of IP addresses
reachable through that interface. Network interfaces do not have to be
physical devices. In fact, the `lo` loopback interface available on
every Linux machine (and inside each Docker container) is entirely
virtual — the Linux kernel simply copies loopback packets directly from
the sender's memory into the receiver's memory.

Docker uses special virtual interfaces to let containers communicate
with the host machine — pairs of virtual interfaces called “peers” that
are linked inside of the host machine's kernel so that packets can
travel between them. They are simple to create, as we will see in a
moment.

The steps with which Docker configures a container are:

1. Create a pair of peer virtual interfaces.

2. Give one of them a unique name like `veth65f9`, keep it inside of
   the main Docker host, and bind it to `docker0` or whatever bridge
   Docker is supposed to be using.

3. Toss the other interface over the wall into the new container (which
   will already have been provided with an `lo` interface) and rename
   it to the much prettier name `eth0` since, inside of the container's
   separate and unique network interface namespace, there are no
   physical interfaces with which this name could collide.

4. Set the interface's MAC address according to the `--mac-address`
   parameter or generate a random one.

5. Give the container's `eth0` a new IP address from within the
   bridge's range of network addresses. The default route is set to the
   IP address passed to the Docker daemon using the `--default-gateway`
   option if specified, otherwise to the IP address that the Docker host
   owns on the bridge. The MAC address is generated from the IP address
   unless otherwise specified. This prevents ARP cache invalidation
   problems when a new container comes up with an IP used in the past by
   another container with another MAC.

With these steps complete, the container now possesses an `eth0`
(virtual) network card and will find itself able to communicate with
other containers and the rest of the Internet.
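For instance, the MAC address from step 4 can be pinned from the command
line; a minimal sketch (the MAC address is arbitrary, and `ubuntu` is just
an example image):

    $ docker run -it --mac-address=12:34:56:78:9a:bc ubuntu ip addr show eth0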
You can opt out of the above process for a particular container by
giving the `--net=` option to `docker run`, which takes four possible
values.

 *  `--net=bridge` — The default action, that connects the container to
    the Docker bridge as described above.

 *  `--net=host` — Tells Docker to skip placing the container inside of
    a separate network stack. In essence, this choice tells Docker to
    **not containerize the container's networking**! While container
    processes will still be confined to their own filesystem and process
    list and resource limits, a quick `ip addr` command will show you
    that, network-wise, they live “outside” in the main Docker host and
    have full access to its network interfaces. Note that this does
    **not** let the container reconfigure the host network stack — that
    would require `--privileged=true` — but it does let container
    processes open low-numbered ports like any other root process.
    It also allows the container to access local network services
    like D-bus. This can lead to processes in the container being
    able to do unexpected things like
    [restart your computer](https://github.com/docker/docker/issues/6401).
    You should use this option with caution.

 *  `--net=container:NAME_or_ID` — Tells Docker to put this container's
    processes inside of the network stack that has already been created
    inside of another container. The new container's processes will be
    confined to their own filesystem and process list and resource
    limits, but will share the same IP address and port numbers as the
    first container, and processes on the two containers will be able to
    connect to each other over the loopback interface.

 *  `--net=none` — Tells Docker to put the container inside of its own
    network stack but not to take any steps to configure its network,
    leaving you free to build any of the custom configurations explored
    in the last few sections of this document.

To get an idea of the steps that are necessary if you use `--net=none`
as described in that last bullet point, here are the commands that you
would run to reach roughly the same configuration as if you had let
Docker do all of the configuration:

    # At one shell, start a container and
    # leave its shell idle and running

    $ docker run -i -t --rm --net=none base /bin/bash
    root@63f36fc01b5f:/#

    # At another shell, learn the container process ID
    # and create its namespace entry in /var/run/netns/
    # for the "ip netns" command we will be using below

    $ docker inspect -f '{{.State.Pid}}' 63f36fc01b5f
    2778
    $ pid=2778
    $ sudo mkdir -p /var/run/netns
    $ sudo ln -s /proc/$pid/ns/net /var/run/netns/$pid

    # Check the bridge's IP address and netmask

    $ ip addr show docker0
    21: docker0: ...
    inet 172.17.42.1/16 scope global docker0
    ...
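    # Optionally confirm that the namespace entry is now
    # visible to "ip netns" (an extra sanity check, not
    # strictly required)

    $ sudo ip netns list
    2778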
    # Create a pair of "peer" interfaces A and B,
    # bind the A end to the bridge, and bring it up

    $ sudo ip link add A type veth peer name B
    $ sudo brctl addif docker0 A
    $ sudo ip link set A up

    # Place B inside the container's network namespace,
    # rename it to eth0, and activate it with a free IP

    $ sudo ip link set B netns $pid
    $ sudo ip netns exec $pid ip link set dev B name eth0
    $ sudo ip netns exec $pid ip link set eth0 address 12:34:56:78:9a:bc
    $ sudo ip netns exec $pid ip link set eth0 up
    $ sudo ip netns exec $pid ip addr add 172.17.42.99/16 dev eth0
    $ sudo ip netns exec $pid ip route add default via 172.17.42.1

At this point your container should be able to perform networking
operations as usual.

When you finally exit the shell and Docker cleans up the container, the
network namespace is destroyed along with our virtual `eth0` — whose
destruction in turn destroys interface `A` out in the Docker host and
automatically un-registers it from the `docker0` bridge. So everything
gets cleaned up without our having to run any extra commands! Well,
almost everything:

    # Clean up dangling symlinks in /var/run/netns

    find -L /var/run/netns -type l -delete

Also note that while the script above used the modern `ip` command instead
of old deprecated wrappers like `ifconfig` and `route`, these older
commands would also have worked inside of our container. The `ip addr`
command can be typed as `ip a` if you are in a hurry.

Finally, note the importance of the `ip netns exec` command, which let
us reach inside and configure a network namespace as root. The same
commands would not have worked if run inside of the container, because
part of safe containerization is that Docker strips container processes
of the right to configure their own networks. Using `ip netns exec` is
what let us finish up the configuration without having to take the
dangerous step of running the container itself with `--privileged=true`.

## Tools and examples

Before diving into the following sections on custom network topologies,
you might be interested in glancing at a few external tools or examples
of the same kinds of configuration. Here are two:

 *  Jérôme Petazzoni has created a `pipework` shell script to help you
    connect together containers in arbitrarily complex scenarios:
    <https://github.com/jpetazzo/pipework>

 *  Brandon Rhodes has created a whole network topology of Docker
    containers for the next edition of Foundations of Python Network
    Programming that includes routing, NAT'd firewalls, and servers that
    offer HTTP, SMTP, POP, IMAP, Telnet, SSH, and FTP:
    <https://github.com/brandon-rhodes/fopnp/tree/m/playground>

Both tools use networking commands very much like the ones you saw in
the previous section, and the ones you will see in the following sections.

## Building a point-to-point connection

By default, Docker attaches all containers to the virtual subnet
implemented by `docker0`. You can create containers that are each
connected to some different virtual subnet by creating your own bridge
as shown in [Building your own bridge](#bridge-building), starting each
container with `docker run --net=none`, and then attaching the
containers to your bridge with the shell commands shown in [How Docker
networks a container](#container-networking).

But sometimes you want two particular containers to be able to
communicate directly without the added complexity of both being bound to
a host-wide Ethernet bridge.
The solution is simple: when you create your pair of peer interfaces,
simply throw *both* of them into containers, and configure them as
classic point-to-point links. The two containers will then be able to
communicate directly (provided you manage to tell each container the
other's IP address, of course). You might adjust the instructions of
the previous section to go something like this:

    # Start up two containers in two terminal windows

    $ docker run -i -t --rm --net=none base /bin/bash
    root@1f1f4c1f931a:/#

    $ docker run -i -t --rm --net=none base /bin/bash
    root@12e343489d2f:/#

    # Learn the container process IDs
    # and create their namespace entries

    $ docker inspect -f '{{.State.Pid}}' 1f1f4c1f931a
    2989
    $ docker inspect -f '{{.State.Pid}}' 12e343489d2f
    3004
    $ sudo mkdir -p /var/run/netns
    $ sudo ln -s /proc/2989/ns/net /var/run/netns/2989
    $ sudo ln -s /proc/3004/ns/net /var/run/netns/3004

    # Create the "peer" interfaces and hand them out

    $ sudo ip link add A type veth peer name B

    $ sudo ip link set A netns 2989
    $ sudo ip netns exec 2989 ip addr add 10.1.1.1/32 dev A
    $ sudo ip netns exec 2989 ip link set A up
    $ sudo ip netns exec 2989 ip route add 10.1.1.2/32 dev A

    $ sudo ip link set B netns 3004
    $ sudo ip netns exec 3004 ip addr add 10.1.1.2/32 dev B
    $ sudo ip netns exec 3004 ip link set B up
    $ sudo ip netns exec 3004 ip route add 10.1.1.1/32 dev B

The two containers should now be able to ping each other and make
connections successfully. Point-to-point links like this do not depend
on a subnet or a netmask, but on the bare assertion made by `ip route`
that some other single IP address is connected to a particular network
interface.

Note that point-to-point links can be safely combined with other kinds
of network connectivity — there is no need to start the containers with
`--net=none` if you want point-to-point links to be an addition to the
container's normal networking instead of a replacement.

A final permutation of this pattern is to create the point-to-point link
between the Docker host and one container, which would allow the host to
communicate with that one container on some single IP address and thus
communicate “out-of-band” of the bridge that connects the other, more
usual containers. But unless you have very specific networking needs
that drive you to such a solution, it is probably far preferable to use
`--icc=false` to lock down inter-container communication, as we explored
earlier.

## Editing networking config files

Starting with Docker v1.2.0, you can edit `/etc/hosts`, `/etc/hostname`
and `/etc/resolv.conf` in a running container. This is useful if you need
to install BIND or other services that might override one of those files.

Note, however, that changes to these files will not be saved by
`docker commit`, nor will they be saved during `docker run`.
That means they won't be saved in the image, nor will they persist when a
container is restarted; they will only "stick" in a running container.

diff --git a/docs/articles/puppet.md b/docs/articles/puppet.md
new file mode 100644
index 00000000..34f3712d
--- /dev/null
+++ b/docs/articles/puppet.md
@@ -0,0 +1,99 @@

# Using Puppet

> *Note:* Please note this is a community-contributed installation path.
> The only `official` installation is using the
> [*Ubuntu*](/installation/ubuntulinux) installation path. This version
> may sometimes be out of date.
## Requirements

To use this guide you'll need a working installation of Puppet from
[Puppet Labs](https://puppetlabs.com).

The module also currently uses the official PPA, so it only works with
Ubuntu.

## Installation

The module is available on the [Puppet
Forge](https://forge.puppetlabs.com/garethr/docker/) and can be
installed using the built-in module tool.

    $ puppet module install garethr/docker

It can also be found on
[GitHub](https://github.com/garethr/garethr-docker) if you would rather
download the source.

## Usage

The module provides a Puppet class for installing Docker and two defined
types for managing images and containers.

### Installation

    include 'docker'

### Images

The next step is probably to install a Docker image. For this, we have a
defined type which can be used like so:

    docker::image { 'ubuntu': }

This is equivalent to running:

    $ docker pull ubuntu

Note that the image will only be downloaded if an image of that name does
not already exist. Because this downloads a large binary, the first run can
take a while; for that reason, this define turns off the default 5-minute
timeout for the exec type. Note that you can also remove images you no
longer need with:

    docker::image { 'ubuntu':
      ensure => 'absent',
    }

### Containers

Now that you have an image, you can run commands within a container
managed by Docker.

    docker::run { 'helloworld':
      image   => 'ubuntu',
      command => '/bin/sh -c "while true; do echo hello world; sleep 1; done"',
    }

This is equivalent to running the following command, but under upstart:

    $ docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done"

`docker::run` also accepts a number of optional parameters:

    docker::run { 'helloworld':
      image        => 'ubuntu',
      command      => '/bin/sh -c "while true; do echo hello world; sleep 1; done"',
      ports        => ['4444', '4555'],
      volumes      => ['/var/lib/couchdb', '/var/log'],
      volumes_from => '6446ea52fbc9',
      memory_limit => 10485760, # bytes
      username     => 'example',
      hostname     => 'example.com',
      env          => ['FOO=BAR', 'FOO2=BAR2'],
      dns          => ['8.8.8.8', '8.8.4.4'],
    }

> *Note:*
> The `ports`, `env`, `dns` and `volumes` attributes can be set with either
> a single string or, as above, with an array of values.

diff --git a/docs/articles/registry_mirror.md b/docs/articles/registry_mirror.md
new file mode 100644
index 00000000..97049e45
--- /dev/null
+++ b/docs/articles/registry_mirror.md
@@ -0,0 +1,18 @@

# Run a local registry mirror

The original content was deprecated. [An archived
version](https://docs.docker.com/v1.6/articles/registry_mirror) is available
in the 1.7 documentation. For information about configuring mirrors with the
latest Docker Registry version, please file a support request with [the
Distribution project](https://github.com/docker/distribution/issues).

diff --git a/docs/articles/runmetrics.md b/docs/articles/runmetrics.md
new file mode 100644
index 00000000..0be55b4e
--- /dev/null
+++ b/docs/articles/runmetrics.md
@@ -0,0 +1,463 @@

# Runtime metrics

## Docker stats

You can use the `docker stats` command to live stream a container's
runtime metrics. The command supports CPU, memory usage, memory limit,
and network IO metrics.
The following is a sample output from the `docker stats` command:

    $ docker stats redis1 redis2
    CONTAINER           CPU %               MEM USAGE/LIMIT     MEM %               NET I/O
    redis1              0.07%               796 KB/64 MB        1.21%               788 B/648 B
    redis2              0.07%               2.746 MB/64 MB      4.29%               1.266 KB/648 B

The [docker stats](/reference/commandline/stats/) reference page has
more details about the `docker stats` command.

## Control groups

Linux Containers rely on [control groups](
https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt)
which not only track groups of processes, but also expose metrics about
CPU, memory, and block I/O usage. You can access those metrics and
obtain network usage metrics as well. This is relevant for "pure" LXC
containers, as well as for Docker containers.

Control groups are exposed through a pseudo-filesystem. In recent
distros, you should find this filesystem under `/sys/fs/cgroup`. Under
that directory, you will see multiple sub-directories, called `devices`,
`freezer`, `blkio`, etc.; each sub-directory actually corresponds to a
different cgroup hierarchy.

On older systems, the control groups might be mounted on `/cgroup`, without
distinct hierarchies. In that case, instead of seeing the sub-directories,
you will see a bunch of files in that directory, and possibly some
directories corresponding to existing containers.

To figure out where your control groups are mounted, you can run:

    $ grep cgroup /proc/mounts

## Enumerating cgroups

You can look into `/proc/cgroups` to see the different control group
subsystems known to the system, the hierarchy they belong to, and how many
groups they contain.

You can also look at `/proc/<pid>/cgroup` to see which control groups a
process belongs to. The control group will be shown as a path relative to
the root of the hierarchy mountpoint; e.g., `/` means “this process has not
been assigned into a particular group”, while `/lxc/pumpkin` means that the
process is likely to be a member of a container named `pumpkin`.

## Finding the cgroup for a given container

For each container, one cgroup will be created in each hierarchy. On
older systems with older versions of the LXC userland tools, the name of
the cgroup will be the name of the container. With more recent versions
of the LXC tools, the cgroup will be `lxc/<container_name>`.

For Docker containers using cgroups, the container name will be the full
ID or long ID of the container. If a container shows up as ae836c95b4c3
in `docker ps`, its long ID might be something like
`ae836c95b4c3c9e9179e0e91015512da89fdec91612f63cebae57df9a5444c79`. You can
look it up with `docker inspect` or `docker ps --no-trunc`.

Putting everything together, to look at the memory metrics for a Docker
container, take a look at `/sys/fs/cgroup/memory/lxc/<longid>/`.

## Metrics from cgroups: memory, CPU, block I/O

For each subsystem (memory, CPU, and block I/O), you will find one or
more pseudo-files containing statistics.

### Memory metrics: `memory.stat`

Memory metrics are found in the "memory" cgroup. Note that the memory
control group adds a little overhead, because it does very fine-grained
accounting of the memory usage on your host. Therefore, many distros
chose to not enable it by default. Generally, to enable it, all you have
to do is to add some kernel command-line parameters:
`cgroup_enable=memory swapaccount=1`.

The metrics are in the pseudo-file `memory.stat`.
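For example, assuming the container's long ID is stored in a hypothetical
shell variable `$LONGID`, you could dump the file directly:

    $ cat /sys/fs/cgroup/memory/lxc/$LONGID/memory.stat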
Here is what it will look like:

    cache 11492564992
    rss 1930993664
    mapped_file 306728960
    pgpgin 406632648
    pgpgout 403355412
    swap 0
    pgfault 728281223
    pgmajfault 1724
    inactive_anon 46608384
    active_anon 1884520448
    inactive_file 7003344896
    active_file 4489052160
    unevictable 32768
    hierarchical_memory_limit 9223372036854775807
    hierarchical_memsw_limit 9223372036854775807
    total_cache 11492564992
    total_rss 1930993664
    total_mapped_file 306728960
    total_pgpgin 406632648
    total_pgpgout 403355412
    total_swap 0
    total_pgfault 728281223
    total_pgmajfault 1724
    total_inactive_anon 46608384
    total_active_anon 1884520448
    total_inactive_file 7003344896
    total_active_file 4489052160
    total_unevictable 32768

The first half (without the `total_` prefix) contains statistics relevant
to the processes within the cgroup, excluding sub-cgroups. The second half
(with the `total_` prefix) includes sub-cgroups as well.

Some metrics are "gauges", i.e., values that can increase or decrease
(e.g., swap, the amount of swap space used by the members of the cgroup).
Some others are "counters", i.e., values that can only go up, because
they represent occurrences of a specific event (e.g., pgfault, which
indicates the number of page faults which happened since the creation of
the cgroup; this number can never decrease).

 -  **cache:**
    the amount of memory used by the processes of this control group
    that can be associated precisely with a block on a block device.
    When you read from and write to files on disk, this amount will
    increase. This will be the case if you use "conventional" I/O
    (`open`, `read`, `write` syscalls) as well as mapped files (with
    `mmap`). It also accounts for the memory used by `tmpfs` mounts,
    though the reasons are unclear.

 -  **rss:**
    the amount of memory that *doesn't* correspond to anything on disk:
    stacks, heaps, and anonymous memory maps.

 -  **mapped_file:**
    indicates the amount of memory mapped by the processes in the
    control group. It doesn't give you information about *how much*
    memory is used; it rather tells you *how* it is used.

 -  **pgfault and pgmajfault:**
    indicate the number of times that a process of the cgroup triggered
    a "page fault" and a "major fault", respectively. A page fault
    happens when a process accesses a part of its virtual memory space
    which is nonexistent or protected. The former can happen if the
    process is buggy and tries to access an invalid address (it will
    then be sent a `SIGSEGV` signal, typically killing it with the
    famous `Segmentation fault` message). The latter can happen when the
    process reads from a memory zone which has been swapped out, or
    which corresponds to a mapped file: in that case, the kernel will
    load the page from disk, and let the CPU complete the memory access.
    It can also happen when the process writes to a copy-on-write memory
    zone: likewise, the kernel will preempt the process, duplicate the
    memory page, and resume the write operation on the process's own
    copy of the page. "Major" faults happen when the kernel actually has
    to read the data from disk. When it just has to duplicate an
    existing page, or allocate an empty page, it's a regular (or
    "minor") fault.

 -  **swap:**
    the amount of swap currently used by the processes in this cgroup.

 -  **active_anon and inactive_anon:**
    the amount of *anonymous* memory that has been identified as
    respectively *active* and *inactive* by the kernel.
"Anonymous" + memory is the memory that is *not* linked to disk pages. In other + words, that's the equivalent of the rss counter described above. In + fact, the very definition of the rss counter is **active_anon** + + **inactive_anon** - **tmpfs** (where tmpfs is the amount of memory + used up by `tmpfs` filesystems mounted by this + control group). Now, what's the difference between "active" and + "inactive"? Pages are initially "active"; and at regular intervals, + the kernel sweeps over the memory, and tags some pages as + "inactive". Whenever they are accessed again, they are immediately + retagged "active". When the kernel is almost out of memory, and time + comes to swap out to disk, the kernel will swap "inactive" pages. + + - **active_file and inactive_file:** + cache memory, with *active* and *inactive* similar to the *anon* + memory above. The exact formula is cache = **active_file** + + **inactive_file** + **tmpfs**. The exact rules used by the kernel + to move memory pages between active and inactive sets are different + from the ones used for anonymous memory, but the general principle + is the same. Note that when the kernel needs to reclaim memory, it + is cheaper to reclaim a clean (=non modified) page from this pool, + since it can be reclaimed immediately (while anonymous pages and + dirty/modified pages have to be written to disk first). + + - **unevictable:** + the amount of memory that cannot be reclaimed; generally, it will + account for memory that has been "locked" with `mlock`. + It is often used by crypto frameworks to make sure that + secret keys and other sensitive material never gets swapped out to + disk. + + - **memory and memsw limits:** + These are not really metrics, but a reminder of the limits applied + to this cgroup. The first one indicates the maximum amount of + physical memory that can be used by the processes of this control + group; the second one indicates the maximum amount of RAM+swap. + +Accounting for memory in the page cache is very complex. If two +processes in different control groups both read the same file +(ultimately relying on the same blocks on disk), the corresponding +memory charge will be split between the control groups. It's nice, but +it also means that when a cgroup is terminated, it could increase the +memory usage of another cgroup, because they are not splitting the cost +anymore for those memory pages. + +### CPU metrics: `cpuacct.stat` + +Now that we've covered memory metrics, everything else will look very +simple in comparison. CPU metrics will be found in the +`cpuacct` controller. + +For each container, you will find a pseudo-file `cpuacct.stat`, +containing the CPU usage accumulated by the processes of the container, +broken down between `user` and `system` time. If you're not familiar +with the distinction, `user` is the time during which the processes were +in direct control of the CPU (i.e., executing process code), and `system` +is the time during which the CPU was executing system calls on behalf of +those processes. + +Those times are expressed in ticks of 1/100th of a second. Actually, +they are expressed in "user jiffies". There are `USER_HZ` +*"jiffies"* per second, and on x86 systems, +`USER_HZ` is 100. This used to map exactly to the +number of scheduler "ticks" per second; but with the advent of higher +frequency scheduling, as well as [tickless kernels]( +http://lwn.net/Articles/549580/), the number of kernel ticks +wasn't relevant anymore. 
It stuck around anyway, mainly for legacy and compatibility reasons.

### Block I/O metrics

Block I/O is accounted in the `blkio` controller.
Different metrics are scattered across different files. While you can
find in-depth details in the [blkio-controller](
https://www.kernel.org/doc/Documentation/cgroups/blkio-controller.txt)
file in the kernel documentation, here is a short list of the most
relevant ones:

 -  **blkio.sectors:**
    contains the number of 512-byte sectors read and written by the
    processes that are members of the cgroup, device by device. Reads
    and writes are merged in a single counter.

 -  **blkio.io_service_bytes:**
    indicates the number of bytes read and written by the cgroup. It has
    4 counters per device, because for each device, it differentiates
    between synchronous vs. asynchronous I/O, and reads vs. writes.

 -  **blkio.io_serviced:**
    the number of I/O operations performed, regardless of their size. It
    also has 4 counters per device.

 -  **blkio.io_queued:**
    indicates the number of I/O operations currently queued for this
    cgroup. In other words, if the cgroup isn't doing any I/O, this will
    be zero. Note that the opposite is not true. In other words, if
    there is no I/O queued, it does not mean that the cgroup is idle
    (I/O-wise). It could be doing purely synchronous reads on an
    otherwise quiescent device, which is therefore able to handle them
    immediately, without queuing. Also, while it is helpful to figure
    out which cgroup is putting stress on the I/O subsystem, keep in
    mind that it is a relative quantity. Even if a process group does
    not perform more I/O, its queue size can increase just because the
    device load increases because of other devices.

## Network metrics

Network metrics are not exposed directly by control groups. There is a
good explanation for that: network interfaces exist within the context
of *network namespaces*. The kernel could probably accumulate metrics
about packets and bytes sent and received by a group of processes, but
those metrics wouldn't be very useful. You want per-interface metrics
(because traffic happening on the local `lo` interface doesn't really
count). But since processes in a single cgroup can belong to multiple
network namespaces, those metrics would be harder to interpret: multiple
network namespaces means multiple `lo` interfaces, potentially multiple
`eth0` interfaces, etc.; so this is why there is no easy way to gather
network metrics with control groups.

Instead we can gather network metrics from other sources:

### IPtables

IPtables (or rather, the netfilter framework for which iptables is just
an interface) can do some serious accounting.

For instance, you can set up a rule to account for the outbound HTTP
traffic on a web server:

    $ iptables -I OUTPUT -p tcp --sport 80

There is no `-j` or `-g` flag, so the rule will just count matched
packets and go to the following rule.

Later, you can check the values of the counters with:

    $ iptables -nxvL OUTPUT

Technically, `-n` is not required, but it will prevent iptables from
doing DNS reverse lookups, which are probably useless in this scenario.

Counters include packets and bytes. If you want to set up metrics for
container traffic like this, you could execute a `for` loop to add two
`iptables` rules per container IP address (one in each direction) in the
`FORWARD` chain.
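A minimal sketch of such a loop, assuming a hypothetical shell variable
`$CONTAINER_IPS` that lists the container addresses:

    $ for ip in $CONTAINER_IPS; do    # hypothetical list of container IPs
    >     iptables -I FORWARD -s $ip    # counts packets from the container
    >     iptables -I FORWARD -d $ip    # counts packets to the container
    > done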
This will only meter traffic going through the NAT layer; you will also
have to add traffic going through the userland proxy.

Then, you will need to check those counters on a regular basis. If you
happen to use `collectd`, there is a [nice plugin](https://collectd.org/wiki/index.php/Plugin:IPTables)
to automate iptables counters collection.

### Interface-level counters

Since each container has a virtual Ethernet interface, you might want to
directly check the TX and RX counters of this interface. You will notice
that each container is associated with a virtual Ethernet interface in
your host, with a name like `vethKk8Zqi`. Figuring out which interface
corresponds to which container is, unfortunately, difficult.

But for now, the best way is to check the metrics *from within the
containers*. To accomplish this, you can run an executable from the host
environment within the network namespace of a container using **ip-netns
magic**.

The `ip netns exec` command will let you execute any program (present in
the host system) within any network namespace visible to the current
process. This means that your host will be able to enter the network
namespace of your containers, but your containers won't be able to access
the host, nor their sibling containers. Containers will be able to “see”
and affect their sub-containers, though.

The exact format of the command is:

    $ ip netns exec <nsname> <command...>

For example:

    $ ip netns exec mycontainer netstat -i

`ip netns` finds the "mycontainer" container by using namespaces
pseudo-files. Each process belongs to one network namespace, one PID
namespace, one `mnt` namespace, etc., and those namespaces are
materialized under `/proc/<pid>/ns/`. For example, the network namespace
of PID 42 is materialized by the pseudo-file `/proc/42/ns/net`.

When you run `ip netns exec mycontainer ...`, it expects
`/var/run/netns/mycontainer` to be one of those pseudo-files. (Symlinks
are accepted.)

In other words, to execute a command within the network namespace of a
container, we need to:

- Find out the PID of any process within the container that we want to
  investigate;
- Create a symlink from `/var/run/netns/<somename>` to
  `/proc/<thepid>/ns/net`;
- Execute `ip netns exec <somename> ...`.

Please review [*Enumerating Cgroups*](#enumerating-cgroups) to learn how
to find the cgroup of a process running in the container whose network
usage you want to measure. From there, you can examine the pseudo-file
named `tasks`, which contains the PIDs that are in the control group
(i.e., in the container). Pick any one of them.

Putting everything together, if the "short ID" of a container is held in
the environment variable `$CID`, then you can do this:

    $ TASKS=/sys/fs/cgroup/devices/$CID*/tasks
    $ PID=$(head -n 1 $TASKS)
    $ mkdir -p /var/run/netns
    $ ln -sf /proc/$PID/ns/net /var/run/netns/$CID
    $ ip netns exec $CID netstat -i

## Tips for high-performance metric collection

Note that running a new process each time you want to update metrics is
(relatively) expensive. If you want to collect metrics at high
resolutions, and/or over a large number of containers (think 1000
containers on a single host), you do not want to fork a new process each
time.

Here is how to collect metrics from a single process. You will have to
write your metric collector in C (or any language that lets you do
low-level system calls). You need to use a special system call,
`setns()`, which lets the current process enter any arbitrary namespace.
It requires, however, an open file descriptor to the namespace
pseudo-file (remember: that's the pseudo-file in `/proc/<pid>/ns/net`).

However, there is a catch: you must not keep this file descriptor open.
If you do, when the last process of the control group exits, the
namespace will not be destroyed, and its network resources (like the
virtual interface of the container) will stay around forever (or until
you close that file descriptor).

The right approach would be to keep track of the first PID of each
container, and re-open the namespace pseudo-file each time.

## Collecting metrics when a container exits

Sometimes, you do not care about real time metric collection, but when a
container exits, you want to know how much CPU, memory, etc. it has
used.

Docker makes this difficult because it relies on `lxc-start`, which
carefully cleans up after itself, but it is still possible. It is
usually easier to collect metrics at regular intervals (e.g., every
minute, with the collectd LXC plugin) and rely on that instead.

But, if you'd still like to gather the stats when a container stops,
here is how:

For each container, start a collection process, and move it to the
control groups that you want to monitor by writing its PID to the tasks
file of the cgroup. The collection process should periodically re-read
the tasks file to check if it's the last process of the control group.
(If you also want to collect network statistics as explained in the
previous section, you should also move the process to the appropriate
network namespace.)

When the container exits, `lxc-start` will try to delete the control
groups. It will fail, since the control group is still in use; but
that's fine. Your process should now detect that it is the only one
remaining in the group. Now is the right time to collect all the metrics
you need!

Finally, your process should move itself back to the root control group,
and remove the container control group. To remove a control group, just
`rmdir` its directory. It's counter-intuitive to `rmdir` a directory
while it still contains files; but remember that this is a
pseudo-filesystem, so the usual rules don't apply. After the cleanup is
done, the collection process can exit safely.

diff --git a/docs/articles/security.md b/docs/articles/security.md
new file mode 100644
index 00000000..9f21b0b6
--- /dev/null
+++ b/docs/articles/security.md
@@ -0,0 +1,283 @@

# Docker security

There are four major areas to consider when reviewing Docker security:

 -  the intrinsic security of the kernel and its support for
    namespaces and cgroups;
 -  the attack surface of the Docker daemon itself;
 -  loopholes in the container configuration profile, either by default,
    or when customized by users;
 -  the "hardening" security features of the kernel and how they
    interact with containers.

## Kernel namespaces

Docker containers are very similar to LXC containers, and they have
similar security features. When you start a container with
`docker run`, behind the scenes Docker creates a set of namespaces and
control groups for the container.

**Namespaces provide the first and most straightforward form of
isolation**: processes running within a container cannot see, and even
less affect, processes running in another container, or in the host
system.

**Each container also gets its own network stack**, meaning that a
container doesn't get privileged access to the sockets or interfaces
of another container.
diff --git a/docs/articles/security.md b/docs/articles/security.md
new file mode 100644
index 00000000..9f21b0b6
--- /dev/null
+++ b/docs/articles/security.md
@@ -0,0 +1,283 @@
+
+
+# Docker security
+
+There are four major areas to consider when reviewing Docker security:
+
+ - the intrinsic security of the kernel and its support for
+   namespaces and cgroups;
+ - the attack surface of the Docker daemon itself;
+ - loopholes in the container configuration profile, either by default,
+   or when customized by users;
+ - the "hardening" security features of the kernel and how they
+   interact with containers.
+
+## Kernel namespaces
+
+Docker containers are very similar to LXC containers, and they have
+similar security features. When you start a container with
+`docker run`, behind the scenes Docker creates a set of namespaces and control
+groups for the container.
+
+**Namespaces provide the first and most straightforward form of
+isolation**: processes running within a container cannot see, much
+less affect, processes running in another container, or in the host
+system.
+
+**Each container also gets its own network stack**, meaning that a
+container doesn't get privileged access to the sockets or interfaces
+of another container. Of course, if the host system is set up
+accordingly, containers can interact with each other through their
+respective network interfaces — just like they can interact with
+external hosts. When you specify public ports for your containers or use
+[*links*](/userguide/dockerlinks),
+IP traffic is allowed between containers. They can ping each other,
+send/receive UDP packets, and establish TCP connections, but that can be
+restricted if necessary. From a network architecture point of view, all
+containers on a given Docker host are sitting on bridge interfaces. This
+means that they are just like physical machines connected through a
+common Ethernet switch; no more, no less.
+
+How mature is the code providing kernel namespaces and private
+networking? Kernel namespaces were introduced [between kernel version
+2.6.15 and
+2.6.26](http://lxc.sourceforge.net/index.php/about/kernel-namespaces/).
+This means that since July 2008 (date of the 2.6.26 release), namespace
+code has been exercised and scrutinized on a large
+number of production systems. And there is more: the design and
+inspiration for the namespaces code are even older. Namespaces are
+actually an effort to reimplement the features of [OpenVZ](
+http://en.wikipedia.org/wiki/OpenVZ) in such a way that they could be
+merged within the mainstream kernel. And OpenVZ was initially released
+in 2005, so both the design and the implementation are pretty mature.
+
+## Control groups
+
+Control Groups are another key component of Linux Containers. They
+implement resource accounting and limiting. They provide many
+useful metrics, but they also help ensure that each container gets
+its fair share of memory, CPU, disk I/O; and, more importantly, that a
+single container cannot bring the system down by exhausting one of those
+resources.
+
+So while they do not play a role in preventing one container from
+accessing or affecting the data and processes of another container, they
+are essential to fend off some denial-of-service attacks. They are
+particularly important on multi-tenant platforms, like public and
+private PaaS, to guarantee a consistent uptime (and performance) even
+when some applications start to misbehave.
+
+Control Groups have been around for a while as well: the code was
+started in 2006, and initially merged in kernel 2.6.24.
+
+## Docker daemon attack surface
+
+Running containers (and applications) with Docker implies running the
+Docker daemon. This daemon currently requires `root` privileges, and you
+should therefore be aware of some important details.
+
+First of all, **only trusted users should be allowed to control your
+Docker daemon**. This is a direct consequence of some powerful Docker
+features. Specifically, Docker allows you to share a directory between
+the Docker host and a guest container; and it allows you to do so
+without limiting the access rights of the container. This means that you
+can start a container where the `/host` directory will be the `/` directory
+on your host; and the container will be able to alter your host filesystem
+without any restriction. This is similar to how virtualization systems
+allow filesystem resource sharing. Nothing prevents you from sharing your
+root filesystem (or even your root block device) with a virtual machine.
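+
+As a minimal illustration of what this means, a single `docker run` flag is
+enough to hand a container full write access to the host's filesystem (the
+image name and container prompt below are just placeholders):
+
+    $ docker run -it -v /:/host ubuntu bash
+    root@container:/# touch /host/tmp/proof    # creates /tmp/proof on the host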
+
+This has a strong security implication: for example, if you instrument Docker
+from a web server to provision containers through an API, you should be
+even more careful than usual with parameter checking, to make sure that
+a malicious user cannot pass crafted parameters causing Docker to create
+arbitrary containers.
+
+For this reason, the REST API endpoint (used by the Docker CLI to
+communicate with the Docker daemon) changed in Docker 0.5.2, and now
+uses a UNIX socket instead of a TCP socket bound on 127.0.0.1 (the
+latter being prone to cross-site request forgery attacks if you happen to run
+Docker directly on your local machine, outside of a VM). You can then
+use traditional UNIX permission checks to limit access to the control
+socket.
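+
+For instance, on a typical installation the socket is owned by `root` and a
+`docker` group, so only root and members of that group can talk to the daemon
+(the group name and timestamp shown here will vary between distributions):
+
+    $ ls -l /var/run/docker.sock
+    srw-rw---- 1 root docker 0 Oct 30 02:40 /var/run/docker.sock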
+
+You can also expose the REST API over HTTP if you explicitly decide to do so.
+However, if you do that, being aware of the above-mentioned security
+implications, you should ensure that it will be reachable only from a
+trusted network or VPN; or protected with e.g., `stunnel` and client SSL
+certificates. You can also secure it with [HTTPS and
+certificates](/articles/https/).
+
+The daemon is also potentially vulnerable to other inputs, such as image
+loading from either disk with `docker load`, or from the network with
+`docker pull`. This has been a focus of improvement in the community,
+especially for `pull` security. While these overlap, it should be noted
+that `docker load` is a mechanism for backup and restore and is not
+currently considered a secure mechanism for loading images. As of
+Docker 1.3.2, images are now extracted in a chrooted subprocess on
+Linux/Unix platforms, being the first step in a wider effort toward
+privilege separation.
+
+Eventually, it is expected that the Docker daemon will run with restricted
+privileges, delegating operations to well-audited sub-processes,
+each with its own (very limited) scope of Linux capabilities,
+virtual network setup, filesystem management, etc. That is, most likely,
+pieces of the Docker engine itself will run inside of containers.
+
+Finally, if you run Docker on a server, it is recommended to run
+exclusively Docker on the server, and move all other services within
+containers controlled by Docker. Of course, it is fine to keep your
+favorite admin tools (probably at least an SSH server), as well as
+existing monitoring/supervision processes (e.g., NRPE, collectd, etc).
+
+## Linux kernel capabilities
+
+By default, Docker starts containers with a restricted set of
+capabilities. What does that mean?
+
+Capabilities turn the binary "root/non-root" dichotomy into a
+fine-grained access control system. Processes (like web servers) that
+just need to bind on a port below 1024 do not have to run as root: they
+can just be granted the `net_bind_service` capability instead. And there
+are many other capabilities, for almost all the specific areas where root
+privileges are usually needed.
+
+This means a lot for container security; let's see why!
+
+Your average server (bare metal or virtual machine) needs to run a bunch
+of processes as root. Those typically include SSH, cron, syslogd;
+hardware management tools (e.g., to load modules), network configuration
+tools (e.g., to handle DHCP, WPA, or VPNs), and much more. A container is
+very different, because almost all of those tasks are handled by the
+infrastructure around the container:
+
+ - SSH access will typically be managed by a single server running on
+   the Docker host;
+ - `cron`, when necessary, should run as a user
+   process, dedicated and tailored for the app that needs its
+   scheduling service, rather than as a platform-wide facility;
+ - log management will also typically be handled by Docker, or by
+   third-party services like Loggly or Splunk;
+ - hardware management is irrelevant, meaning that you never need to
+   run `udevd` or equivalent daemons within
+   containers;
+ - network management happens outside of the containers, enforcing
+   separation of concerns as much as possible, meaning that a container
+   should never need to perform `ifconfig`,
+   `route`, or `ip` commands (except when a container
+   is specifically engineered to behave like a router or firewall, of
+   course).
+
+This means that in most cases, containers will not need "real" root
+privileges *at all*. And therefore, containers can run with a reduced
+capability set; meaning that "root" within a container has far fewer
+privileges than the real "root". For instance, it is possible to:
+
+ - deny all "mount" operations;
+ - deny access to raw sockets (to prevent packet spoofing);
+ - deny access to some filesystem operations, like creating new device
+   nodes, changing the owner of files, or altering attributes (including
+   the immutable flag);
+ - deny module loading;
+ - and many others.
+
+This means that even if an intruder manages to escalate to root within a
+container, it will be much harder to do serious damage, or to escalate
+to the host.
+
+This won't affect regular web apps; but malicious users will find that
+the arsenal at their disposal has shrunk considerably! By default Docker
+drops all capabilities except [those
+needed](https://github.com/docker/docker/blob/master/daemon/execdriver/native/template/default_template.go),
+a whitelist instead of a blacklist approach. You can see a full list of
+available capabilities in the [Linux
+manpages](http://man7.org/linux/man-pages/man7/capabilities.7.html).
+
+One primary risk with running Docker containers is that the default set
+of capabilities and mounts given to a container may provide incomplete
+isolation, either independently, or when used in combination with
+kernel vulnerabilities.
+
+Docker supports the addition and removal of capabilities, allowing use
+of a non-default profile. This may make Docker more secure through
+capability removal, or less secure through the addition of capabilities.
+The best practice for users would be to remove all capabilities except
+those explicitly required for their processes.
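+
+For example, here is a sketch of that practice using the `--cap-drop` and
+`--cap-add` flags, assuming a hypothetical `my-web-server` image whose only
+privileged need is binding to a port below 1024:
+
+    $ docker run -d --cap-drop ALL --cap-add NET_BIND_SERVICE my-web-server
+
+Everything else (mounting filesystems, loading modules, opening raw sockets,
+and so on) is denied to the container, even to its root user.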
+
+## Other kernel security features
+
+Capabilities are just one of the many security features provided by
+modern Linux kernels. It is also possible to leverage existing,
+well-known systems like TOMOYO, AppArmor, SELinux, GRSEC, etc. with
+Docker.
+
+While Docker currently only enables capabilities, it doesn't interfere
+with the other systems. This means that there are many different ways to
+harden a Docker host. Here are a few examples.
+
+ - You can run a kernel with GRSEC and PAX. This will add many safety
+   checks, both at compile-time and run-time; it will also defeat many
+   exploits, thanks to techniques like address randomization. It doesn't
+   require Docker-specific configuration, since those security features
+   apply system-wide, independent of containers.
+ - If your distribution comes with security model templates for
+   Docker containers, you can use them out of the box. For instance, we
+   ship a template that works with AppArmor, and Red Hat comes with SELinux
+   policies for Docker. These templates provide an extra safety net (even
+   though it overlaps greatly with capabilities).
+ - You can define your own policies using your favorite access control
+   mechanism.
+
+Just like there are many third-party tools to augment Docker containers
+with, e.g., special network topologies or shared filesystems, you can
+expect to see tools to harden existing Docker containers without
+affecting Docker's core.
+
+Recent improvements in Linux namespaces will soon allow running
+full-featured containers without root privileges, thanks to the new user
+namespace. This is covered in detail [here](
+http://s3hh.wordpress.com/2013/07/19/creating-and-using-containers-without-privilege/).
+Moreover, this will solve the problem caused by sharing filesystems
+between host and guest, since the user namespace allows users within
+containers (including the root user) to be mapped to other users in the
+host system.
+
+Today, Docker does not directly support user namespaces, but they
+may still be utilized by Docker containers on supported kernels,
+by directly using the `clone` syscall, or utilizing the `unshare`
+utility. Using this, some users may find it possible to drop
+more capabilities from their process as user namespaces provide
+an artificial capabilities set. Likewise, however, this artificial
+capabilities set may require use of `capsh` to restrict the
+user-namespace capabilities set when using `unshare`.
+
+Eventually, it is expected that Docker will have direct, native support
+for user namespaces, simplifying the process of hardening containers.
+
+## Conclusions
+
+Docker containers are, by default, quite secure; especially if you take
+care of running your processes inside the containers as non-privileged
+users (i.e., non-`root`).
+
+You can add an extra layer of safety by enabling AppArmor, SELinux,
+GRSEC, or your favorite hardening solution.
+
+Last but not least, if you see interesting security features in other
+containerization systems, these are simply kernel features that may
+be implemented in Docker as well. We welcome users to submit issues,
+pull requests, and communicate via the mailing list.
+
+References:
+
+* [Docker Containers: How Secure Are They? (2013)](
+http://blog.docker.com/2013/08/containers-docker-how-secure-are-they/).
+* [On the Security of Containers (2014)](https://medium.com/@ewindisch/on-the-security-of-containers-2c60ffe25a9e).
diff --git a/docs/articles/systemd.md b/docs/articles/systemd.md
new file mode 100644
index 00000000..c8fe3db4
--- /dev/null
+++ b/docs/articles/systemd.md
@@ -0,0 +1,149 @@
+
+
+# Control and configure Docker with systemd
+
+Many Linux distributions use systemd to start the Docker daemon. This document
+shows a few examples of how to customize Docker's settings.
+
+## Starting the Docker daemon
+
+Once Docker is installed, you will need to start the Docker daemon.
+
+    $ sudo systemctl start docker
+    # or on older distributions, you may need to use
+    $ sudo service docker start
+
+If you want Docker to start at boot, you should also:
+
+    $ sudo systemctl enable docker
+    # or on older distributions, you may need to use
+    $ sudo chkconfig docker on
+
+## Custom Docker daemon options
+
+There are a number of ways to configure the flags and environment variables
+for the Docker daemon.
+
+The recommended way is to use a systemd drop-in file. These are local files in
+the `/etc/systemd/system/docker.service.d` directory. This could also be
+`/etc/systemd/system/docker.service`, which works for overriding the
+defaults from `/lib/systemd/system/docker.service`.
+
+However, if you had previously used a package which had an `EnvironmentFile`
+(often pointing to `/etc/sysconfig/docker`) then for backwards compatibility,
+you can drop a file in the `/etc/systemd/system/docker.service.d`
+directory including the following:
+
+    [Service]
+    EnvironmentFile=-/etc/sysconfig/docker
+    EnvironmentFile=-/etc/sysconfig/docker-storage
+    EnvironmentFile=-/etc/sysconfig/docker-network
+    ExecStart=
+    ExecStart=/usr/bin/docker -d -H fd:// $OPTIONS \
+              $DOCKER_STORAGE_OPTIONS \
+              $DOCKER_NETWORK_OPTIONS \
+              $BLOCK_REGISTRY \
+              $INSECURE_REGISTRY
+
+To check if the `docker.service` uses an `EnvironmentFile`:
+
+    $ sudo systemctl show docker | grep EnvironmentFile
+    EnvironmentFile=-/etc/sysconfig/docker (ignore_errors=yes)
+
+Alternatively, find out where the service file is located:
+
+    $ sudo systemctl status docker | grep Loaded
+       Loaded: loaded (/usr/lib/systemd/system/docker.service; enabled)
+    $ sudo grep EnvironmentFile /usr/lib/systemd/system/docker.service
+    EnvironmentFile=-/etc/sysconfig/docker
+
+You can customize the Docker daemon options using override files as explained in the
+[HTTP Proxy example](#http-proxy) below. The files located in `/usr/lib/systemd/system`
+or `/lib/systemd/system` contain the default options and should not be edited.
+
+### Runtime directory and storage driver
+
+You may want to control the disk space used for Docker images, containers
+and volumes by moving it to a separate partition.
+
+In this example, we'll assume that your `docker.service` file looks something like:
+
+    [Unit]
+    Description=Docker Application Container Engine
+    Documentation=https://docs.docker.com
+    After=network.target docker.socket
+    Requires=docker.socket
+
+    [Service]
+    Type=notify
+    ExecStart=/usr/bin/docker daemon -H fd://
+    LimitNOFILE=1048576
+    LimitNPROC=1048576
+
+    [Install]
+    Also=docker.socket
+
+This will allow us to add extra flags via a drop-in file (mentioned above) by
+placing a file containing the following in the `/etc/systemd/system/docker.service.d`
+directory:
+
+    [Service]
+    ExecStart=
+    ExecStart=/usr/bin/docker daemon -H fd:// --graph /mnt/docker-data --storage-driver btrfs
+
+You can also set other environment variables in this file, for example, the
+`HTTP_PROXY` environment variables described below.
+
+### HTTP proxy
+
+This example overrides the default `docker.service` file.
+
+If you are behind an HTTP proxy server, for example in corporate settings,
+you will need to add this configuration in the Docker systemd service file.
+
+First, create a systemd drop-in directory for the docker service:
+
+    mkdir /etc/systemd/system/docker.service.d
+
+Now create a file called `/etc/systemd/system/docker.service.d/http-proxy.conf`
+that adds the `HTTP_PROXY` environment variable:
+
+    [Service]
+    Environment="HTTP_PROXY=http://proxy.example.com:80/"
+
+If you have internal Docker registries that you need to contact without
+proxying, you can specify them via the `NO_PROXY` environment variable:
+
+    Environment="HTTP_PROXY=http://proxy.example.com:80/" "NO_PROXY=localhost,127.0.0.0/8,docker-registry.somecorporation.com"
+
+Flush changes:
+
+    $ sudo systemctl daemon-reload
+
+Verify that the configuration has been loaded:
+
+    $ sudo systemctl show docker --property Environment
+    Environment=HTTP_PROXY=http://proxy.example.com:80/
+
+Restart Docker:
+
+    $ sudo systemctl restart docker
+
+## Manually creating the systemd unit files
+
+When installing the binary without a package, you may want
+to integrate Docker with systemd. For this, simply install the two unit files
+(service and socket) from [the GitHub
+repository](https://github.com/docker/docker/tree/master/contrib/init/systemd)
+to `/etc/systemd/system`.
+
diff --git a/docs/articles/using_supervisord.md b/docs/articles/using_supervisord.md
new file mode 100644
index 00000000..931f38c1
--- /dev/null
+++ b/docs/articles/using_supervisord.md
@@ -0,0 +1,118 @@
+
+
+# Using Supervisor with Docker
+
+> **Note**:
+> - **If you don't like sudo** then see [*Giving non-root
+>   access*](/installation/binaries/#giving-non-root-access)
+
+Traditionally a Docker container runs a single process when it is
+launched, for example an Apache daemon or an SSH server daemon. Often
+though you want to run more than one process in a container. There are a
+number of ways you can achieve this, ranging from using a simple Bash
+script as the value of your container's `CMD` instruction to installing
+a process management tool.
+
+In this example we're going to make use of the process management tool,
+[Supervisor](http://supervisord.org/), to manage multiple processes in
+our container. Using Supervisor allows us to better control, manage, and
+restart the processes we want to run. To demonstrate this we're going to
+install and manage both an SSH daemon and an Apache daemon.
+
+## Creating a Dockerfile
+
+Let's start by creating a basic `Dockerfile` for our
+new image.
+
+    FROM ubuntu:13.04
+    MAINTAINER examples@docker.com
+
+## Installing Supervisor
+
+We can now install our SSH and Apache daemons as well as Supervisor in
+our container.
+
+    RUN apt-get update && apt-get install -y openssh-server apache2 supervisor
+    RUN mkdir -p /var/lock/apache2 /var/run/apache2 /var/run/sshd /var/log/supervisor
+
+Here we're installing the `openssh-server`,
+`apache2` and `supervisor`
+(which provides the Supervisor daemon) packages. We're also creating four
+new directories that are needed to run our SSH daemon and Supervisor.
+
+## Adding Supervisor's configuration file
+
+Now let's add a configuration file for Supervisor. The default file is
+called `supervisord.conf` and is located in
+`/etc/supervisor/conf.d/`.
+
+    COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
+
+Let's see what is inside our `supervisord.conf`
+file.
+
+    [supervisord]
+    nodaemon=true
+
+    [program:sshd]
+    command=/usr/sbin/sshd -D
+
+    [program:apache2]
+    command=/bin/bash -c "source /etc/apache2/envvars && exec /usr/sbin/apache2 -DFOREGROUND"
+
+The `supervisord.conf` configuration file contains
+directives that configure Supervisor and the processes it manages. The
+first block `[supervisord]` provides configuration
+for Supervisor itself. We're using one directive, `nodaemon`,
+which tells Supervisor to run interactively rather than
+daemonize.
+
+The next two blocks manage the services we wish to control. Each block
+controls a separate process. The blocks contain a single directive,
+`command`, which specifies what command to run to
+start each process.
+
+## Exposing ports and running Supervisor
+
+Now let's finish our `Dockerfile` by exposing some
+required ports and specifying the `CMD` instruction
+to start Supervisor when our container launches.
+
+    EXPOSE 22 80
+    CMD ["/usr/bin/supervisord"]
+
+Here we've exposed ports 22 and 80 on the container and we're running
+the `/usr/bin/supervisord` binary when the container
+launches.
+
+## Building our image
+
+We can now build our new image.
+
+    $ docker build -t <yourname>/supervisord .
+
+## Running our Supervisor container
+
+Once we've got a built image we can launch a container from it.
+
+    $ docker run -p 22 -p 80 -t -i <yourname>/supervisord
+    2013-11-25 18:53:22,312 CRIT Supervisor running as root (no user in config file)
+    2013-11-25 18:53:22,312 WARN Included extra file "/etc/supervisor/conf.d/supervisord.conf" during parsing
+    2013-11-25 18:53:22,342 INFO supervisord started with pid 1
+    2013-11-25 18:53:23,346 INFO spawned: 'sshd' with pid 6
+    2013-11-25 18:53:23,349 INFO spawned: 'apache2' with pid 7
+    . . .
+
+We've launched a new container interactively using the `docker run` command.
+That container has run Supervisor and launched the SSH and Apache daemons with
+it. We've specified the `-p` flag to expose ports 22 and 80. From here we can
+now identify the exposed ports and connect to one or both of the SSH and Apache
+daemons.
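+
+For reference, putting the snippets above together gives the complete
+`Dockerfile` used in this example:
+
+    FROM ubuntu:13.04
+    MAINTAINER examples@docker.com
+
+    RUN apt-get update && apt-get install -y openssh-server apache2 supervisor
+    RUN mkdir -p /var/lock/apache2 /var/run/apache2 /var/run/sshd /var/log/supervisor
+
+    COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
+
+    EXPOSE 22 80
+    CMD ["/usr/bin/supervisord"]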
diff --git a/docs/docker-hub/accounts.md b/docs/docker-hub/accounts.md
new file mode 100644
index 00000000..a9694f2c
--- /dev/null
+++ b/docs/docker-hub/accounts.md
@@ -0,0 +1,86 @@
+
+
+# Accounts on Docker Hub
+
+## Docker Hub accounts
+
+You can `search` for Docker images and `pull` them from [Docker
+Hub](https://hub.docker.com) without signing in or even having an
+account. However, in order to `push` images, leave comments or to *star*
+a repository, you are going to need a [Docker
+Hub](https://hub.docker.com) account.
+
+### Registration for a Docker Hub account
+
+You can get a [Docker Hub](https://hub.docker.com) account by
+[signing up for one here](https://hub.docker.com/account/signup/). A valid
+email address is required to register, which you will need to verify for
+account activation.
+
+### Email activation process
+
+You need to have at least one verified email address to be able to use your
+[Docker Hub](https://hub.docker.com) account. If you can't find the validation email,
+you can request another by visiting the [Resend Email Confirmation](
+https://hub.docker.com/account/resend-email-confirmation/) page.
+
+### Password reset process
+
+If you can't access your account for some reason, you can reset your password
+from the [*Password Reset*](https://hub.docker.com/account/forgot-password/)
+page.
+
+## Organizations and groups
+
+A Docker Hub organization contains public and private repositories just like
+a user account. Access to push, pull or create these organization-owned repositories
+is allocated by defining groups of users and then assigning group rights to
+specific repositories. This allows you to distribute limited access
+Docker images, and to select which Docker Hub users can publish new images.
+
+### Creating and viewing organizations
+
+You can see what organizations [you belong to and add new organizations](
+https://hub.docker.com/account/organizations/) from the Account Settings
+tab. They are also listed below your user name on your repositories page
+and in your account profile.
+
+![organizations](/docker-hub/hub-images/orgs.png)
+
+### Organization groups
+
+Users in the `Owners` group of an organization can create and modify the
+membership of groups.
+
+Unless they are the organization's `Owner`, users can only see groups of which they
+are members.
+
+![groups](/docker-hub/hub-images/groups.png)
+
+### Repository group permissions
+
+Use organization groups to manage the users that can interact with your repositories.
+
+You must be in an organization's `Owners` group to create a new group, Hub
+repository, or automated build. As an `Owner`, you then delegate the following
+repository access rights to groups:
+
+| Access Right | Description |
+|--------------|-------------|
+| `Read`       | Users with this right can view, search, and pull a private repository. |
+| `Write`      | Users with this right can push to non-automated repositories on the Docker Hub. |
+| `Admin`      | Users with this right can modify a repository's "Description" and "Collaborators" rights. They can also mark a repository as unlisted, change its "Public/Private" status and "Delete" the repository. Finally, `Admin` rights are required to read the build log on a repo. |
+
+Regardless of their actual access rights, users with unverified email addresses
+have `Read` access to the repository. Once they have verified their address,
+they have their full access rights as granted on the organization.
diff --git a/docs/docker-hub/builds.md b/docs/docker-hub/builds.md
new file mode 100644
index 00000000..015fcef1
--- /dev/null
+++ b/docs/docker-hub/builds.md
@@ -0,0 +1,465 @@
+
+
+# Automated Builds on Docker Hub
+
+## About Automated Builds
+
+*Automated Builds* are a special feature of Docker Hub which allow you to
+use [Docker Hub's](https://hub.docker.com) build clusters to automatically
+create images from a GitHub or Bitbucket repository containing a `Dockerfile`.
+The system will clone your repository and build the image described by the
+`Dockerfile` using the directory the `Dockerfile` is in (and subdirectories)
+as the build context. The resulting automated image will then be uploaded
+to the Docker Hub registry and marked as an *Automated Build*.
+
+Automated Builds have several advantages:
+
+* Users of *your* Automated Build can trust that the resulting
+image was built exactly as specified.
+* The `Dockerfile` will be available to anyone with access to
+your repository on the Docker Hub registry.
+* Because the process is automated, Automated Builds help to
+make sure that your repository is always up to date.
+* Not having to push local Docker images to Docker Hub saves
+you both network bandwidth and time.
+
+Automated Builds are supported for both public and private repositories
+on both [GitHub](http://github.com) and [Bitbucket](https://bitbucket.org/).
+
+To use Automated Builds, you must have an [account on Docker Hub](
+https://docs.docker.com/userguide/dockerhub/#creating-a-docker-hub-account)
+and on GitHub and/or Bitbucket. In either case, the account needs
+to be properly validated and activated before you can link to it.
+
+The first time you set up an Automated Build, your
+[Docker Hub](https://hub.docker.com) account will need to be linked to
+a GitHub or Bitbucket account.
+This will allow the registry to see your repositories.
+
+If you have previously linked your Docker Hub account, and want to view or modify
+that link, click on the "Manage - Settings" link in the sidebar, and then
+"Linked Accounts" in your Settings sidebar.
+
+## Automated Builds from GitHub
+
+If you've previously linked your Docker Hub account to your GitHub account,
+you can skip ahead to [Creating an Automated Build](#creating-an-automated-build).
+
+### Linking your Docker Hub account to a GitHub account
+
+> *Note:*
+> Automated Builds currently require *read* and *write* access since
+> [Docker Hub](https://hub.docker.com) needs to set up a GitHub service
+> hook. We have no choice here, this is how GitHub manages permissions, sorry!
+> We do guarantee nothing else will be touched in your account.
+
+To get started, log into your Docker Hub account and click the
+"+ Add Repository" button at the upper right of the screen. Then select
+[Automated Build](https://registry.hub.docker.com/builds/add/).
+
+Select the [GitHub service](https://registry.hub.docker.com/associate/github/).
+
+When linking to GitHub, you'll need to select either "Public and Private",
+or "Limited" linking.
+
+The "Public and Private" option is the easiest to use,
+as it grants the Docker Hub full access to all of your repositories. GitHub
+also allows you to grant access to repositories belonging to your GitHub
+organizations.
+
+By choosing the "Limited" linking, your Docker Hub account only gets permission
+to access your public data and public repositories.
+
+Follow the onscreen instructions to authorize and link your
+GitHub account to Docker Hub. Once it is linked, you'll be able to
+choose a source repository from which to create the Automated Build.
+
+You will be able to review and revoke Docker Hub's access by visiting the
+[GitHub User's Applications settings](https://github.com/settings/applications).
+
+> **Note**: If you delete the GitHub account linkage that is used for one of your
+> automated build repositories, the previously built images will still be available.
+> If you re-link to that GitHub account later, the automated build can be started
+> using the "Start Build" button on the Hub, or, if the webhook on the GitHub repository
+> still exists, will be triggered by any subsequent commits.
+
+### Auto builds and limited linked GitHub accounts
+
+If you selected to link your GitHub account with only a "Limited" link, then
+after creating your automated build, you will need to either manually trigger a
+Docker Hub build using the "Start a Build" button, or add the GitHub webhook
+manually, as described in [GitHub Service Hooks](#github-service-hooks).
+
+### Changing the GitHub user link
+
+If you want to remove, or change the level of linking between your GitHub account
+and the Docker Hub, you need to do this in two places.
+
+First, remove the "Linked Account" from your Docker Hub "Settings".
+
+Then go to your GitHub account's Personal settings, and in the "Applications"
+section, "Revoke access".
+
+You can now re-link your account at any time.
+
+### GitHub organizations
+
+GitHub organizations and private repositories forked from organizations will be
+made available to auto build using the "Docker Hub Registry" application, which
+needs to be added to the organization - and then will apply to all users.
+
+To check, or request access, go to your GitHub user's "Settings" page, select the
+"Applications" section from the left side bar, then click the "View" button for
+"Docker Hub Registry".
+
+![Check User access to GitHub](/docker-hub/hub-images/gh-check-user-org-dh-app-access.png)
+
+The organization's administrators may need to go to the Organization's "Third
+party access" screen in "Settings" to Grant or Deny access to the Docker Hub
+Registry application. This change will apply to all organization members.
+
+![Check Docker Hub application access to Organization](/docker-hub/hub-images/gh-check-admin-org-dh-app-access.png)
+
+More detailed access controls to specific users and GitHub repositories can be
+managed using the GitHub People and Teams interfaces.
+
+### Creating an Automated Build
+
+You can [create an Automated Build](
+https://registry.hub.docker.com/builds/github/select/) from any of your
+public or private GitHub repositories that have a `Dockerfile`.
+
+Once you've selected the source repository, you can then configure:
+
+- The Hub user/org the repository is built to - either your Hub account name,
+  or the name of any Hub organizations your account is in
+- The Docker repository name the image is built to
+- If the Docker repository should be "Public" or "Private".
+  You can change the accessibility options after the repository has been created.
+  If you add a Private repository to a Hub user, then you can only add other users
+  as collaborators, and those users will be able to view and pull all images in that
+  repository. To configure more granular access permissions, such as using groups of
+  users or allowing different users access to different image tags, you need
+  to add the Private repository to a Hub organization that your user has Administrator
+  privileges on.
+- If you want GitHub to notify the Docker Hub when a commit is made, and thus trigger
+  a rebuild of all the images in this automated build.
+
+You can also select one or more of the following:
+
+- The git branch/tag, and which repository sub-directory to use as the context
+- The Docker image tag name
+
+You can set a description for the repository by clicking the "Description" link
+in the right-hand sidebar after the automated build has been created. Note that
+the "Full Description" will be overwritten by the `README.md` file on the next
+build.
+
+### GitHub private submodules
+
+If your GitHub repository contains links to private submodules, you'll get an
+error message in your build.
+
+Normally, the Docker Hub sets up a deploy key in your GitHub repository.
+Unfortunately, GitHub only allows a repository deploy key to access a single repository.
+
+To work around this, you need to create a dedicated user account in GitHub and attach
+the automated build's deploy key to that account. This dedicated build account
+can be limited to read-only access to just the repositories required to build.
+
+| Step | Description |
+|------|-------------|
+| 1.   | First, create the new account in GitHub. It should be given read-only access to the main repository and all submodules that are needed. |
+| 2.   | This can be accomplished by adding the account to a read-only team in the organization(s) where the main GitHub repository and all submodule repositories are kept. |
+| 3.   | Next, remove the deploy key from the main GitHub repository. This can be done in the GitHub repository's "Deploy keys" Settings section. |
+| 4.   | Your automated build's deploy key is in the "Build Details" menu under "Deploy keys". |
+| 5.   | In your dedicated GitHub User account, add the deploy key from your Docker Hub Automated Build. |
+ +### GitHub service hooks + +The GitHub Service hook allows GitHub to notify the Docker Hub when something has +been committed to that git repository. You will need to add the Service Hook manually +if your GitHub account is "Limited" linked to the Docker Hub. + +Follow the steps below to configure the GitHub Service hooks for your Automated Build: + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+| Step | Screenshot | Description |
+|------|------------|-------------|
+| 1.   |            | Log in to GitHub.com, and go to your Repository page. Click on "Settings" on the right side of the page. You must have admin privileges to the repository in order to do this. |
+| 2.   | Webhooks & Services | Click on "Webhooks & Services" on the left side of the page. |
+| 3.   | Find the service labeled Docker | Find the service labeled "Docker" (or click on "Add service") and click on it. |
+| 4.   | Activate Service Hooks | Make sure the "Active" checkbox is selected and click the "Update service" button to save your changes. |
+
+## Automated Builds with Bitbucket
+
+In order to set up an Automated Build, you need to first link your
+[Docker Hub](https://hub.docker.com) account with a Bitbucket account.
+This will allow the registry to see your repositories.
+
+To get started, log into your Docker Hub account and click the
+"+ Add Repository" button at the upper right of the screen. Then
+select [Automated Build](https://registry.hub.docker.com/builds/add/).
+
+Select the [Bitbucket source](
+https://registry.hub.docker.com/associate/bitbucket/).
+
+Then follow the onscreen instructions to authorize and link your
+Bitbucket account to Docker Hub. Once it is linked, you'll be able
+to choose a repository from which to create the Automated Build.
+
+### Creating an Automated Build
+
+You can [create an Automated Build](
+https://registry.hub.docker.com/builds/bitbucket/select/) from any of your
+public or private Bitbucket repositories with a `Dockerfile`.
+
+### Adding a Hook
+
+When you link your Docker Hub account, a `POST` hook should get automatically
+added to your Bitbucket repository. Follow the steps below to confirm or modify the
+Bitbucket hooks for your Automated Build:
+
+| Step | Screenshot | Description |
+|------|------------|-------------|
+| 1.   | Settings   | Log in to Bitbucket.org and go to your Repository page. Click on "Settings" on the far left side of the page, under "Navigation". You must have admin privileges to the repository in order to do this. |
+| 2.   | Hooks      | Click on "Hooks" on the near left side of the page, under "Settings". |
+| 3.   | Docker Post Hook | You should now see a list of hooks associated with the repo, including a POST hook that points at registry.hub.docker.com/hooks/bitbucket. |
+
+
+## The Dockerfile and Automated Builds
+
+During the build process, Docker will copy the contents of your `Dockerfile`.
+It will also add it to the [Docker Hub](https://hub.docker.com) for the Docker
+community (for public repositories) or approved team members/orgs (for private
+repositories) to see on the repository page.
+
+### README.md
+
+If you have a `README.md` file in your repository, it will be used as the
+repository's full description. The build process will look for a
+`README.md` in the same directory as your `Dockerfile`.
+
+> **Warning:**
+> If you change the full description after a build, it will be
+> rewritten the next time the Automated Build runs. To make changes,
+> modify the `README.md` in the Git repository.
+
+## Remote Build triggers
+
+If you need a way to trigger Automated Builds outside of GitHub or Bitbucket,
+you can set up a build trigger. When you turn on the build trigger for an
+Automated Build, it will give you a URL to which you can send POST requests.
+This will trigger the Automated Build, much as with a GitHub webhook.
+
+Build triggers are available under the Settings menu of each Automated Build
+repository on the Docker Hub.
+
+![Build trigger screen](/docker-hub/hub-images/build-trigger.png)
+
+You can use `curl` to trigger a build:
+
+```
+$ curl --data "build=true" -X POST https://registry.hub.docker.com/u/svendowideit/testhook/trigger/be579c82-7c0e-11e4-81c4-0242ac110020/
+OK
+```
+
+> **Note:**
+> You can only trigger one build at a time and no more than one
+> every five minutes. If you already have a build pending, or if you
+> recently submitted a build request, those requests *will be ignored*.
+> To verify everything is working correctly, check the logs of the last
+> ten triggers on the settings page.
+
+## Webhooks
+
+Automated Builds also include a Webhooks feature. Webhooks can be called
+after a successful repository push is made. This includes when a new tag is added
+to an existing image.
+
+The webhook call will generate an HTTP POST with the following JSON
+payload:
+
+```
+{
+  "callback_url": "https://registry.hub.docker.com/u/svendowideit/testhook/hook/2141b5bi5i5b02bec211i4eeih0242eg11000a/",
+  "push_data": {
+    "images": [
+        "27d47432a69bca5f2700e4dff7de0388ed65f9d3fb1ec645e2bc24c223dc1cc3",
+        "51a9c7c1f8bb2fa19bcd09789a34e63f35abb80044bc10196e304f6634cc582c",
+        ...
+ ], + "pushed_at": 1.417566161e+09, + "pusher": "trustedbuilder" + }, + "repository": { + "comment_count": 0, + "date_created": 1.417494799e+09, + "description": "", + "dockerfile": "#\n# BUILD\u0009\u0009docker build -t svendowideit/apt-cacher .\n# RUN\u0009\u0009docker run -d -p 3142:3142 -name apt-cacher-run apt-cacher\n#\n# and then you can run containers with:\n# \u0009\u0009docker run -t -i -rm -e http_proxy http://192.168.1.2:3142/ debian bash\n#\nFROM\u0009\u0009ubuntu\nMAINTAINER\u0009SvenDowideit@home.org.au\n\n\nVOLUME\u0009\u0009[\"/var/cache/apt-cacher-ng\"]\nRUN\u0009\u0009apt-get update ; apt-get install -yq apt-cacher-ng\n\nEXPOSE \u0009\u00093142\nCMD\u0009\u0009chmod 777 /var/cache/apt-cacher-ng ; /etc/init.d/apt-cacher-ng start ; tail -f /var/log/apt-cacher-ng/*\n", + "full_description": "Docker Hub based automated build from a GitHub repo", + "is_official": false, + "is_private": true, + "is_trusted": true, + "name": "testhook", + "namespace": "svendowideit", + "owner": "svendowideit", + "repo_name": "svendowideit/testhook", + "repo_url": "https://registry.hub.docker.com/u/svendowideit/testhook/", + "star_count": 0, + "status": "Active" + } +} +``` + +Webhooks are available under the Settings menu of each Repository. +Use a tool like [requestb.in](http://requestb.in/) to test your webhook. + +> **Note**: The Docker Hub servers use an elastic IP range, so you can't +> filter requests by IP. + +### Webhook chains + +Webhook chains allow you to chain calls to multiple services. For example, +you can use this to trigger a deployment of your container only after +it has been successfully tested, then update a separate Changelog once the +deployment is complete. +After clicking the "Add webhook" button, simply add as many URLs as necessary +in your chain. + +The first webhook in a chain will be called after a successful push. Subsequent +URLs will be contacted after the callback has been validated. + +### Validating a callback + +In order to validate a callback in a webhook chain, you need to + +1. Retrieve the `callback_url` value in the request's JSON payload. +1. Send a POST request to this URL containing a valid JSON body. + +> **Note**: A chain request will only be considered complete once the last +> callback has been validated. + +To help you debug or simply view the results of your webhook(s), +view the "History" of the webhook available on its settings page. + +### Callback JSON data + +The following parameters are recognized in callback data: + +* `state` (required): Accepted values are `success`, `failure` and `error`. + If the state isn't `success`, the webhook chain will be interrupted. +* `description`: A string containing miscellaneous information that will be + available on the Docker Hub. Maximum 255 characters. +* `context`: A string containing the context of the operation. Can be retrieved + from the Docker Hub. Maximum 100 characters. +* `target_url`: The URL where the results of the operation can be found. Can be + retrieved on the Docker Hub. + +*Example callback payload:* + + { + "state": "success", + "description": "387 tests PASSED", + "context": "Continuous integration by Acme CI", + "target_url": "http://ci.acme.com/results/afd339c1c3d27" + } + +## Repository links + +Repository links are a way to associate one Automated Build with +another. If one gets updated, the linking system triggers a rebuild +for the other Automated Build. This makes it easy to keep all your +Automated Builds up to date. 
+
+To add a link, go to the repository for the Automated Build you want to
+link to and click on *Repository Links* under the Settings menu at
+right. Then, enter the name of the repository that you want to have linked.
+
+> **Warning:**
+> You can add more than one repository link; however, you should
+> do so very carefully. Creating a two-way relationship between Automated Builds will
+> cause an endless build loop.
diff --git a/docs/docker-hub/home.md b/docs/docker-hub/home.md
new file mode 100644
index 00000000..c2e44e17
--- /dev/null
+++ b/docs/docker-hub/home.md
@@ -0,0 +1,20 @@
+
+
+# The Docker Hub Registry help
+
+## Introduction
+
+For your questions about the [Docker Hub](https://hub.docker.com) registry you
+can use [this documentation](docs.md).
+
+If you cannot find something you are looking for, please feel free to
+[contact us](https://docker.com/resources/support/).
diff --git a/docs/docker-hub/hub-images/bb_hooks.png b/docs/docker-hub/hub-images/bb_hooks.png
new file mode 100644
index 00000000..9efe4907
Binary files /dev/null and b/docs/docker-hub/hub-images/bb_hooks.png differ
diff --git a/docs/docker-hub/hub-images/bb_menu.png b/docs/docker-hub/hub-images/bb_menu.png
new file mode 100644
index 00000000..fba1a03a
Binary files /dev/null and b/docs/docker-hub/hub-images/bb_menu.png differ
diff --git a/docs/docker-hub/hub-images/bb_post-hook.png b/docs/docker-hub/hub-images/bb_post-hook.png
new file mode 100644
index 00000000..53100dbb
Binary files /dev/null and b/docs/docker-hub/hub-images/bb_post-hook.png differ
diff --git a/docs/docker-hub/hub-images/build-trigger.png b/docs/docker-hub/hub-images/build-trigger.png
new file mode 100644
index 00000000..02caf51a
Binary files /dev/null and b/docs/docker-hub/hub-images/build-trigger.png differ
diff --git a/docs/docker-hub/hub-images/dashboard.png b/docs/docker-hub/hub-images/dashboard.png
new file mode 100644
index 00000000..924799dd
Binary files /dev/null and b/docs/docker-hub/hub-images/dashboard.png differ
diff --git a/docs/docker-hub/hub-images/deploy_key.png b/docs/docker-hub/hub-images/deploy_key.png
new file mode 100644
index 00000000..f1d8d92d
Binary files /dev/null and b/docs/docker-hub/hub-images/deploy_key.png differ
diff --git a/docs/docker-hub/hub-images/gh-check-admin-org-dh-app-access.png b/docs/docker-hub/hub-images/gh-check-admin-org-dh-app-access.png
new file mode 100644
index 00000000..0df38c69
Binary files /dev/null and b/docs/docker-hub/hub-images/gh-check-admin-org-dh-app-access.png differ
diff --git a/docs/docker-hub/hub-images/gh-check-user-org-dh-app-access.png b/docs/docker-hub/hub-images/gh-check-user-org-dh-app-access.png
new file mode 100644
index 00000000..f24424af
Binary files /dev/null and b/docs/docker-hub/hub-images/gh-check-user-org-dh-app-access.png differ
diff --git a/docs/docker-hub/hub-images/gh_add_ssh_user_key.png b/docs/docker-hub/hub-images/gh_add_ssh_user_key.png
new file mode 100644
index 00000000..3926fcf1
Binary files /dev/null and b/docs/docker-hub/hub-images/gh_add_ssh_user_key.png differ
diff --git a/docs/docker-hub/hub-images/gh_docker-service.png b/docs/docker-hub/hub-images/gh_docker-service.png
new file mode 100644
index 00000000..7a84c81b
Binary files /dev/null and b/docs/docker-hub/hub-images/gh_docker-service.png differ
diff --git a/docs/docker-hub/hub-images/gh_menu.png b/docs/docker-hub/hub-images/gh_menu.png
new file mode 100644
index 00000000..84458a44
Binary files /dev/null and b/docs/docker-hub/hub-images/gh_menu.png differ
diff --git 
a/docs/docker-hub/hub-images/gh_org_members.png b/docs/docker-hub/hub-images/gh_org_members.png new file mode 100644 index 00000000..465f5da5 Binary files /dev/null and b/docs/docker-hub/hub-images/gh_org_members.png differ diff --git a/docs/docker-hub/hub-images/gh_repo_deploy_key.png b/docs/docker-hub/hub-images/gh_repo_deploy_key.png new file mode 100644 index 00000000..983b5eec Binary files /dev/null and b/docs/docker-hub/hub-images/gh_repo_deploy_key.png differ diff --git a/docs/docker-hub/hub-images/gh_service_hook.png b/docs/docker-hub/hub-images/gh_service_hook.png new file mode 100644 index 00000000..c344c24a Binary files /dev/null and b/docs/docker-hub/hub-images/gh_service_hook.png differ diff --git a/docs/docker-hub/hub-images/gh_settings.png b/docs/docker-hub/hub-images/gh_settings.png new file mode 100644 index 00000000..2af9cb51 Binary files /dev/null and b/docs/docker-hub/hub-images/gh_settings.png differ diff --git a/docs/docker-hub/hub-images/gh_team_members.png b/docs/docker-hub/hub-images/gh_team_members.png new file mode 100644 index 00000000..3bdf4abd Binary files /dev/null and b/docs/docker-hub/hub-images/gh_team_members.png differ diff --git a/docs/docker-hub/hub-images/groups.png b/docs/docker-hub/hub-images/groups.png new file mode 100644 index 00000000..272ab23c Binary files /dev/null and b/docs/docker-hub/hub-images/groups.png differ diff --git a/docs/docker-hub/hub-images/hub.png b/docs/docker-hub/hub-images/hub.png new file mode 100644 index 00000000..354a9cde Binary files /dev/null and b/docs/docker-hub/hub-images/hub.png differ diff --git a/docs/docker-hub/hub-images/invite.png b/docs/docker-hub/hub-images/invite.png new file mode 100644 index 00000000..c58b4abf Binary files /dev/null and b/docs/docker-hub/hub-images/invite.png differ diff --git a/docs/docker-hub/hub-images/org-repo-collaborators.png b/docs/docker-hub/hub-images/org-repo-collaborators.png new file mode 100644 index 00000000..3e25475f Binary files /dev/null and b/docs/docker-hub/hub-images/org-repo-collaborators.png differ diff --git a/docs/docker-hub/hub-images/orgs.png b/docs/docker-hub/hub-images/orgs.png new file mode 100644 index 00000000..ffd368c2 Binary files /dev/null and b/docs/docker-hub/hub-images/orgs.png differ diff --git a/docs/docker-hub/hub-images/repos.png b/docs/docker-hub/hub-images/repos.png new file mode 100644 index 00000000..c07c3d1f Binary files /dev/null and b/docs/docker-hub/hub-images/repos.png differ diff --git a/docs/docker-hub/index.md b/docs/docker-hub/index.md new file mode 100644 index 00000000..42b1a4c8 --- /dev/null +++ b/docs/docker-hub/index.md @@ -0,0 +1,38 @@ + + +# Docker Hub + +The [Docker Hub](https://hub.docker.com) provides a cloud-based platform service +for distributed applications, including container image distribution and change +management, user and team collaboration, and lifecycle workflow automation. + +![DockerHub](/docker-hub/hub-images/hub.png) + +## [Finding and pulling images](./userguide.md) + +Find out how to [use the Docker Hub](./userguide.md) to find and pull Docker +images to run or build upon. + +## [Accounts](./accounts.md) + +[Learn how to create](./accounts.md) a Docker Hub +account and manage your organizations and groups. + +## [Your Repositories](./repos.md) + +Find out how to share your Docker images in [Docker Hub +repositories](./repos.md) and how to store and manage private images. 
+ +## [Automated builds](./builds.md) + +Learn how to automate your build and deploy pipeline with [Automated +Builds](./builds.md) + diff --git a/docs/docker-hub/official_repos.md b/docs/docker-hub/official_repos.md new file mode 100644 index 00000000..0bb24865 --- /dev/null +++ b/docs/docker-hub/official_repos.md @@ -0,0 +1,113 @@ + + +# Official Repositories on Docker Hub + +The Docker [Official Repositories](http://registry.hub.docker.com/official) are +a curated set of Docker repositories that are promoted on Docker Hub. They are +designed to: + +* Provide essential base OS repositories (for example, + [`ubuntu`](https://registry.hub.docker.com/_/ubuntu/), + [`centos`](https://registry.hub.docker.com/_/centos/)) that serve as the + starting point for the majority of users. + +* Provide drop-in solutions for popular programming language runtimes, data + stores, and other services, similar to what a Platform-as-a-Service (PAAS) + would offer. + +* Exemplify [`Dockerfile` best practices](/articles/dockerfile_best-practices) + and provide clear documentation to serve as a reference for other `Dockerfile` + authors. + +* Ensure that security updates are applied in a timely manner. This is + particularly important as many Official Repositories are some of the most + popular on Docker Hub. + +* Provide a channel for software vendors to redistribute up-to-date and + supported versions of their products. Organization accounts on Docker Hub can + also serve this purpose, without the careful review or restrictions on what + can be published. + +Docker, Inc. sponsors a dedicated team that is responsible for reviewing and +publishing all Official Repositories content. This team works in collaboration +with upstream software maintainers, security experts, and the broader Docker +community. + +While it is preferable to have upstream software authors maintaining their +corresponding Official Repositories, this is not a strict requirement. Creating +and maintaining images for Official Repositories is a public process. It takes +place openly on GitHub where participation is encouraged. Anyone can provide +feedback, contribute code, suggest process changes, or even propose a new +Official Repository. + +## Should I use Official Repositories? + +New Docker users are encouraged to use the Official Repositories in their +projects. These repositories have clear documentation, promote best practices, +and are designed for the most common use cases. Advanced users are encouraged to +review the Official Repositories as part of their `Dockerfile` learning process. + +A common rationale for diverging from Official Repositories is to optimize for +image size. For instance, many of the programming language stack images contain +a complete build toolchain to support installation of modules that depend on +optimized code. An advanced user could build a custom image with just the +necessary pre-compiled libraries to save space. + +A number of language stacks such as +[`python`](https://registry.hub.docker.com/_/python/) and +[`ruby`](https://registry.hub.docker.com/_/ruby/) have `-slim` tag variants +designed to fill the need for optimization. Even when these "slim" variants are +insufficient, it is still recommended to inherit from an Official Repository +base OS image to leverage the ongoing maintenance work, rather than duplicating +these efforts. + +## How can I get involved? 
+
+All Official Repositories contain a **User Feedback** section in their
+documentation which covers the details for that specific repository. In most
+cases, the GitHub repository which contains the Dockerfiles for an Official
+Repository also has an active issue tracker. General feedback and support
+questions should be directed to `#docker-library` on Freenode IRC.
+
+## How do I create a new Official Repository?
+
+From a high level, an Official Repository starts out as a proposal in the form
+of a set of GitHub pull requests. You'll find detailed and objective proposal
+requirements in the following GitHub repositories:
+
+* [docker-library/official-images](https://github.com/docker-library/official-images)
+
+* [docker-library/docs](https://github.com/docker-library/docs)
+
+The Official Repositories team, with help from community contributors, formally
+reviews each proposal and provides feedback to the author. This initial review
+process may require a bit of back and forth before the proposal is accepted.
+
+There are also subjective considerations during the review process. These
+subjective concerns boil down to the basic question: "is this image generally
+useful?" For example, the [`python`](https://registry.hub.docker.com/_/python/)
+Official Repository is "generally useful" to the large Python developer
+community, whereas an obscure text adventure game written in Python last week is
+not.
+
+When a new proposal is accepted, the author becomes responsible for keeping
+their images up-to-date and responding to user feedback. The Official
+Repositories team becomes responsible for publishing the images and
+documentation on Docker Hub. Updates to the Official Repository follow the same
+pull request process, though with less review. The Official Repositories team
+ultimately acts as a gatekeeper for all changes, which helps mitigate the risk
+of introducing quality and security issues.
+
+> **Note**: If you are interested in proposing an Official Repository, but would
+> like to discuss it with Docker, Inc. privately first, please send your
+> inquiries to partners@docker.com. There is no fast-track or pay-for-status
+> option.
diff --git a/docs/docker-hub/repos.md b/docs/docker-hub/repos.md
new file mode 100644
index 00000000..0bddc02b
--- /dev/null
+++ b/docs/docker-hub/repos.md
@@ -0,0 +1,193 @@
+
+
+# Your Hub repositories
+
+Docker Hub repositories make it possible for you to share images with co-workers,
+customers or the Docker community at large. If you're building your images internally,
+either on your own Docker daemon, or using your own continuous integration services,
+you can push them to a Docker Hub repository that you add to your Docker Hub user or
+organization account.
+
+Alternatively, if the source code for your Docker image is on GitHub or Bitbucket,
+you can use an "Automated build" repository, which is built by the Docker Hub
+services. See the [automated builds documentation](./builds.md) to read about
+the extra functionality provided by those services.
+
+![repositories](/docker-hub/hub-images/repos.png)
+
+Your Docker Hub repositories have a number of useful features.
+
+## Stars
+
+Your repositories can be starred and you can star repositories in
+return. Stars are a way to show that you like a repository. They are
+also an easy way of bookmarking your favorites.
+
+## Comments
+
+You can interact with other members of the Docker community and maintainers by
+leaving comments on repositories.
+If you find any comments that are not appropriate, you can flag them for
+review.
+
+## Collaborators and their role
+
+A collaborator is someone you want to give access to a private
+repository. Once designated, they can `push` and `pull` to your
+repositories. They will not be allowed to perform any administrative
+tasks such as deleting the repository or changing its status from
+private to public.
+
+> **Note:**
+> A collaborator cannot add other collaborators. Only the owner of
+> the repository has administrative access.
+
+You can also assign more granular collaborator rights ("Read", "Write", or "Admin")
+on Docker Hub by using organizations and groups. For more information,
+see the [accounts documentation](accounts/).
+
+## Private repositories
+
+Private repositories allow you to have repositories that contain images
+that you want to keep private, either to your own account or within an
+organization or group.
+
+To work with a private repository on [Docker
+Hub](https://hub.docker.com), you will need to add one via the [Add
+Repository](https://registry.hub.docker.com/account/repositories/add/)
+link. You get one private repository for free with your Docker Hub
+account. If you need more, you can upgrade your [Docker
+Hub](https://registry.hub.docker.com/plans/) plan.
+
+Once the private repository is created, you can `push` and `pull` images
+to and from it using Docker.
+
+> *Note:* You need to be signed in and have access to work with a
+> private repository.
+
+Private repositories are just like public ones. However, it isn't
+possible to browse them or search their content on the public registry.
+They do not get cached the same way as a public repository either.
+
+It is possible to give access to a private repository to those whom you
+designate (i.e., collaborators) from its Settings page. From there, you
+can also switch repository status (*public* to *private*, or
+vice-versa). You will need to have an available private repository slot
+open before you can do such a switch. If you don't have any available,
+you can always upgrade your [Docker
+Hub](https://registry.hub.docker.com/plans/) plan.
+
+## Webhooks
+
+A webhook is an HTTP callback triggered by a specific event.
+You can use a Hub repository webhook to notify people, services, and other
+applications after a new image is pushed to your repository (this also happens
+for Automated builds). For example, you can trigger an automated test or
+deployment to happen as soon as the image is available.
+
+To get started adding webhooks, go to the desired repository in the Hub,
+and click "Webhooks" under the "Settings" box.
+A webhook is called only after a successful `push` is
+made. The webhook calls are HTTP POST requests with a JSON payload
+similar to the example shown below.
+
+*Example webhook JSON payload:*
+
+```
+{
+  "callback_url": "https://registry.hub.docker.com/u/svendowideit/busybox/hook/2141bc0cdec4hebec411i4c1g40242eg110020/",
+  "push_data": {
+    "images": [
+        "27d47432a69bca5f2700e4dff7de0388ed65f9d3fb1ec645e2bc24c223dc1cc3",
+        "51a9c7c1f8bb2fa19bcd09789a34e63f35abb80044bc10196e304f6634cc582c",
+        ...
+ ], + "pushed_at": 1.417566822e+09, + "pusher": "svendowideit" + }, + "repository": { + "comment_count": 0, + "date_created": 1.417566665e+09, + "description": "", + "full_description": "webhook triggered from a 'docker push'", + "is_official": false, + "is_private": false, + "is_trusted": false, + "name": "busybox", + "namespace": "svendowideit", + "owner": "svendowideit", + "repo_name": "svendowideit/busybox", + "repo_url": "https://registry.hub.docker.com/u/svendowideit/busybox/", + "star_count": 0, + "status": "Active" +} +``` + + + +For testing, you can try an HTTP request tool like [requestb.in](http://requestb.in/). + +> **Note**: The Docker Hub servers use an elastic IP range, so you can't +> filter requests by IP. + +### Webhook chains + +Webhook chains allow you to chain calls to multiple services. For example, +you can use this to trigger a deployment of your container only after +it has been successfully tested, then update a separate Changelog once the +deployment is complete. +After clicking the "Add webhook" button, simply add as many URLs as necessary +in your chain. + +The first webhook in a chain will be called after a successful push. Subsequent +URLs will be contacted after the callback has been validated. + +### Validating a callback + +In order to validate a callback in a webhook chain, you need to + +1. Retrieve the `callback_url` value in the request's JSON payload. +1. Send a POST request to this URL containing a valid JSON body. + +> **Note**: A chain request will only be considered complete once the last +> callback has been validated. + +To help you debug or simply view the results of your webhook(s), +view the "History" of the webhook available on its settings page. + +#### Callback JSON data + +The following parameters are recognized in callback data: + +* `state` (required): Accepted values are `success`, `failure` and `error`. + If the state isn't `success`, the webhook chain will be interrupted. +* `description`: A string containing miscellaneous information that will be + available on the Docker Hub. Maximum 255 characters. +* `context`: A string containing the context of the operation. Can be retrieved + from the Docker Hub. Maximum 100 characters. +* `target_url`: The URL where the results of the operation can be found. Can be + retrieved on the Docker Hub. + +*Example callback payload:* + + { + "state": "success", + "description": "387 tests PASSED", + "context": "Continuous integration by Acme CI", + "target_url": "http://ci.acme.com/results/afd339c1c3d27" + } + +## Mark as unlisted + +By marking a repository as unlisted, you can create a publicly pullable repository +which will not be in the Hub or commandline search. This allows you to have a limited +release, but does not restrict access to anyone that is told, or guesses the repository +name. diff --git a/docs/docker-hub/userguide.md b/docs/docker-hub/userguide.md new file mode 100644 index 00000000..4a9bb73d --- /dev/null +++ b/docs/docker-hub/userguide.md @@ -0,0 +1,63 @@ + + +# Using the Docker Hub + +Docker Hub is used to find and pull Docker images to run or build upon, and to +distribute and build images for other users to use. + +![your profile](/docker-hub/hub-images/dashboard.png) + +## Finding repositories and images + +There are two ways you can search for public repositories and images available +on the Docker Hub. 
You can use the "Search" tool on the Docker Hub website, or
+you can `search` for all the repositories and images using the Docker
+command-line tool:
+
+    $ docker search ubuntu
+
+Both will show you a list of the currently available public repositories on the
+Docker Hub which match the provided keyword.
+
+If a repository is private or marked as unlisted, it won't be in the repository
+search results. To see all the repositories you have access to and their statuses,
+you can look at your profile page on [Docker Hub](https://hub.docker.com).
+
+## Pulling, running and building images
+
+You can find more information on [working with Docker images](../userguide/dockerimages.md).
+
+## Official Repositories
+
+The Docker Hub contains a number of [Official
+Repositories](http://registry.hub.docker.com/official). These are
+certified repositories from vendors and contributors to Docker. They
+contain Docker images from vendors like Canonical, Oracle, and Red Hat
+that you can use to build applications and services.
+
+If you use Official Repositories you know you're using an optimized and
+up-to-date image to power your applications.
+
+> **Note:**
+> If you would like to contribute an Official Repository for your
+> organization, see [Official Repositories on Docker
+> Hub](/docker-hub/official_repos) for more information.
+
+## Building and shipping your own repositories and images
+
+The Docker Hub provides you and your team with a place to build and ship Docker images.
+
+Collections of Docker images are managed using repositories.
+
+You can configure two types of repositories to manage on the Docker Hub:
+[Repositories](./repos.md), which allow you to push images to the Hub from your local Docker daemon,
+and [Automated Builds](./builds.md), which allow you to configure GitHub or Bitbucket to
+trigger the Hub to rebuild repositories when changes are made to the source repository.
diff --git a/docs/examples/apt-cacher-ng.Dockerfile b/docs/examples/apt-cacher-ng.Dockerfile
new file mode 100644
index 00000000..d1f76572
--- /dev/null
+++ b/docs/examples/apt-cacher-ng.Dockerfile
@@ -0,0 +1,15 @@
+#
+# Build: docker build -t apt-cacher .
+# Run: docker run -d -p 3142:3142 --name apt-cacher-run apt-cacher
+#
+# and then you can run containers with:
+# docker run -t -i --rm -e http_proxy=http://dockerhost:3142/ debian bash
+#
+FROM ubuntu
+MAINTAINER SvenDowideit@docker.com
+
+VOLUME ["/var/cache/apt-cacher-ng"]
+RUN apt-get update && apt-get install -y apt-cacher-ng
+
+EXPOSE 3142
+CMD chmod 777 /var/cache/apt-cacher-ng && /etc/init.d/apt-cacher-ng start && tail -f /var/log/apt-cacher-ng/*
diff --git a/docs/examples/apt-cacher-ng.md b/docs/examples/apt-cacher-ng.md
new file mode 100644
index 00000000..20cb977e
--- /dev/null
+++ b/docs/examples/apt-cacher-ng.md
@@ -0,0 +1,113 @@
+
+# Dockerizing an apt-cacher-ng service
+
+> **Note**:
+> - **If you don't like sudo** then see [*Giving non-root
+>   access*](/installation/binaries/#giving-non-root-access).
+> - **If you're using OS X or docker via TCP** then you shouldn't use
+>   sudo.
+
+When you have multiple Docker servers, or build unrelated Docker
+containers which can't make use of the Docker build cache, it can be
+useful to have a caching proxy for your packages. This container makes
+the second download of any package almost instant.
+
+Use the following Dockerfile:
+
+    #
+    # Build: docker build -t apt-cacher .
+    # Run: docker run -d -p 3142:3142 --name apt-cacher-run apt-cacher
+    #
+    # and then you can run containers with:
+    # docker run -t -i --rm -e http_proxy=http://dockerhost:3142/ debian bash
+    #
+    FROM ubuntu
+    MAINTAINER SvenDowideit@docker.com
+
+    VOLUME ["/var/cache/apt-cacher-ng"]
+    RUN apt-get update && apt-get install -y apt-cacher-ng
+
+    EXPOSE 3142
+    CMD chmod 777 /var/cache/apt-cacher-ng && /etc/init.d/apt-cacher-ng start && tail -f /var/log/apt-cacher-ng/*
+
+Build the image using:
+
+    $ docker build -t eg_apt_cacher_ng .
+
+Then run it, mapping the exposed port to one on the host:
+
+    $ docker run -d -p 3142:3142 --name test_apt_cacher_ng eg_apt_cacher_ng
+
+To see the logfiles that are `tailed` in the default command, you can
+use:
+
+    $ docker logs -f test_apt_cacher_ng
+
+To get your Debian-based containers to use the proxy, you can do one of
+three things:
+
+1. Add an apt proxy setting:
+   `echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/apt.conf.d/01proxy`
+2. Set an environment variable:
+   `http_proxy=http://dockerhost:3142/`
+3. Change your `sources.list` entries to start with
+   `http://dockerhost:3142/`
+
+**Option 1** injects the settings safely into your apt configuration in
+a local version of a common base:
+
+    FROM ubuntu
+    RUN echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/apt.conf.d/01proxy
+    RUN apt-get update && apt-get install -y vim git
+
+    # docker build -t my_ubuntu .
+
+**Option 2** is good for testing, but will break other HTTP clients
+which obey `http_proxy`, such as `curl`, `wget` and others:
+
+    $ docker run --rm -t -i -e http_proxy=http://dockerhost:3142/ debian bash
+
+**Option 3** is the least portable, but there will be times when you
+might need to do it, and you can do it from your `Dockerfile`
+too.
+
+Apt-cacher-ng has some tools that allow you to manage the repository. They
+can be used by leveraging the `VOLUME` instruction and the image we built
+to run the service:
+
+    $ docker run --rm -t -i --volumes-from test_apt_cacher_ng eg_apt_cacher_ng bash
+
+    $$ /usr/lib/apt-cacher-ng/distkill.pl
+    Scanning /var/cache/apt-cacher-ng, please wait...
+    Found distributions:
+    bla, taggedcount: 0
+         1. precise-security (36 index files)
+         2. wheezy (25 index files)
+         3. precise-updates (36 index files)
+         4. precise (36 index files)
+         5. wheezy-updates (18 index files)
+
+    Found architectures:
+         6. amd64 (36 index files)
+         7. i386 (24 index files)
+
+    WARNING: The removal action may wipe out whole directories containing
+             index files. Select d to see detailed list.
+
+    (Number nn: tag distribution or architecture nn; 0: exit; d: show details; r: remove tagged; q: quit): q
+
+Finally, clean up after your test by stopping and removing the
+container, and then removing the image:
+
+    $ docker stop test_apt_cacher_ng
+    $ docker rm test_apt_cacher_ng
+    $ docker rmi eg_apt_cacher_ng
diff --git a/docs/examples/couchdb_data_volumes.md b/docs/examples/couchdb_data_volumes.md
new file mode 100644
index 00000000..3c8f620f
--- /dev/null
+++ b/docs/examples/couchdb_data_volumes.md
@@ -0,0 +1,49 @@
+
+# Dockerizing a CouchDB service
+
+> **Note**:
+> - **If you don't like sudo** then see [*Giving non-root
+>   access*](/installation/binaries/#giving-non-root-access)
+
+Here's an example of using data volumes to share the same data between
+two CouchDB containers. This could be used for hot upgrades, testing
+different versions of CouchDB on the same data, etc.
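+
+In outline, the pattern is: start one container with a data volume, then start
+a second container that mounts the same volume with `--volumes-from` (a sketch
+only; the angle-bracket names are placeholders for the image and container
+used in the steps below):
+
+    $ docker run -d -v /var/lib/couchdb --name <first> <couchdb image>
+    $ docker run -d --volumes-from <first> --name <second> <couchdb image>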
+ +## Create first database + +Note that we're marking `/var/lib/couchdb` as a data volume. + + $ COUCH1=$(docker run -d -p 5984 -v /var/lib/couchdb shykes/couchdb:2013-05-03) + +## Add data to the first database + +We're assuming your Docker host is reachable at `localhost`. If not, +replace `localhost` with the public IP of your Docker host. + + $ HOST=localhost + $ URL="http://$HOST:$(docker port $COUCH1 5984 | grep -o '[1-9][0-9]*$')/_utils/" + $ echo "Navigate to $URL in your browser, and use the couch interface to add data" + +## Create second database + +This time, we're requesting shared access to `$COUCH1`'s volumes. + + $ COUCH2=$(docker run -d -p 5984 --volumes-from $COUCH1 shykes/couchdb:2013-05-03) + +## Browse data on the second database + + $ HOST=localhost + $ URL="http://$HOST:$(docker port $COUCH2 5984 | grep -o '[1-9][0-9]*$')/_utils/" + $ echo "Navigate to $URL in your browser. You should see the same data as in the first database"'!' + +Congratulations, you are now running two Couchdb containers, completely +isolated from each other *except* for their data. diff --git a/docs/examples/index.md b/docs/examples/index.md new file mode 100644 index 00000000..be52e3d0 --- /dev/null +++ b/docs/examples/index.md @@ -0,0 +1,23 @@ + + +# Examples + +This section contains the following: + +* [Dockerizing MongoDB](mongodb.md) +* [Dockerizing PostgreSQL](postgresql_service.md) +* [Dockerizing a CouchDB service](couchdb_data_volumes.md) +* [Dockerizing a Node.js web app](nodejs_web_app.md) +* [Dockerizing a Redis service](running_redis_service.md) +* [Dockerizing an apt-cacher-ng service](apt-cacher-ng.md) +* [Dockerizing applications: A 'Hello world'](/userguide/dockerizing) diff --git a/docs/examples/mongodb.md b/docs/examples/mongodb.md new file mode 100644 index 00000000..6c0974bb --- /dev/null +++ b/docs/examples/mongodb.md @@ -0,0 +1,179 @@ + + +# Dockerizing MongoDB + +## Introduction + +In this example, we are going to learn how to build a Docker image with +MongoDB pre-installed. We'll also see how to `push` that image to the +[Docker Hub registry](https://hub.docker.com) and share it with others! + +> **Note:** +> +> This guide will show the mechanics of building a MongoDB container, but +> you will probably want to use the official image on [Docker Hub]( https://registry.hub.docker.com/_/mongo/) + +Using Docker and containers for deploying [MongoDB](https://www.mongodb.org/) +instances will bring several benefits, such as: + + - Easy to maintain, highly configurable MongoDB instances; + - Ready to run and start working within milliseconds; + - Based on globally accessible and shareable images. + +> **Note:** +> +> If you do **_not_** like `sudo`, you might want to check out: +> [*Giving non-root access*](/installation/binaries/#giving-non-root-access). + +## Creating a Dockerfile for MongoDB + +Let's create our `Dockerfile` and start building it: + + $ nano Dockerfile + +Although optional, it is handy to have comments at the beginning of a +`Dockerfile` explaining its purpose: + + # Dockerizing MongoDB: Dockerfile for building MongoDB images + # Based on ubuntu:latest, installs MongoDB following the instructions from: + # http://docs.mongodb.org/manual/tutorial/install-mongodb-on-ubuntu/ + +> **Tip:** `Dockerfile`s are flexible. However, they need to follow a certain +> format. The first item to be defined is the name of an image, which becomes +> the *parent* of your *Dockerized MongoDB* image. 
+
+We will build our image using the latest version of Ubuntu from the
+[Docker Hub Ubuntu](https://registry.hub.docker.com/_/ubuntu/) repository.
+
+    # Format: FROM repository[:version]
+    FROM ubuntu:latest
+
+Continuing, we will declare the `MAINTAINER` of the `Dockerfile`:
+
+    # Format: MAINTAINER Name <email@addr>
+    MAINTAINER M.Y. Name <myname@addr>
+
+> **Note:** Although Ubuntu systems have MongoDB packages, they are likely to
+> be outdated. Therefore in this example, we will use the official MongoDB
+> packages.
+
+We will begin by importing the MongoDB public GPG key. We will also create
+a MongoDB repository file for the package manager.
+
+    # Installation:
+    # Import MongoDB public GPG key AND create a MongoDB list file
+    RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
+    RUN echo "deb http://repo.mongodb.org/apt/ubuntu "$(lsb_release -sc)"/mongodb-org/3.0 multiverse" | tee /etc/apt/sources.list.d/mongodb-org-3.0.list
+
+After this initial preparation, we can update our packages and install MongoDB.
+
+    # Update apt-get sources AND install MongoDB
+    RUN apt-get update && apt-get install -y mongodb-org
+
+> **Tip:** You can install a specific version of MongoDB by using a list
+> of required packages with versions, e.g.:
+>
+>     RUN apt-get update && apt-get install -y mongodb-org=3.0.1 mongodb-org-server=3.0.1 mongodb-org-shell=3.0.1 mongodb-org-mongos=3.0.1 mongodb-org-tools=3.0.1
+
+MongoDB requires a data directory. Let's create it as the final step of our
+installation instructions.
+
+    # Create the MongoDB data directory
+    RUN mkdir -p /data/db
+
+Lastly, we set the `ENTRYPOINT`, which tells Docker to run `mongod` inside
+the containers launched from our MongoDB image. And for ports, we will use
+the `EXPOSE` instruction.
+
+    # Expose port 27017 from the container to the host
+    EXPOSE 27017
+
+    # Set /usr/bin/mongod as the dockerized entry-point application
+    ENTRYPOINT ["/usr/bin/mongod"]
+
+Now save the file and let's build our image.
+
+> **Note:**
+>
+> The full version of this `Dockerfile` can be found [here](/examples/mongodb/Dockerfile).
+
+## Building the MongoDB Docker image
+
+With our `Dockerfile`, we can now build the MongoDB image using Docker. Unless
+experimenting, it is always a good practice to tag Docker images by passing the
+`--tag` option to the `docker build` command.
+
+    # Format: docker build --tag/-t <user-name>/<repository> .
+    # Example:
+    $ docker build --tag my/repo .
+
+Once this command is issued, Docker will go through the `Dockerfile` and build
+the image. The final image will be tagged `my/repo`.
+
+## Pushing the MongoDB image to Docker Hub
+
+All Docker image repositories can be hosted and shared on
+[Docker Hub](https://hub.docker.com) with the `docker push` command. For this,
+you need to be logged-in.
+
+    # Log-in
+    $ docker login
+    Username:
+    ..
+
+    # Push the image
+    # Format: docker push <user-name>/<repository>
+    $ docker push my/repo
+    The push refers to a repository [my/repo] (len: 1)
+    Sending image list
+    Pushing repository my/repo (1 tags)
+    ..
+
+## Using the MongoDB image
+
+Using the MongoDB image we created, we can run one or more MongoDB instances
+as daemon process(es).
+
+    # Basic way
+    # Usage: docker run --name <name for container> -d <user-name>/<repository>
+    $ docker run -p 27017:27017 --name mongo_instance_001 -d my/repo
+
+    # Dockerized MongoDB, lean and mean!
+    # Usage: docker run --name <name for container> -d <user-name>/<repository> --noprealloc --smallfiles
+    $ docker run -p 27017:27017 --name mongo_instance_001 -d my/repo --noprealloc --smallfiles
+
+    # Checking out the logs of a MongoDB container
+    # Usage: docker logs <name for container>
+    $ docker logs mongo_instance_001
+
+    # Playing with MongoDB
+    # Usage: mongo --port <port you get from `docker ps`>
+    $ mongo --port 27017
+
+    # If using boot2docker
+    # Usage: mongo --port <port you get from `docker ps`> --host <ip address you get from `boot2docker ip`>
+    $ mongo --port 27017 --host 192.168.59.103
+
+> **Tip:** If you want to run two containers on the same engine, then you will
+> need to map the exposed port to two different ports on the host:
+
+    # Start two containers and map the ports
+    $ docker run -p 28001:27017 --name mongo_instance_001 -d my/repo
+    $ docker run -p 28002:27017 --name mongo_instance_002 -d my/repo
+
+    # Now you can connect to each MongoDB instance on the two ports
+    $ mongo --port 28001
+    $ mongo --port 28002
+
+See also:
+
+ - [Linking containers](/userguide/dockerlinks)
+ - [Cross-host linking containers](/articles/ambassador_pattern_linking/)
+ - [Creating an Automated Build](/docker-io/builds/#automated-builds)
diff --git a/docs/examples/mongodb/Dockerfile b/docs/examples/mongodb/Dockerfile
new file mode 100644
index 00000000..3513da47
--- /dev/null
+++ b/docs/examples/mongodb/Dockerfile
@@ -0,0 +1,22 @@
+# Dockerizing MongoDB: Dockerfile for building MongoDB images
+# Based on ubuntu:latest, installs MongoDB following the instructions from:
+# http://docs.mongodb.org/manual/tutorial/install-mongodb-on-ubuntu/
+
+FROM ubuntu:latest
+MAINTAINER Docker
+
+# Installation:
+# Import MongoDB public GPG key AND create a MongoDB list file
+RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
+RUN echo "deb http://repo.mongodb.org/apt/ubuntu "$(lsb_release -sc)"/mongodb-org/3.0 multiverse" | tee /etc/apt/sources.list.d/mongodb-org-3.0.list
+# Update apt-get sources AND install MongoDB
+RUN apt-get update && apt-get install -y mongodb-org
+
+# Create the MongoDB data directory
+RUN mkdir -p /data/db
+
+# Expose port 27017 from the container to the host
+EXPOSE 27017
+
+# Set /usr/bin/mongod as the dockerized entry-point application
+ENTRYPOINT ["/usr/bin/mongod"]
diff --git a/docs/examples/nodejs_web_app.md b/docs/examples/nodejs_web_app.md
new file mode 100644
index 00000000..f605f29c
--- /dev/null
+++ b/docs/examples/nodejs_web_app.md
@@ -0,0 +1,197 @@
+
+# Dockerizing a Node.js web app
+
+> **Note**:
+> - **If you don't like sudo** then see [*Giving non-root
+>   access*](/installation/binaries/#giving-non-root-access)
+
+The goal of this example is to show you how you can build your own
+Docker images from a parent image using a `Dockerfile`. We will do that
+by making a simple Node.js hello world web
+application running on CentOS. You can get the full source code at
+[https://github.com/enokd/docker-node-hello/](https://github.com/enokd/docker-node-hello/).
+
+## Create Node.js app
+
+First, create a directory `src` where all the files
+will live.
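+From a shell, that is simply (assuming you want the same layout used in the
+rest of this example):
+
+    $ mkdir src
+    $ cd src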
+Then create a `package.json` file that describes your app and its
+dependencies:
+
+    {
+      "name": "docker-centos-hello",
+      "private": true,
+      "version": "0.0.1",
+      "description": "Node.js Hello world app on CentOS using docker",
+      "author": "Daniel Gasienica <daniel@gasienica.ch>",
+      "dependencies": {
+        "express": "3.2.4"
+      }
+    }
+
+Then, create an `index.js` file that defines a web
+app using the [Express.js](http://expressjs.com/) framework:
+
+    var express = require('express');
+
+    // Constants
+    var PORT = 8080;
+
+    // App
+    var app = express();
+    app.get('/', function (req, res) {
+      res.send('Hello world\n');
+    });
+
+    app.listen(PORT);
+    console.log('Running on http://localhost:' + PORT);
+
+In the next steps, we'll look at how you can run this app inside a
+CentOS container using Docker. First, you'll need to build a Docker
+image of your app.
+
+## Creating a Dockerfile
+
+Create an empty file called `Dockerfile`:
+
+    touch Dockerfile
+
+Open the `Dockerfile` in your favorite text editor.
+
+Define the parent image you want to use to build your own image on
+top of. Here, we'll use
+[CentOS](https://registry.hub.docker.com/_/centos/) (tag: `centos6`)
+available on the [Docker Hub](https://hub.docker.com/):
+
+    FROM centos:centos6
+
+Since we're building a Node.js app, you'll have to install Node.js as
+well as npm on your CentOS image. Node.js is required to run your app
+and npm to install your app's dependencies defined in
+`package.json`. To install the right package for
+CentOS, we'll use the instructions from the [Node.js wiki](
+https://github.com/joyent/node/wiki/Installing-Node.js-via-package-manager#rhelcentosscientific-linux-6):
+
+    # Enable EPEL for Node.js
+    RUN rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm
+    # Install Node.js and npm
+    RUN yum install -y npm
+
+To bundle your app's source code inside the Docker image, use the `COPY`
+instruction:
+
+    # Bundle app source
+    COPY . /src
+
+Install your app dependencies using the `npm` binary:
+
+    # Install app dependencies
+    RUN cd /src; npm install
+
+Your app binds to port `8080`, so you'll use the `EXPOSE` instruction to have
+it mapped by the `docker` daemon:
+
+    EXPOSE 8080
+
+Last but not least, define the command to run your app using `CMD`, which
+defines your runtime, i.e. `node`, and the path to your app, i.e. `/src/index.js`
+(see the step where we added the source to the container):
+
+    CMD ["node", "/src/index.js"]
+
+Your `Dockerfile` should now look like this:
+
+    FROM centos:centos6
+
+    # Enable EPEL for Node.js
+    RUN rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm
+    # Install Node.js and npm
+    RUN yum install -y npm
+
+    # Bundle app source
+    COPY . /src
+    # Install app dependencies
+    RUN cd /src; npm install
+
+    EXPOSE 8080
+    CMD ["node", "/src/index.js"]
+
+## Building your image
+
+Go to the directory that has your `Dockerfile` and run the following command
+to build a Docker image. The `-t` flag lets you tag your image so it's easier
+to find later using the `docker images` command:
+
+    $ docker build -t <your username>/centos-node-hello .
+
+Your image will now be listed by Docker:
+
+    $ docker images
+
+    # Example
+    REPOSITORY                          TAG        ID              CREATED
+    centos                              centos6    539c0211cd76    8 weeks ago
+    <your username>/centos-node-hello   latest     d64d3505b0d2    2 hours ago
+
+## Run the image
+
+Running your image with `-d` runs the container in detached mode, leaving the
+container running in the background. The `-p` flag redirects a public port to
+a private port in the container.
+Run the image you previously built:
+
+    $ docker run -p 49160:8080 -d <your username>/centos-node-hello
+
+Print the output of your app:
+
+    # Get container ID
+    $ docker ps
+
+    # Print app output
+    $ docker logs <container id>
+
+    # Example
+    Running on http://localhost:8080
+
+## Test
+
+To test your app, get the port of your app that Docker mapped:
+
+    $ docker ps
+
+    # Example
+    ID            IMAGE                                      COMMAND             ...   PORTS
+    ecce33b30ebf  <your username>/centos-node-hello:latest   node /src/index.js        49160->8080
+
+In the example above, Docker mapped the `8080` port of the container to `49160`.
+
+Now you can call your app using `curl` (install if needed via:
+`sudo apt-get install curl`):
+
+    $ curl -i localhost:49160
+
+    HTTP/1.1 200 OK
+    X-Powered-By: Express
+    Content-Type: text/html; charset=utf-8
+    Content-Length: 12
+    Date: Sun, 02 Jun 2013 03:53:22 GMT
+    Connection: keep-alive
+
+    Hello world
+
+If you use Boot2docker on OS X, the port is actually mapped to the Docker host VM,
+and you should use the following command:
+
+    $ curl $(boot2docker ip):49160
+
+We hope this tutorial helped you get up and running with Node.js and
+CentOS on Docker. You can get the full source code at
+[https://github.com/enokd/docker-node-hello/](https://github.com/enokd/docker-node-hello/).
diff --git a/docs/examples/postgresql_service.Dockerfile b/docs/examples/postgresql_service.Dockerfile
new file mode 100644
index 00000000..d5767c93
--- /dev/null
+++ b/docs/examples/postgresql_service.Dockerfile
@@ -0,0 +1,49 @@
+#
+# example Dockerfile for https://docs.docker.com/examples/postgresql_service/
+#
+
+FROM ubuntu
+MAINTAINER SvenDowideit@docker.com
+
+# Add the PostgreSQL PGP key to verify their Debian packages.
+# It should be the same key as https://www.postgresql.org/media/keys/ACCC4CF8.asc
+RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8
+
+# Add PostgreSQL's repository. It contains the most recent stable release
+# of PostgreSQL, ``9.3``.
+RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main" > /etc/apt/sources.list.d/pgdg.list
+
+# Install ``python-software-properties``, ``software-properties-common`` and PostgreSQL 9.3
+# There are some warnings (in red) that show up during the build. You can hide
+# them by prefixing each apt-get statement with DEBIAN_FRONTEND=noninteractive
+RUN apt-get update && apt-get install -y python-software-properties software-properties-common postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3
+
+# Note: The official Debian and Ubuntu images automatically ``apt-get clean``
+# after each ``apt-get``
+
+# Run the rest of the commands as the ``postgres`` user created by the ``postgres-9.3`` package when it was ``apt-get installed``
+USER postgres
+
+# Create a PostgreSQL role named ``docker`` with ``docker`` as the password and
+# then create a database `docker` owned by the ``docker`` role.
+# Note: here we use ``&&\`` to run commands one after the other - the ``\``
+# allows the RUN command to span multiple lines.
+RUN /etc/init.d/postgresql start &&\
+    psql --command "CREATE USER docker WITH SUPERUSER PASSWORD 'docker';" &&\
+    createdb -O docker docker
+
+# Adjust PostgreSQL configuration so that remote connections to the
+# database are possible.
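+# Note: ``0.0.0.0/0`` below matches every client address, and ``md5`` only
+# requires the ``docker`` password set above. That is convenient for
+# development, but far too permissive for production use.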
+RUN echo "host all all 0.0.0.0/0 md5" >> /etc/postgresql/9.3/main/pg_hba.conf + +# And add ``listen_addresses`` to ``/etc/postgresql/9.3/main/postgresql.conf`` +RUN echo "listen_addresses='*'" >> /etc/postgresql/9.3/main/postgresql.conf + +# Expose the PostgreSQL port +EXPOSE 5432 + +# Add VOLUMEs to allow backup of config, logs and databases +VOLUME ["/etc/postgresql", "/var/log/postgresql", "/var/lib/postgresql"] + +# Set the default command to run when starting the container +CMD ["/usr/lib/postgresql/9.3/bin/postgres", "-D", "/var/lib/postgresql/9.3/main", "-c", "config_file=/etc/postgresql/9.3/main/postgresql.conf"] diff --git a/docs/examples/postgresql_service.md b/docs/examples/postgresql_service.md new file mode 100644 index 00000000..968cf773 --- /dev/null +++ b/docs/examples/postgresql_service.md @@ -0,0 +1,153 @@ + + +# Dockerizing PostgreSQL + +> **Note**: +> - **If you don't like sudo** then see [*Giving non-root +> access*](/installation/binaries/#giving-non-root-access) + +## Installing PostgreSQL on Docker + +Assuming there is no Docker image that suits your needs on the [Docker +Hub](http://hub.docker.com), you can create one yourself. + +Start by creating a new `Dockerfile`: + +> **Note**: +> This PostgreSQL setup is for development-only purposes. Refer to the +> PostgreSQL documentation to fine-tune these settings so that it is +> suitably secure. + + # + # example Dockerfile for https://docs.docker.com/examples/postgresql_service/ + # + + FROM ubuntu + MAINTAINER SvenDowideit@docker.com + + # Add the PostgreSQL PGP key to verify their Debian packages. + # It should be the same key as https://www.postgresql.org/media/keys/ACCC4CF8.asc + RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8 + + # Add PostgreSQL's repository. It contains the most recent stable release + # of PostgreSQL, ``9.3``. + RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main" > /etc/apt/sources.list.d/pgdg.list + + # Install ``python-software-properties``, ``software-properties-common`` and PostgreSQL 9.3 + # There are some warnings (in red) that show up during the build. You can hide + # them by prefixing each apt-get statement with DEBIAN_FRONTEND=noninteractive + RUN apt-get update && apt-get install -y python-software-properties software-properties-common postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3 + + # Note: The official Debian and Ubuntu images automatically ``apt-get clean`` + # after each ``apt-get`` + + # Run the rest of the commands as the ``postgres`` user created by the ``postgres-9.3`` package when it was ``apt-get installed`` + USER postgres + + # Create a PostgreSQL role named ``docker`` with ``docker`` as the password and + # then create a database `docker` owned by the ``docker`` role. + # Note: here we use ``&&\`` to run commands one after the other - the ``\`` + # allows the RUN command to span multiple lines. + RUN /etc/init.d/postgresql start &&\ + psql --command "CREATE USER docker WITH SUPERUSER PASSWORD 'docker';" &&\ + createdb -O docker docker + + # Adjust PostgreSQL configuration so that remote connections to the + # database are possible. 
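+    # Note: ``0.0.0.0/0`` below matches every client address, and ``md5``
+    # only requires the ``docker`` password set above. That is convenient
+    # for development, but far too permissive for production use.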
+    RUN echo "host all all 0.0.0.0/0 md5" >> /etc/postgresql/9.3/main/pg_hba.conf
+
+    # And add ``listen_addresses`` to ``/etc/postgresql/9.3/main/postgresql.conf``
+    RUN echo "listen_addresses='*'" >> /etc/postgresql/9.3/main/postgresql.conf
+
+    # Expose the PostgreSQL port
+    EXPOSE 5432
+
+    # Add VOLUMEs to allow backup of config, logs and databases
+    VOLUME ["/etc/postgresql", "/var/log/postgresql", "/var/lib/postgresql"]
+
+    # Set the default command to run when starting the container
+    CMD ["/usr/lib/postgresql/9.3/bin/postgres", "-D", "/var/lib/postgresql/9.3/main", "-c", "config_file=/etc/postgresql/9.3/main/postgresql.conf"]
+
+Build an image from the Dockerfile and assign it a name:
+
+    $ docker build -t eg_postgresql .
+
+And run the PostgreSQL server container (in the foreground):
+
+    $ docker run --rm -P --name pg_test eg_postgresql
+
+There are two ways to connect to the PostgreSQL server. We can use [*Link
+Containers*](/userguide/dockerlinks), or we can access it from our host
+(or the network).
+
+> **Note**:
+> The `--rm` flag removes the container when the container exits
+> successfully; it does not remove the image.
+
+### Using container linking
+
+Containers can be linked to another container's ports directly using
+`--link remote_name:local_alias` in the client's
+`docker run`. This will set a number of environment
+variables that can then be used to connect:
+
+    $ docker run --rm -t -i --link pg_test:pg eg_postgresql bash
+
+    postgres@7ef98b1b7243:/$ psql -h $PG_PORT_5432_TCP_ADDR -p $PG_PORT_5432_TCP_PORT -d docker -U docker --password
+
+### Connecting from your host system
+
+Assuming you have the postgresql-client installed, you can use the
+host-mapped port to test as well. You need to use `docker ps`
+to find out what local host port the container is mapped to
+first:
+
+    $ docker ps
+    CONTAINER ID        IMAGE                  COMMAND                CREATED             STATUS              PORTS                     NAMES
+    5e24362f27f6        eg_postgresql:latest   /usr/lib/postgresql/   About an hour ago   Up About an hour    0.0.0.0:49153->5432/tcp   pg_test
+    $ psql -h localhost -p 49153 -d docker -U docker --password
+
+### Testing the database
+
+Once you have authenticated and have a `docker=#`
+prompt, you can create a table and populate it:
+
+    psql (9.3.1)
+    Type "help" for help.
+
+    docker=# CREATE TABLE cities (
+    docker(#     name     varchar(80),
+    docker(#     location point
+    docker(# );
+    CREATE TABLE
+    docker=# INSERT INTO cities VALUES ('San Francisco', '(-194.0, 53.0)');
+    INSERT 0 1
+    docker=# select * from cities;
+         name      | location
+    ---------------+-----------
+     San Francisco | (-194,53)
+    (1 row)
+
+### Using the container volumes
+
+You can use the defined volumes to inspect the PostgreSQL log files and
+to backup your configuration and data:
+
+    $ docker run --rm --volumes-from pg_test -t -i busybox sh
+
+    / # ls
+    bin      etc      lib      linuxrc  mnt      proc     run      sys      usr
+    dev      home     lib64    media    opt      root     sbin     tmp      var
+    / # ls /etc/postgresql/9.3/main/
+    environment      pg_hba.conf      postgresql.conf
+    pg_ctl.conf      pg_ident.conf    start.conf
+    /tmp # ls /var/log
+    ldconfig    postgresql
diff --git a/docs/examples/running_redis_service.md b/docs/examples/running_redis_service.md
new file mode 100644
index 00000000..d8b673bd
--- /dev/null
+++ b/docs/examples/running_redis_service.md
@@ -0,0 +1,89 @@
+
+# Dockerizing a Redis service
+
+A very simple, no-frills Redis service attached to a web application
+using a link.
+
+## Create a Docker container for Redis
+
+Firstly, we create a `Dockerfile` for our new Redis
+image.
+
+    FROM ubuntu:14.04
+    RUN apt-get update && apt-get install -y redis-server
+    EXPOSE 6379
+    ENTRYPOINT ["/usr/bin/redis-server"]
+
+Next we build an image from our `Dockerfile`.
+Replace `<your username>` with your own user name:
+
+    $ docker build -t <your username>/redis .
+
+## Run the service
+
+Use the image we've just created and name your container `redis`.
+
+Running the service with `-d` runs the container in detached mode, leaving
+the container running in the background.
+
+Importantly, we're not exposing any ports on our container. Instead
+we're going to use a container link to provide access to our Redis
+database:
+
+    $ docker run --name redis -d <your username>/redis
+
+## Create your web application container
+
+Next we can create a container for our application. We're going to use
+the `--link` flag to create a link to the `redis` container we've just
+created, with an alias of `db`. This will create a secure tunnel to the
+`redis` container and expose the Redis instance running inside that
+container only to this container:
+
+    $ docker run --link redis:db -i -t ubuntu:14.04 /bin/bash
+
+Once inside our freshly created container we need to install Redis to
+get the `redis-cli` binary to test our connection:
+
+    $ sudo apt-get update
+    $ sudo apt-get install redis-server
+    $ sudo service redis-server stop
+
+As we've used the `--link redis:db` option, Docker
+has created some environment variables in our web application container:
+
+    $ env | grep DB_
+
+    # Should return something similar to this with your values
+    DB_NAME=/violet_wolf/db
+    DB_PORT_6379_TCP_PORT=6379
+    DB_PORT=tcp://172.17.0.33:6379
+    DB_PORT_6379_TCP=tcp://172.17.0.33:6379
+    DB_PORT_6379_TCP_ADDR=172.17.0.33
+    DB_PORT_6379_TCP_PROTO=tcp
+
+We can see that we've got a small list of environment variables prefixed
+with `DB`. The `DB` comes from the link alias specified when we launched
+the container. Let's use the `DB_PORT_6379_TCP_ADDR` variable to connect to
+our Redis container:
+
+    $ redis-cli -h $DB_PORT_6379_TCP_ADDR
+    redis 172.17.0.33:6379>
+    redis 172.17.0.33:6379> set docker awesome
+    OK
+    redis 172.17.0.33:6379> get docker
+    "awesome"
+    redis 172.17.0.33:6379> exit
+
+We could easily use this or other environment variables in our web
+application to make a connection to our `redis`
+container.
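+
+For example, still inside the linked application container, a quick
+connectivity check can use both the address and port variables from the link
+(a sketch; `PONG` is Redis's standard reply to `PING`):
+
+    $ redis-cli -h $DB_PORT_6379_TCP_ADDR -p $DB_PORT_6379_TCP_PORT ping
+    PONG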
diff --git a/docs/examples/running_riak_service.Dockerfile b/docs/examples/running_riak_service.Dockerfile
new file mode 100644
index 00000000..1051c1a4
--- /dev/null
+++ b/docs/examples/running_riak_service.Dockerfile
@@ -0,0 +1,31 @@
+# Riak
+#
+# VERSION 0.1.1
+
+# Use the Ubuntu base image provided by dotCloud
+FROM ubuntu:trusty
+MAINTAINER Hector Castro hector@basho.com
+
+# Install Riak repository before we do apt-get update, so that update happens
+# in a single step
+RUN apt-get install -q -y curl && \
+    curl -sSL https://packagecloud.io/install/repositories/basho/riak/script.deb | sudo bash
+
+# Install and setup project dependencies
+RUN apt-get update && \
+    apt-get install -y supervisor riak=2.0.5-1
+
+RUN mkdir -p /var/log/supervisor
+
+RUN locale-gen en_US en_US.UTF-8
+
+COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
+
+# Configure Riak to accept connections from any host
+RUN sed -i "s|listener.http.internal = 127.0.0.1:8098|listener.http.internal = 0.0.0.0:8098|" /etc/riak/riak.conf
+RUN sed -i "s|listener.protobuf.internal = 127.0.0.1:8087|listener.protobuf.internal = 0.0.0.0:8087|" /etc/riak/riak.conf
+
+# Expose Riak Protocol Buffers and HTTP interfaces
+EXPOSE 8087 8098
+
+CMD ["/usr/bin/supervisord"]
diff --git a/docs/examples/running_riak_service.md b/docs/examples/running_riak_service.md
new file mode 100644
index 00000000..b42a1af9
--- /dev/null
+++ b/docs/examples/running_riak_service.md
@@ -0,0 +1,108 @@
+
+# Dockerizing a Riak service
+
+The goal of this example is to show you how to build a Docker image with
+Riak pre-installed.
+
+## Creating a Dockerfile
+
+Create an empty file called `Dockerfile`:
+
+    $ touch Dockerfile
+
+Next, define the parent image you want to use to build your image on top
+of. We'll use [Ubuntu](https://registry.hub.docker.com/_/ubuntu/) (tag:
+`trusty`), which is available on [Docker Hub](https://hub.docker.com):
+
+    # Riak
+    #
+    # VERSION 0.1.1
+
+    # Use the Ubuntu base image provided by dotCloud
+    FROM ubuntu:trusty
+    MAINTAINER Hector Castro hector@basho.com
+
+After that, we install curl, which is used to download the repository setup
+script, and then we download and run that setup script:
+
+    # Install Riak repository before we do apt-get update, so that update happens
+    # in a single step
+    RUN apt-get install -q -y curl && \
+        curl -sSL https://packagecloud.io/install/repositories/basho/riak/script.deb | sudo bash
+
+Then we install and setup a few dependencies:
+
+ - `supervisor` is used to manage the Riak processes
+ - `riak=2.0.5-1` is the Riak package pinned to version 2.0.5
+
+    # Install and setup project dependencies
+    RUN apt-get update && \
+        apt-get install -y supervisor riak=2.0.5-1
+
+    RUN mkdir -p /var/log/supervisor
+
+    RUN locale-gen en_US en_US.UTF-8
+
+    COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
+
+After that, we modify Riak's configuration:
+
+    # Configure Riak to accept connections from any host
+    RUN sed -i "s|listener.http.internal = 127.0.0.1:8098|listener.http.internal = 0.0.0.0:8098|" /etc/riak/riak.conf
+    RUN sed -i "s|listener.protobuf.internal = 127.0.0.1:8087|listener.protobuf.internal = 0.0.0.0:8087|" /etc/riak/riak.conf
+
+Then, we expose the Riak Protocol Buffers and HTTP interfaces:
+
+    # Expose Riak Protocol Buffers and HTTP interfaces
+    EXPOSE 8087 8098
+
+Finally, run `supervisord` so that Riak is started:
+
+    CMD ["/usr/bin/supervisord"]
+
+## Create a supervisord configuration file
+
+Create an empty file called `supervisord.conf`.
+Make sure it's at the same directory level as your `Dockerfile`:
+
+    touch supervisord.conf
+
+Populate it with the following program definitions:
+
+    [supervisord]
+    nodaemon=true
+
+    [program:riak]
+    command=bash -c "/usr/sbin/riak console"
+    numprocs=1
+    autostart=true
+    autorestart=true
+    user=riak
+    environment=HOME="/var/lib/riak"
+    stdout_logfile=/var/log/supervisor/%(program_name)s.log
+    stderr_logfile=/var/log/supervisor/%(program_name)s.log
+
+## Build the Docker image for Riak
+
+Now you should be able to build a Docker image for Riak:
+
+    $ docker build -t "<yourname>/riak" .
+
+## Next steps
+
+Riak is a distributed database. Many production deployments consist of
+[at least five nodes](
+http://basho.com/why-your-riak-cluster-should-have-at-least-five-nodes/).
+See the [docker-riak](https://github.com/hectcastro/docker-riak) project for
+details on how to deploy a Riak cluster using Docker and Pipework.
diff --git a/docs/examples/running_ssh_service.Dockerfile b/docs/examples/running_ssh_service.Dockerfile
new file mode 100644
index 00000000..7aba7f68
--- /dev/null
+++ b/docs/examples/running_ssh_service.Dockerfile
@@ -0,0 +1,20 @@
+# sshd
+#
+# VERSION 0.0.2
+
+FROM ubuntu:14.04
+MAINTAINER Sven Dowideit <SvenDowideit@docker.com>
+
+RUN apt-get update && apt-get install -y openssh-server
+RUN mkdir /var/run/sshd
+RUN echo 'root:screencast' | chpasswd
+RUN sed -i 's/PermitRootLogin without-password/PermitRootLogin yes/' /etc/ssh/sshd_config
+
+# SSH login fix. Otherwise user is kicked off after login
+RUN sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd
+
+ENV NOTVISIBLE "in users profile"
+RUN echo "export VISIBLE=now" >> /etc/profile
+
+EXPOSE 22
+CMD ["/usr/sbin/sshd", "-D"]
diff --git a/docs/examples/running_ssh_service.md b/docs/examples/running_ssh_service.md
new file mode 100644
index 00000000..c22e510c
--- /dev/null
+++ b/docs/examples/running_ssh_service.md
@@ -0,0 +1,84 @@
+
+# Dockerizing an SSH daemon service
+
+## Build an `eg_sshd` image
+
+The following `Dockerfile` sets up an SSHd service in a container that you
+can use to connect to and inspect other containers' volumes, or to get
+quick access to a test container:
+
+    # sshd
+    #
+    # VERSION 0.0.2
+
+    FROM ubuntu:14.04
+    MAINTAINER Sven Dowideit <SvenDowideit@docker.com>
+
+    RUN apt-get update && apt-get install -y openssh-server
+    RUN mkdir /var/run/sshd
+    RUN echo 'root:screencast' | chpasswd
+    RUN sed -i 's/PermitRootLogin without-password/PermitRootLogin yes/' /etc/ssh/sshd_config
+
+    # SSH login fix. Otherwise user is kicked off after login
+    RUN sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd
+
+    ENV NOTVISIBLE "in users profile"
+    RUN echo "export VISIBLE=now" >> /etc/profile
+
+    EXPOSE 22
+    CMD ["/usr/sbin/sshd", "-D"]
+
+Build the image using:
+
+    $ docker build -t eg_sshd .
+
+## Run a `test_sshd` container
+
+Then run it. You can then use `docker port` to find out what host port
+the container's port 22 is mapped to:
+
+    $ docker run -d -P --name test_sshd eg_sshd
+    $ docker port test_sshd 22
+    0.0.0.0:49154
+
+And now you can ssh as `root` on the container's IP address (you can find it
+with `docker inspect`) or on port `49154` of the Docker daemon's host IP address
+(`ip address` or `ifconfig` can tell you that) or `localhost` if on the
+Docker daemon host:
+
+    $ ssh root@192.168.1.2 -p 49154
+    # The password is ``screencast``.
+    $$
+
+## Environment variables
+
+Using the `sshd` daemon to spawn shells makes it complicated to pass environment
+variables to the user's shell via the normal Docker mechanisms, as `sshd` scrubs
+the environment before it starts the shell.
+
+If you're setting values in the `Dockerfile` using `ENV`, you'll need to push them
+to a shell initialization file like the `/etc/profile` example in the `Dockerfile`
+above.
+
+If you need to pass `docker run -e ENV=value` values, you will need to write a
+short script to do the same before you start `sshd -D`, and then replace the
+`CMD` with that script.
+
+## Clean up
+
+Finally, clean up after your test by stopping and removing the
+container, and then removing the image:
+
+    $ docker stop test_sshd
+    $ docker rm test_sshd
+    $ docker rmi eg_sshd
+
diff --git a/docs/examples/supervisord.conf b/docs/examples/supervisord.conf
new file mode 100644
index 00000000..385fbe7a
--- /dev/null
+++ b/docs/examples/supervisord.conf
@@ -0,0 +1,12 @@
+[supervisord]
+nodaemon=true
+
+[program:riak]
+command=bash -c "/usr/sbin/riak console"
+numprocs=1
+autostart=true
+autorestart=true
+user=riak
+environment=HOME="/var/lib/riak"
+stdout_logfile=/var/log/supervisor/%(program_name)s.log
+stderr_logfile=/var/log/supervisor/%(program_name)s.log
diff --git a/docs/extend/index.md b/docs/extend/index.md
new file mode 100644
index 00000000..61f8689d
--- /dev/null
+++ b/docs/extend/index.md
@@ -0,0 +1,22 @@
+
+
+## Extending Docker
+
+Currently, you can extend Docker by adding a plugin. This section contains the following topics:
+
+* [Understand Docker plugins](plugins.md)
+* [Write a volume plugin](plugins_volume.md)
+* [Docker plugin API](plugin_api.md)
+
\ No newline at end of file
diff --git a/docs/extend/plugin_api.md b/docs/extend/plugin_api.md
new file mode 100644
index 00000000..8e2862f6
--- /dev/null
+++ b/docs/extend/plugin_api.md
@@ -0,0 +1,135 @@
+
+# Docker Plugin API
+
+Docker plugins are out-of-process extensions which add capabilities to the
+Docker Engine.
+
+This page is intended for people who want to develop their own Docker plugin.
+If you just want to learn about or use Docker plugins, look
+[here](plugins.md).
+
+## What plugins are
+
+A plugin is a process running on the same Docker host as the Docker daemon,
+which registers itself by placing a file in one of the plugin directories described in [Plugin discovery](#plugin-discovery).
+
+Plugins have human-readable names, which are short, lowercase strings. For
+example, `flocker` or `weave`.
+
+Plugins can run inside or outside containers. Currently running them outside
+containers is recommended.
+
+## Plugin discovery
+
+Docker discovers plugins by looking for them in the plugin directory whenever a
+user or container tries to use one by name.
+
+There are three types of files which can be put in the plugin directory.
+
+* `.sock` files are UNIX domain sockets.
+* `.spec` files are text files containing a URL, such as `unix:///other.sock`.
+* `.json` files are text files containing a full json specification for the plugin.
+
+UNIX domain socket files must be located under `/run/docker/plugins`, whereas
+spec files can be located either under `/etc/docker/plugins` or `/usr/lib/docker/plugins`.
+
+The name of the file (excluding the extension) determines the plugin name.
+
+For example, the `flocker` plugin might create a UNIX socket at
+`/run/docker/plugins/flocker.sock`.
+
+You can define each plugin in a separate subdirectory if you want to isolate
+definitions from each other.
+For example, you can create the `flocker` socket under
+`/run/docker/plugins/flocker/flocker.sock` and only mount
+`/run/docker/plugins/flocker` inside the `flocker` container.
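+
+As a quick illustration, registering a plugin through a spec file needs only a
+one-line file (a sketch; `myplugin` and the socket path are placeholder names):
+
+    $ mkdir -p /etc/docker/plugins
+    $ echo "unix:///run/myplugin/myplugin.sock" > /etc/docker/plugins/myplugin.spec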
+
+Docker always searches for unix sockets in `/run/docker/plugins` first. It checks for spec or json files under
+`/etc/docker/plugins` and `/usr/lib/docker/plugins` if the socket doesn't exist. The directory scan stops as
+soon as it finds the first plugin definition with the given name.
+
+### JSON specification
+
+This is the JSON format for a plugin:
+
+```json
+{
+  "Name": "plugin-example",
+  "Addr": "https://example.com/docker/plugin",
+  "TLSConfig": {
+    "InsecureSkipVerify": false,
+    "CAFile": "/usr/shared/docker/certs/example-ca.pem",
+    "CertFile": "/usr/shared/docker/certs/example-cert.pem",
+    "KeyFile": "/usr/shared/docker/certs/example-key.pem"
+  }
+}
+```
+
+The `TLSConfig` field is optional and TLS will only be verified if this configuration is present.
+
+## Plugin lifecycle
+
+Plugins should be started before Docker, and stopped after Docker. For
+example, when packaging a plugin for a platform which supports `systemd`, you
+might use [`systemd` dependencies](
+http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=) to
+manage startup and shutdown order.
+
+When upgrading a plugin, you should first stop the Docker daemon, upgrade the
+plugin, then start Docker again.
+
+## Plugin activation
+
+When a plugin is first referred to -- either by a user referring to it by name
+(e.g. `docker run --volume-driver=foo`) or a container already configured to
+use a plugin being started -- Docker looks for the named plugin in the plugin
+directory and activates it with a handshake. See Handshake API below.
+
+Plugins are *not* activated automatically at Docker daemon startup. Rather,
+they are activated only lazily, or on-demand, when they are needed.
+
+## API design
+
+The Plugin API is RPC-style JSON over HTTP, much like webhooks.
+
+Requests flow *from* the Docker daemon *to* the plugin. So the plugin needs to
+implement an HTTP server and bind this to the UNIX socket mentioned in the
+"plugin discovery" section.
+
+All requests are HTTP `POST` requests.
+
+The API is versioned via an Accept header, which currently is always set to
+`application/vnd.docker.plugins.v1+json`.
+
+## Handshake API
+
+Plugins are activated via the following "handshake" API call.
+
+### /Plugin.Activate
+
+**Request:** empty body
+
+**Response:**
+```
+{
+    "Implements": ["VolumeDriver"]
+}
+```
+
+Responds with a list of Docker subsystems which this plugin implements.
+After activation, the plugin will then be sent events from this subsystem.
+
+## Plugin retries
+
+Attempts to call a method on a plugin are retried with an exponential backoff
+for up to 30 seconds. This may help when packaging plugins as containers, since
+it gives plugin containers a chance to start up before failing any user
+containers which depend on them.
diff --git a/docs/extend/plugins.md b/docs/extend/plugins.md
new file mode 100644
index 00000000..6bfb0053
--- /dev/null
+++ b/docs/extend/plugins.md
@@ -0,0 +1,65 @@
+
+# Understand Docker plugins
+
+You can extend the capabilities of the Docker Engine by loading third-party
+plugins.
+
+## Types of plugins
+
+Plugins extend Docker's functionality. They come in specific types. For
+example, a [volume plugin](plugins_volume.md) might enable Docker
+volumes to persist across multiple Docker hosts.
+
+Currently Docker supports volume and network driver plugins.
+In the future it will support additional plugin types.
+
+## Installing a plugin
+
+Follow the instructions in the plugin's documentation.
+
+## Finding a plugin
+
+The following plugins exist:
+
+* The [Blockbridge plugin](https://github.com/blockbridge/blockbridge-docker-volume)
+  is a volume plugin that provides access to an extensible set of
+  container-based persistent storage options. It supports single and multi-host Docker
+  environments with features that include tenant isolation, automated
+  provisioning, encryption, secure deletion, snapshots and QoS.
+
+* The [Flocker plugin](https://clusterhq.com/docker-plugin/) is a volume plugin
+  which provides multi-host portable volumes for Docker, enabling you to run
+  databases and other stateful containers and move them around across a cluster
+  of machines.
+
+* The [GlusterFS plugin](https://github.com/calavera/docker-volume-glusterfs) is
+  another volume plugin that provides multi-host volumes management for Docker
+  using GlusterFS.
+
+* The [Keywhiz plugin](https://github.com/calavera/docker-volume-keywhiz) is
+  a plugin that provides credentials and secret management using Keywhiz as
+  a central repository.
+
+* The [REX-Ray plugin](https://github.com/emccode/rexraycli) is a volume plugin
+  which is written in Go and provides advanced storage functionality for many
+  platforms including EC2, OpenStack, XtremIO, and ScaleIO.
+
+## Troubleshooting a plugin
+
+If you are having problems with Docker after loading a plugin, ask the authors
+of the plugin for help. The Docker team may not be able to assist you.
+
+## Writing a plugin
+
+If you are interested in writing a plugin for Docker, or seeing how they work
+under the hood, see the [docker plugins reference](plugin_api.md).
diff --git a/docs/extend/plugins_volume.md b/docs/extend/plugins_volume.md
new file mode 100644
index 00000000..e9dc1eba
--- /dev/null
+++ b/docs/extend/plugins_volume.md
@@ -0,0 +1,158 @@
+
+# Write a volume plugin
+
+Docker volume plugins enable Docker deployments to be integrated with external
+storage systems, such as Amazon EBS, and enable data volumes to persist beyond
+the lifetime of a single Docker host. See the [plugin documentation](plugins.md)
+for more information.
+
+# Command-line changes
+
+A volume plugin makes use of the `-v` and `--volume-driver` flags on the `docker run` command. The `-v` flag accepts a volume name and the `--volume-driver` flag a driver type, for example:
+
+    $ docker run -ti -v volumename:/data --volume-driver=flocker busybox sh
+
+This command passes the `volumename` through to the volume plugin as a
+user-given name for the volume. The `volumename` must not begin with a `/`.
+
+By having the user specify a `volumename`, a plugin can associate the volume
+with an external volume beyond the lifetime of a single container or container
+host. This can be used, for example, to move a stateful container from one
+server to another.
+
+By specifying a `volumedriver` in conjunction with a `volumename`, users can use plugins such as [Flocker](https://clusterhq.com/docker-plugin/) to manage volumes external to a single host, such as those on EBS.
+
+
+# Create a VolumeDriver
+
+The container creation endpoint (`/containers/create`) accepts a `VolumeDriver`
+field of type `string`, allowing you to specify the name of the driver. Its
+default value is `"local"`, the default driver for local volumes.
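+
+For illustration, the same choice can be made through the remote API when
+creating a container (a sketch only; it assumes `curl` 7.40+ for
+`--unix-socket`, and the image, command, and driver names are placeholders):
+
+    $ curl --unix-socket /var/run/docker.sock \
+        -X POST -H "Content-Type: application/json" \
+        -d '{"Image": "busybox", "Cmd": ["true"], "VolumeDriver": "flocker"}' \
+        http://localhost/containers/create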
+
+# Volume plugin protocol
+
+If a plugin registers itself as a `VolumeDriver` when activated, then it is
+expected to provide writeable paths on the host filesystem for the Docker
+daemon to provide to containers to consume.
+
+The Docker daemon handles bind-mounting the provided paths into user
+containers.
+
+### /VolumeDriver.Create
+
+**Request**:
+```
+{
+    "Name": "volume_name"
+}
+```
+
+Instruct the plugin that the user wants to create a volume, given a user
+specified volume name. The plugin does not need to actually manifest the
+volume on the filesystem yet (until Mount is called).
+
+**Response**:
+```
+{
+    "Err": null
+}
+```
+
+Respond with a string error if an error occurred.
+
+### /VolumeDriver.Remove
+
+**Request**:
+```
+{
+    "Name": "volume_name"
+}
+```
+
+Remove a volume, given a user specified volume name.
+
+**Response**:
+```
+{
+    "Err": null
+}
+```
+
+Respond with a string error if an error occurred.
+
+### /VolumeDriver.Mount
+
+**Request**:
+```
+{
+    "Name": "volume_name"
+}
+```
+
+Docker requires the plugin to provide a volume, given a user specified volume
+name. This is called once per container start.
+
+**Response**:
+```
+{
+    "Mountpoint": "/path/to/directory/on/host",
+    "Err": null
+}
+```
+
+Respond with the path on the host filesystem where the volume has been made
+available, and/or a string error if an error occurred.
+
+### /VolumeDriver.Path
+
+**Request**:
+```
+{
+    "Name": "volume_name"
+}
+```
+
+Docker needs reminding of the path to the volume on the host.
+
+**Response**:
+```
+{
+    "Mountpoint": "/path/to/directory/on/host",
+    "Err": null
+}
+```
+
+Respond with the path on the host filesystem where the volume has been made
+available, and/or a string error if an error occurred.
+
+### /VolumeDriver.Unmount
+
+**Request**:
+```
+{
+    "Name": "volume_name"
+}
+```
+
+Indication that Docker no longer is using the named volume. This is called once
+per container stop. The plugin may deduce that it is safe to deprovision the
+volume at this point.
+
+**Response**:
+```
+{
+    "Err": null
+}
+```
+
+Respond with a string error if an error occurred.
+
diff --git a/docs/installation/SUSE.md b/docs/installation/SUSE.md
new file mode 100644
index 00000000..b16e4174
--- /dev/null
+++ b/docs/installation/SUSE.md
@@ -0,0 +1,101 @@
+
+# openSUSE
+
+Docker is available in **openSUSE 12.3 and later**. Please note that due
+to its current limitations Docker is able to run only on **64 bit** architectures.
+
+Docker is not part of the official repositories of openSUSE 12.3 and
+openSUSE 13.1. Hence it is necessary to add the [Virtualization
+repository](https://build.opensuse.org/project/show/Virtualization) from
+[OBS](https://build.opensuse.org/) to install the `docker` package.
+
+Execute one of the following commands to add the Virtualization repository:
+
+    # openSUSE 12.3
+    $ sudo zypper ar -f http://download.opensuse.org/repositories/Virtualization/openSUSE_12.3/ Virtualization
+
+    # openSUSE 13.1
+    $ sudo zypper ar -f http://download.opensuse.org/repositories/Virtualization/openSUSE_13.1/ Virtualization
+
+No extra repository is required for openSUSE 13.2 and later.
+
+# SUSE Linux Enterprise
+
+Docker is available in **SUSE Linux Enterprise 12 and later**. Please note that
+due to its current limitations Docker is able to run only on **64 bit**
+architectures.
+
+## Installation
+
+Install the Docker package:
+
+    $ sudo zypper in docker
+
+Now that it's installed, let's start the Docker daemon.
+
+    $ sudo systemctl start docker
+
+If we want Docker to start at boot, we should also:
+
+    $ sudo systemctl enable docker
+
+The docker package creates a new group named docker. Users, other than the
+root user, need to be part of this group in order to interact with the
+Docker daemon. You can add users with:
+
+    $ sudo /usr/sbin/usermod -a -G docker <username>
+
+To verify that everything has worked as expected:
+
+    $ sudo docker run --rm -i -t opensuse /bin/bash
+
+This should download and import the `opensuse` image, and then start `bash` in
+a container. To exit the container type `exit`.
+
+If you want your containers to be able to access the external network you must
+enable the `net.ipv4.ip_forward` rule.
+This can be done using YaST by browsing to the
+`System -> Network Settings -> Routing` menu (for openSUSE Tumbleweed and later)
+or `Network Devices -> Network Settings -> Routing` menu (for SUSE Linux
+Enterprise 12 and previous openSUSE versions) and ensuring that the
+`Enable IPv4 Forwarding` box is checked.
+
+This option cannot be changed when networking is handled by the Network Manager.
+In such cases the `/etc/sysconfig/SuSEfirewall2` file needs to be edited by
+hand to ensure the `FW_ROUTE` flag is set to `yes` like so:
+
+    FW_ROUTE="yes"
+
+
+**Done!**
+
+## Custom daemon options
+
+If you need to add an HTTP Proxy, set a different directory or partition for the
+Docker runtime files, or make other customizations, read our systemd article to
+learn how to [customize your systemd Docker daemon options](/articles/systemd/).
+
+## Uninstallation
+
+To uninstall the Docker package:
+
+    $ sudo zypper rm docker
+
+The above command will not remove images, containers, volumes, or user-created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+    $ rm -rf /var/lib/docker
+
+You must delete the user-created configuration files manually.
+
+## What's next
+
+Continue with the [User Guide](/userguide/).
+
diff --git a/docs/installation/amazon.md b/docs/installation/amazon.md
new file mode 100644
index 00000000..81e5a756
--- /dev/null
+++ b/docs/installation/amazon.md
@@ -0,0 +1,21 @@
+
+## Amazon EC2
+
+You can install Docker on any AWS EC2 Amazon Machine Image (AMI) which runs an
+operating system that Docker supports. Amazon's website includes specific
+instructions for [installing on Amazon
+Linux](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/docker-basics.html#install_docker). To install on
+another AMI, follow the instructions for its specific operating
+system in this installation guide.
+
+For detailed information on Amazon AWS support for Docker, refer to [Amazon's
+documentation](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/docker-basics.html).
diff --git a/docs/installation/archlinux.md b/docs/installation/archlinux.md
new file mode 100644
index 00000000..1a28c5b1
--- /dev/null
+++ b/docs/installation/archlinux.md
@@ -0,0 +1,85 @@
+
+# Arch Linux
+
+Installing on Arch Linux can be handled via the package in community:
+
+ - [docker](https://www.archlinux.org/packages/community/x86_64/docker/)
+
+or the following AUR package:
+
+ - [docker-git](https://aur.archlinux.org/packages/docker-git/)
+
+The docker package will install the latest tagged version of docker. The
+docker-git package will build from the current master branch.
+
+## Dependencies
+
+Docker depends on several packages which are specified as dependencies
+in the packages.
The core dependencies are:

 - bridge-utils
 - device-mapper
 - iproute2
 - lxc
 - sqlite

## Installation

For the normal package a simple

    $ sudo pacman -S docker

is all that is needed.

For the AUR package execute:

    $ sudo yaourt -S docker-git

The instructions here assume **yaourt** is installed. See [Arch User
Repository](https://wiki.archlinux.org/index.php/Arch_User_Repository#Installing_packages)
for information on building and installing packages from the AUR if you
have not done so before.

## Starting Docker

There is a systemd service unit created for docker. To start the docker
service:

    $ sudo systemctl start docker

To start on system boot:

    $ sudo systemctl enable docker

## Custom daemon options

If you need to add an HTTP Proxy, set a different directory or partition for the
Docker runtime files, or make other customizations, read our systemd article to
learn how to [customize your systemd Docker daemon options](/articles/systemd/).

## Uninstallation

To uninstall the Docker package:

    $ sudo pacman -R docker

To uninstall the Docker package and dependencies that are no longer needed:

    $ sudo pacman -Rns docker

The above commands will not remove images, containers, volumes, or user created
configuration files on your host. If you wish to delete all images, containers,
and volumes run the following command:

    $ rm -rf /var/lib/docker

You must delete the user created configuration files manually.

diff --git a/docs/installation/azure.md b/docs/installation/azure.md
new file mode 100644
index 00000000..75c5726f
--- /dev/null
+++ b/docs/installation/azure.md
@@ -0,0 +1,33 @@
+

# Microsoft Azure


## Creating a Docker host machine on Azure

Please check out the following detailed tutorials on the [Microsoft Azure][0]
website to find out the different ways to create a Docker-ready Linux virtual
machine on Azure:

* [Docker Virtual Machine Extensions on Azure][1]
    * [How to use the Docker VM Extension from Azure Cross-Platform Interface][2]
    * [How to use the Docker VM Extension with the Azure Portal][3]
* [Using Docker Machine with Azure][4]

## What next?

Continue with the [User Guide](/userguide/).

[0]: http://azure.microsoft.com/
[1]: http://azure.microsoft.com/en-us/documentation/articles/virtual-machines-docker-vm-extension/
[2]: http://azure.microsoft.com/documentation/articles/virtual-machines-docker-with-xplat-cli/
[3]: http://azure.microsoft.com/documentation/articles/virtual-machines-docker-with-portal/
[4]: http://azure.microsoft.com/en-us/documentation/articles/virtual-machines-docker-machine/
\ No newline at end of file

diff --git a/docs/installation/binaries.md b/docs/installation/binaries.md
new file mode 100644
index 00000000..ed02fb8a
--- /dev/null
+++ b/docs/installation/binaries.md
@@ -0,0 +1,215 @@
+

# Binaries

**This instruction set is meant for hackers who want to try out Docker
on a variety of environments.**

Before following these directions, you should really check if a packaged
version of Docker is already available for your distribution. We have
packages for many distributions, and more keep showing up all the time!
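A quick way to make that check is to ask your package manager directly. A
minimal sketch (the package names shown, `docker.io`, `docker-engine`, and
`docker`, are the common ones, but your distribution may use another):

    # Debian/Ubuntu
    $ apt-cache policy docker.io

    # Fedora/CentOS/RHEL
    $ yum info docker-engine

    # Arch Linux
    $ pacman -Si docker

If the package manager reports a candidate version, prefer that package over
the raw binaries described below.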
+ +## Check runtime dependencies + +To run properly, docker needs the following software to be installed at +runtime: + + - iptables version 1.4 or later + - Git version 1.7 or later + - procps (or similar provider of a "ps" executable) + - XZ Utils 4.9 or later + - a [properly mounted]( + https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount) + cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount + point [is](https://github.com/docker/docker/issues/2683) + [not](https://github.com/docker/docker/issues/3485) + [sufficient](https://github.com/docker/docker/issues/4568)) + +## Check kernel dependencies + +Docker in daemon mode has specific kernel requirements. For details, +check your distribution in [*Installation*](../#installation-list). + +A 3.10 Linux kernel is the minimum requirement for Docker. +Kernels older than 3.10 lack some of the features required to run Docker +containers. These older versions are known to have bugs which cause data loss +and frequently panic under certain conditions. + +The latest minor version (3.x.y) of the 3.10 (or a newer maintained version) +Linux kernel is recommended. Keeping the kernel up to date with the latest +minor version will ensure critical kernel bugs get fixed. + +> **Warning**: +> Installing custom kernels and kernel packages is probably not +> supported by your Linux distribution's vendor. Please make sure to +> ask your vendor about Docker support first before attempting to +> install custom kernels on your distribution. + +> **Warning**: +> Installing a newer kernel might not be enough for some distributions +> which provide packages which are too old or incompatible with +> newer kernels. + +Note that Docker also has a client mode, which can run on virtually any +Linux kernel (it even builds on OS X!). + +## Enable AppArmor and SELinux when possible + +Please use AppArmor or SELinux if your Linux distribution supports +either of the two. This helps improve security and blocks certain +types of exploits. Your distribution's documentation should provide +detailed steps on how to enable the recommended security mechanism. + +Some Linux distributions enable AppArmor or SELinux by default and +they run a kernel which doesn't meet the minimum requirements (3.10 +or newer). Updating the kernel to 3.10 or newer on such a system +might not be enough to start Docker and run containers. +Incompatibilities between the version of AppArmor/SELinux user +space utilities provided by the system and the kernel could prevent +Docker from running, from starting containers or, cause containers to +exhibit unexpected behaviour. + +> **Warning**: +> If either of the security mechanisms is enabled, it should not be +> disabled to make Docker or its containers run. This will reduce +> security in that environment, lose support from the distribution's +> vendor for the system, and might break regulations and security +> policies in heavily regulated environments. + +## Get the Docker binary + +You can download either the latest release binary or a specific version. +After downloading a binary file, you must set the file's execute bit to run it. + +To set the file's execute bit on Linux and OS X: + + $ chmod +x docker + +To get the list of stable release version numbers from GitHub, view the +`docker/docker` [releases page](https://github.com/docker/docker/releases). 
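For instance, a sketch for fetching and verifying a specific 64-bit Linux
build, assuming version `1.6.0` as in the examples below and assuming the
published `.sha256` file is in standard `sha256sum` format:

    $ curl -O https://get.docker.com/builds/Linux/x86_64/docker-1.6.0
    $ curl -O https://get.docker.com/builds/Linux/x86_64/docker-1.6.0.sha256

    # verify the download before running it
    $ sha256sum -c docker-1.6.0.sha256

    # make it executable and check that the client runs
    $ chmod +x docker-1.6.0
    $ ./docker-1.6.0 version

The checksum and compressed-archive URLs are described in the note that
follows.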
+

> **Note**
>
> 1) You can get the MD5 and SHA256 hashes by appending .md5 and .sha256 to the URLs respectively
>
> 2) You can get the compressed binaries by appending .tgz to the URLs

### Get the Linux binary

To download the latest version for Linux, use the
following URLs:

    https://get.docker.com/builds/Linux/i386/docker-latest

    https://get.docker.com/builds/Linux/x86_64/docker-latest

To download a specific version for Linux, use the
following URL patterns:

    https://get.docker.com/builds/Linux/i386/docker-<version>

    https://get.docker.com/builds/Linux/x86_64/docker-<version>

For example:

    https://get.docker.com/builds/Linux/i386/docker-1.6.0

    https://get.docker.com/builds/Linux/x86_64/docker-1.6.0


### Get the Mac OS X binary

The Mac OS X binary is only a client. You cannot use it to run the `docker`
daemon. To download the latest version for Mac OS X, use the following URLs:

    https://get.docker.com/builds/Darwin/i386/docker-latest

    https://get.docker.com/builds/Darwin/x86_64/docker-latest

To download a specific version for Mac OS X, use the
following URL patterns:

    https://get.docker.com/builds/Darwin/i386/docker-<version>

    https://get.docker.com/builds/Darwin/x86_64/docker-<version>

For example:

    https://get.docker.com/builds/Darwin/i386/docker-1.6.0

    https://get.docker.com/builds/Darwin/x86_64/docker-1.6.0

### Get the Windows binary

You can only download the Windows client binary for version `1.6.0` onwards.
Moreover, the binary is only a client; you cannot use it to run the `docker` daemon.
To download the latest version for Windows, use the following URLs:

    https://get.docker.com/builds/Windows/i386/docker-latest.exe

    https://get.docker.com/builds/Windows/x86_64/docker-latest.exe

To download a specific version for Windows, use the following URL patterns:

    https://get.docker.com/builds/Windows/i386/docker-<version>.exe

    https://get.docker.com/builds/Windows/x86_64/docker-<version>.exe

For example:

    https://get.docker.com/builds/Windows/i386/docker-1.6.0.exe

    https://get.docker.com/builds/Windows/x86_64/docker-1.6.0.exe


## Run the Docker daemon

    # start docker in daemon mode from the directory you unpacked it in
    $ sudo ./docker daemon &

## Giving non-root access

The `docker` daemon always runs as the root user, and the `docker`
daemon binds to a Unix socket instead of a TCP port. By default that
Unix socket is owned by the user *root*, and so, by default, you can
access it with `sudo`.

If you (or your Docker installer) create a Unix group called *docker*
and add users to it, then the `docker` daemon will make the ownership of
the Unix socket read/writable by the *docker* group when the daemon
starts. The `docker` daemon must always run as the root user, but if you
run the `docker` client as a user in the *docker* group then you don't
need to add `sudo` to all the client commands.

> **Warning**:
> The *docker* group (or the group specified with `-G`) is root-equivalent;
> see [*Docker Daemon Attack Surface*](
> /articles/security/#docker-daemon-attack-surface) for details.

## Upgrades

To upgrade your manual installation of Docker, first kill the docker
daemon:

    $ killall docker

Then follow the regular installation steps.

## Run your first container!

    # check your docker version
    $ sudo ./docker version

    # run a container and open an interactive shell in the container
    $ sudo ./docker run -i -t ubuntu /bin/bash

Continue with the [User Guide](/userguide/).
diff --git a/docs/installation/centos.md b/docs/installation/centos.md
new file mode 100644
index 00000000..09c79940
--- /dev/null
+++ b/docs/installation/centos.md
@@ -0,0 +1,189 @@
+

# CentOS

Docker is supported on the following versions of CentOS:

* CentOS 7.X

Installation on other binary compatible EL7 distributions such as Scientific
Linux might succeed, but Docker does not test or support these
distributions.

This page instructs you to install using Docker-managed release packages and
installation mechanisms. Using these packages ensures you get the latest release
of Docker. If you wish to install using CentOS-managed packages, consult your
CentOS documentation.

## Prerequisites

Docker requires a 64-bit installation regardless of your CentOS version. Also,
your kernel must be 3.10 at minimum, which CentOS 7 runs.

To check your current kernel version, open a terminal and use `uname -r` to
display your kernel version:

    $ uname -r
    3.10.0-229.el7.x86_64

Finally, it is recommended that you fully update your system. Please keep in
mind that your system should be fully patched to fix any potential kernel bugs.
Any reported kernel bugs may have already been fixed on the latest kernel
packages.

## Install

There are two ways to install Docker Engine. You can use `curl` with the `get.docker.com` site. This method runs an installation script which installs via the `yum` package manager. Or you can install with the `yum` package manager directly yourself.

### Install with the script


1. Log into your machine as a user with `sudo` or `root` privileges.

2. Make sure your existing yum packages are up-to-date.

        $ sudo yum update

3. Run the Docker installation script.

        $ curl -sSL https://get.docker.com/ | sh

    This script adds the `docker.repo` repository and installs Docker.

4. Start the Docker daemon.

        $ sudo service docker start

5. Verify `docker` is installed correctly by running a test image in a container.

        $ sudo docker run hello-world
        Unable to find image 'hello-world:latest' locally
        latest: Pulling from hello-world
        a8219747be10: Pull complete
        91c95931e552: Already exists
        hello-world:latest: The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security.
        Digest: sha256:aa03e5d0d5553b4c3473e89c8619cf79df368babd18681cf5daeb82aab55838d
        Status: Downloaded newer image for hello-world:latest
        Hello from Docker.
        This message shows that your installation appears to be working correctly.

        To generate this message, Docker took the following steps:
         1. The Docker client contacted the Docker daemon.
         2. The Docker daemon pulled the "hello-world" image from the Docker Hub.
            (Assuming it was not already locally available.)
         3. The Docker daemon created a new container from that image which runs the
            executable that produces the output you are currently reading.
         4. The Docker daemon streamed that output to the Docker client, which sent it
            to your terminal.

        To try something more ambitious, you can run an Ubuntu container with:
         $ docker run -it ubuntu bash

        For more examples and ideas, visit:
         http://docs.docker.com/userguide/

### Install without the script

1. Log into your machine as a user with `sudo` or `root` privileges.

2. Make sure your existing yum packages are up-to-date.

        $ sudo yum update

3. Add the yum repo yourself.
+

    For CentOS 7 run:

        $ cat >/etc/yum.repos.d/docker.repo <<-EOF
        [dockerrepo]
        name=Docker Repository
        baseurl=https://yum.dockerproject.org/repo/main/centos/7
        enabled=1
        gpgcheck=1
        gpgkey=https://yum.dockerproject.org/gpg
        EOF

4. Install the Docker package.

        $ sudo yum install docker-engine

5. Start the Docker daemon.

        $ sudo service docker start

6. Verify `docker` is installed correctly by running a test image in a container.

        $ sudo docker run hello-world

## Create a docker group

The `docker` daemon binds to a Unix socket instead of a TCP port. By default
that Unix socket is owned by the user `root` and other users can access it with
`sudo`. For this reason, `docker` daemon always runs as the `root` user.

To avoid having to use `sudo` when you use the `docker` command, create a Unix
group called `docker` and add users to it. When the `docker` daemon starts, it
makes the ownership of the Unix socket read/writable by the `docker` group.

>**Warning**: The `docker` group is equivalent to the `root` user. For details
>on how this impacts security in your system, see [*Docker Daemon Attack
>Surface*](/articles/security/#docker-daemon-attack-surface).

To create the `docker` group and add your user:

1. Log into CentOS as a user with `sudo` privileges.

2. Create the `docker` group and add your user.

    `sudo usermod -aG docker your_username`

3. Log out and log back in.

    This ensures your user is running with the correct permissions.

4. Verify your work by running `docker` without `sudo`.

        $ docker run hello-world

## Start the docker daemon at boot

To ensure Docker starts when you boot your system, do the following:

    $ sudo chkconfig docker on

If you need to add an HTTP Proxy, set a different directory or partition for the
Docker runtime files, or make other customizations, read our Systemd article to
learn how to [customize your Systemd Docker daemon options](/articles/systemd/).


## Uninstall

You can uninstall the Docker software with `yum`.

1. List the package you have installed.

        $ yum list installed | grep docker
        docker-engine.x86_64  1.7.1-1.el7
                            @/docker-engine-1.7.1-1.el7.x86_64.rpm

2. Remove the package.

        $ sudo yum -y remove docker-engine.x86_64

    This command does not remove images, containers, volumes, or user-created
    configuration files on your host.

3. To delete all images, containers, and volumes, run the following command:

        $ rm -rf /var/lib/docker

4. Locate and delete any user-created configuration files.

diff --git a/docs/installation/cruxlinux.md b/docs/installation/cruxlinux.md
new file mode 100644
index 00000000..e5276c9b
--- /dev/null
+++ b/docs/installation/cruxlinux.md
@@ -0,0 +1,92 @@
+

# CRUX Linux

Installing on CRUX Linux can be handled via the contrib ports from
[James Mills](http://prologic.shortcircuit.net.au/), which are included in the
official [contrib](http://crux.nu/portdb/?a=repo&q=contrib) ports:

- docker

The `docker` port will build and install the latest tagged version of Docker.


## Installation

Assuming you have contrib enabled, update your ports tree and install docker:

    $ sudo prt-get depinst docker


## Kernel requirements

To have a working **CRUX+Docker** Host you must ensure your Kernel has
the necessary modules enabled for the Docker Daemon to function correctly.
+

Please read the `README`:

    $ sudo prt-get readme docker

The `docker` port installs the `contrib/check-config.sh` script
provided by the Docker contributors for checking your kernel
configuration as a suitable Docker host.

To check your Kernel configuration run:

    $ /usr/share/docker/check-config.sh

## Starting Docker

There is an rc script created for Docker. To start the Docker service:

    $ sudo /etc/rc.d/docker start

To start on system boot:

 - Edit `/etc/rc.conf`
 - Put `docker` into the `SERVICES=(...)` array after `net`.

## Images

There is a CRUX image maintained by [James Mills](http://prologic.shortcircuit.net.au/)
as part of the Docker "Official Library" of images. To use this image simply pull it
or use it as part of your `FROM` line in your `Dockerfile(s)`.

    $ docker pull crux
    $ docker run -i -t crux

There are also user contributed [CRUX based image(s)](https://registry.hub.docker.com/repos/crux/) on the Docker Hub.


## Uninstallation

To uninstall the Docker package:

    $ sudo prt-get remove docker

The above command will not remove images, containers, volumes, or user created
configuration files on your host. If you wish to delete all images, containers,
and volumes run the following command:

    $ rm -rf /var/lib/docker

You must delete the user created configuration files manually.

## Issues

If you have any issues please file a bug with the
[CRUX Bug Tracker](http://crux.nu/bugs/).

## Support

For support contact the [CRUX Mailing List](http://crux.nu/Main/MailingLists)
or join CRUX's [IRC Channels](http://crux.nu/Main/IrcChannels) on the
[FreeNode](http://freenode.net/) IRC Network.

diff --git a/docs/installation/debian.md b/docs/installation/debian.md
new file mode 100644
index 00000000..54526cf3
--- /dev/null
+++ b/docs/installation/debian.md
@@ -0,0 +1,156 @@
+

# Debian

Docker is supported on the following versions of Debian:

 - [*Debian 8.0 Jessie (64-bit)*](#debian-jessie-80-64-bit)
 - [*Debian 7.7 Wheezy (64-bit)*](#debian-wheezy-stable-7-x-64-bit)

## Debian Jessie 8.0 (64-bit)

Debian 8 comes with a 3.16.0 Linux kernel; the `docker.io` package can be found
in the `jessie-backports` repository. Reasoning behind this can be found here.
Instructions on how to enable the backports repository can be found here.

> **Note**:
> Debian contains a much older KDE3/GNOME2 package called ``docker``, so the
> package and the executable are called ``docker.io``.

### Installation

Make sure you enabled the `jessie-backports` repository, as stated above.

To install the latest Debian package (may not be the latest Docker release):

    $ sudo apt-get update
    $ sudo apt-get install docker.io

To verify that everything has worked as expected:

    $ sudo docker run --rm hello-world

This command downloads and runs the `hello-world` image in a container. When the
container runs, it prints an informational message. Then, it exits.

> **Note**:
> If you want to enable memory and swap accounting see
> [this](/installation/ubuntulinux/#memory-and-swap-accounting).

### Uninstallation

To uninstall the Docker package:

    $ sudo apt-get purge docker.io

To uninstall the Docker package and dependencies that are no longer needed:

    $ sudo apt-get autoremove --purge docker.io

The above commands will not remove images, containers, volumes, or user created
configuration files on your host.
If you wish to delete all images, containers,
and volumes run the following command:

    $ rm -rf /var/lib/docker

You must delete the user created configuration files manually.

## Debian Wheezy/Stable 7.x (64-bit)

Docker requires Kernel 3.8+, while Wheezy ships with Kernel 3.2 (for more details
on why 3.8 is required, see discussion on
[bug #407](https://github.com/docker/docker/issues/407)).

Fortunately, wheezy-backports currently has [Kernel 3.16
](https://packages.debian.org/search?suite=wheezy-backports&section=all&arch=any&searchon=names&keywords=linux-image-amd64),
which is officially supported by Docker.

### Installation

1. Install Kernel from wheezy-backports

    Add the following line to your `/etc/apt/sources.list`

    `deb http://http.debian.net/debian wheezy-backports main`

    then install the `linux-image-amd64` package (note the use of
    `-t wheezy-backports`)

        $ sudo apt-get update
        $ sudo apt-get install -t wheezy-backports linux-image-amd64

2. Restart your system. This is necessary for Debian to use your new kernel.

3. Install Docker using the get.docker.com script:

    `curl -sSL https://get.docker.com/ | sh`

>**Note**: If your company is behind a filtering proxy, you may find that the
>`apt-key`
>command fails for the Docker repo during installation. To work around this,
>add the key directly using the following:
>
>       $ curl -sSL https://get.docker.com/gpg | sudo apt-key add -

### Uninstallation

To uninstall the Docker package:

    $ sudo apt-get purge docker-engine

To uninstall the Docker package and dependencies that are no longer needed:

    $ sudo apt-get autoremove --purge docker-engine

The above commands will not remove images, containers, volumes, or user created
configuration files on your host. If you wish to delete all images, containers,
and volumes run the following command:

    $ rm -rf /var/lib/docker

You must delete the user created configuration files manually.

## Giving non-root access

The `docker` daemon always runs as the `root` user and the `docker`
daemon binds to a Unix socket instead of a TCP port. By default that
Unix socket is owned by the user `root`, and so, by default, you can
access it with `sudo`.

If you (or your Docker installer) create a Unix group called `docker`
and add users to it, then the `docker` daemon will make the ownership of
the Unix socket read/writable by the `docker` group when the daemon
starts. The `docker` daemon must always run as the root user, but if you
run the `docker` client as a user in the `docker` group then you don't
need to add `sudo` to all the client commands. From Docker 0.9.0 you can
use the `-G` flag to specify an alternative group.

> **Warning**:
> The `docker` group (or the group specified with the `-G` flag) is
> `root`-equivalent; see [*Docker Daemon Attack Surface*](
> /articles/security/#docker-daemon-attack-surface) for details.

**Example:**

    # Add the docker group if it doesn't already exist.
    $ sudo groupadd docker

    # Add the connected user "${USER}" to the docker group.
    # Change the user name to match your preferred user.
    # You may have to log out and log back in again for
    # this to take effect.
    $ sudo gpasswd -a ${USER} docker

    # Restart the Docker daemon.
    $ sudo service docker restart


## What next?

Continue with the [User Guide](/userguide/).
diff --git a/docs/installation/fedora.md b/docs/installation/fedora.md
new file mode 100644
index 00000000..b1bdb19c
--- /dev/null
+++ b/docs/installation/fedora.md
@@ -0,0 +1,250 @@
+

# Fedora

Docker is supported on the following versions of Fedora:

- Fedora 20
- Fedora 21
- Fedora 22

This page instructs you to install using Docker-managed release packages and
installation mechanisms. Using these packages ensures you get the latest release
of Docker. If you wish to install using Fedora-managed packages, consult your
Fedora release documentation for information on Fedora's Docker support.

## Prerequisites

Docker requires a 64-bit installation regardless of your Fedora version. Also,
your kernel must be 3.10 at minimum. To check your current kernel
version, open a terminal and use `uname -r` to display your kernel version:

    $ uname -r
    3.19.5-100.fc20.x86_64

If your kernel is at an older version, you must update it.

Finally, it is recommended that you fully update your system. Please keep in
mind that your system should be fully patched to fix any potential kernel bugs. Any
reported kernel bugs may have already been fixed on the latest kernel packages.


## Install

There are two ways to install Docker Engine. You can use `curl` with the `get.docker.com` site. This method runs an installation script which installs via the `yum` package manager. Or you can install with the `yum` package manager directly yourself.

### Install with the script


1. Log into your machine as a user with `sudo` or `root` privileges.

2. Make sure your existing yum packages are up-to-date.

        $ sudo yum update

3. Run the Docker installation script.

        $ curl -sSL https://get.docker.com/ | sh

    This script adds the `docker.repo` repository and installs Docker.

4. Start the Docker daemon.

        $ sudo service docker start

5. Verify `docker` is installed correctly by running a test image in a container.

        $ sudo docker run hello-world
        Unable to find image 'hello-world:latest' locally
        latest: Pulling from hello-world
        a8219747be10: Pull complete
        91c95931e552: Already exists
        hello-world:latest: The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security.
        Digest: sha256:aa03e5d0d5553b4c3473e89c8619cf79df368babd18681cf5daeb82aab55838d
        Status: Downloaded newer image for hello-world:latest
        Hello from Docker.
        This message shows that your installation appears to be working correctly.

        To generate this message, Docker took the following steps:
         1. The Docker client contacted the Docker daemon.
         2. The Docker daemon pulled the "hello-world" image from the Docker Hub.
            (Assuming it was not already locally available.)
         3. The Docker daemon created a new container from that image which runs the
            executable that produces the output you are currently reading.
         4. The Docker daemon streamed that output to the Docker client, which sent it
            to your terminal.

        To try something more ambitious, you can run an Ubuntu container with:
         $ docker run -it ubuntu bash

        For more examples and ideas, visit:
         http://docs.docker.com/userguide/

### Install without the script

1. Log into your machine as a user with `sudo` or `root` privileges.

2. Make sure your existing yum packages are up-to-date.

        $ sudo yum update

3. Add the yum repo yourself.
+ + For Fedora 20 run: + + $ cat >/etc/yum.repos.d/docker.repo <<-EOF + [dockerrepo] + name=Docker Repository + baseurl=https://yum.dockerproject.org/repo/main/fedora/20 + enabled=1 + gpgcheck=1 + gpgkey=https://yum.dockerproject.org/gpg + EOF + + For Fedora 21 run: + + $ cat >/etc/yum.repos.d/docker.repo <<-EOF + [dockerrepo] + name=Docker Repository + baseurl=https://yum.dockerproject.org/repo/main/fedora/21 + enabled=1 + gpgcheck=1 + gpgkey=https://yum.dockerproject.org/gpg + EOF + + For Fedora 22 run: + + $ cat >/etc/yum.repos.d/docker.repo <<-EOF + [dockerrepo] + name=Docker Repository + baseurl=https://yum.dockerproject.org/repo/main/fedora/22 + enabled=1 + gpgcheck=1 + gpgkey=https://yum.dockerproject.org/gpg + EOF + +4. Install the Docker package. + + $ sudo yum install docker-engine + +5. Start the Docker daemon. + + $ sudo service docker start + +6. Verify `docker` is installed correctly by running a test image in a container. + + $ sudo docker run hello-world + +## Create a docker group + +The `docker` daemon binds to a Unix socket instead of a TCP port. By default +that Unix socket is owned by the user `root` and other users can access it with +`sudo`. For this reason, `docker` daemon always runs as the `root` user. + +To avoid having to use `sudo` when you use the `docker` command, create a Unix +group called `docker` and add users to it. When the `docker` daemon starts, it +makes the ownership of the Unix socket read/writable by the `docker` group. + +>**Warning**: The `docker` group is equivalent to the `root` user; For details +>on how this impacts security in your system, see [*Docker Daemon Attack +>Surface*](/articles/security/#docker-daemon-attack-surface) for details. + +To create the `docker` group and add your user: + +1. Log into your system as a user with `sudo` privileges. + +2. Create the `docker` group and add your user. + + `sudo usermod -aG docker your_username` + +3. Log out and log back in. + + This ensures your user is running with the correct permissions. + +4. Verify your work by running `docker` without `sudo`. + + $ docker run hello-world + Unable to find image 'hello-world:latest' locally + latest: Pulling from hello-world + a8219747be10: Pull complete + 91c95931e552: Already exists + hello-world:latest: The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security. + Digest: sha256:aa03e5d0d5553b4c3473e89c8619cf79df368babd18681cf5daeb82aab55838d + Status: Downloaded newer image for hello-world:latest + Hello from Docker. + This message shows that your installation appears to be working correctly. + + To generate this message, Docker took the following steps: + 1. The Docker client contacted the Docker daemon. + 2. The Docker daemon pulled the "hello-world" image from the Docker Hub. + (Assuming it was not already locally available.) + 3. The Docker daemon created a new container from that image which runs the + executable that produces the output you are currently reading. + 4. The Docker daemon streamed that output to the Docker client, which sent it + to your terminal. 
+

    To try something more ambitious, you can run an Ubuntu container with:
     $ docker run -it ubuntu bash

    For more examples and ideas, visit:
     http://docs.docker.com/userguide/

## Start the docker daemon at boot

To ensure Docker starts when you boot your system, do the following:

    $ sudo chkconfig docker on

If you need to add an HTTP Proxy, set a different directory or partition for the
Docker runtime files, or make other customizations, read our Systemd article to
learn how to [customize your Systemd Docker daemon options](/articles/systemd/).

## Running Docker with a manually-defined network

If you manually configure your network using `systemd-network` with `systemd`
version 219 or higher, containers you start with Docker may be unable to access
your network.
Beginning with version 220, the forwarding setting for a given network
(`net.ipv4.conf.<interface>.forwarding`) defaults to *off*. This setting
prevents IP forwarding. It also conflicts with Docker, which enables the
`net.ipv4.conf.all.forwarding` setting within a container.

To work around this, edit the `<interface>.network` file in
`/usr/lib/systemd/network/` on your Docker host (for example,
`/usr/lib/systemd/network/80-container-host0.network`) and add the following block:

```
[Network]
...
IPForward=kernel
# OR
IPForward=true
...
```

This configuration allows IP forwarding from the container as expected.

## Uninstall

You can uninstall the Docker software with `yum`.

1. List the package you have installed.

        $ yum list installed | grep docker
        docker-engine.x86_64  1.7.1-0.1.fc20
                            @/docker-engine-1.7.1-0.1.fc20.el7.x86_64

2. Remove the package.

        $ sudo yum -y remove docker-engine.x86_64

    This command does not remove images, containers, volumes, or user-created
    configuration files on your host.

3. To delete all images, containers, and volumes, run the following command:

        $ rm -rf /var/lib/docker

4. Locate and delete any user-created configuration files.

diff --git a/docs/installation/frugalware.md b/docs/installation/frugalware.md
new file mode 100644
index 00000000..d8c3ac6d
--- /dev/null
+++ b/docs/installation/frugalware.md
@@ -0,0 +1,74 @@
+

# FrugalWare

Installing on FrugalWare is handled via the official packages:

 - [lxc-docker i686](http://www.frugalware.org/packages/200141)
 - [lxc-docker x86_64](http://www.frugalware.org/packages/200130)

The lxc-docker package will install the latest tagged version of Docker.

## Dependencies

Docker depends on several packages which are specified as dependencies
in the packages. The core dependencies are:

 - systemd
 - lvm2
 - sqlite3
 - libguestfs
 - lxc
 - iproute2
 - bridge-utils

## Installation

A simple

    $ sudo pacman -S lxc-docker

is all that is needed.

## Starting Docker

There is a systemd service unit created for Docker. To start Docker as a
service:

    $ sudo systemctl start lxc-docker

To start on system boot:

    $ sudo systemctl enable lxc-docker

## Custom daemon options

If you need to add an HTTP Proxy, set a different directory or partition for the
Docker runtime files, or make other customizations, read our systemd article to
learn how to [customize your systemd Docker daemon options](/articles/systemd/).
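For instance, a minimal systemd drop-in sketch for pointing the daemon at an
HTTP proxy (the drop-in file name and the proxy address are only illustrative
assumptions; the service name `lxc-docker` matches the unit used above):

    $ sudo mkdir -p /etc/systemd/system/lxc-docker.service.d
    $ sudo tee /etc/systemd/system/lxc-docker.service.d/http-proxy.conf <<-EOF
    [Service]
    Environment="HTTP_PROXY=http://proxy.example.com:3128"
    EOF

    # reload unit files and restart the service to pick up the change
    $ sudo systemctl daemon-reload
    $ sudo systemctl restart lxc-docker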
+

## Uninstallation

To uninstall the Docker package:

    $ sudo pacman -R lxc-docker

To uninstall the Docker package and dependencies that are no longer needed:

    $ sudo pacman -Rns lxc-docker

The above commands will not remove images, containers, volumes, or user created
configuration files on your host. If you wish to delete all images, containers,
and volumes run the following command:

    $ rm -rf /var/lib/docker

You must delete the user created configuration files manually.

diff --git a/docs/installation/gentoolinux.md b/docs/installation/gentoolinux.md
new file mode 100644
index 00000000..f954e60e
--- /dev/null
+++ b/docs/installation/gentoolinux.md
@@ -0,0 +1,121 @@
+

# Gentoo

Installing Docker on Gentoo Linux can be accomplished in one of two ways: the
**official** way and the `docker-overlay` way.

See also the official project page of the [Gentoo Docker](https://wiki.gentoo.org/wiki/Project:Docker) team.

## Official way
The first and recommended way if you are looking for a stable
experience is to use the official `app-emulation/docker` package directly
from the tree.

If any issues arise from this ebuild, including missing kernel
configuration flags or dependencies, open a bug
on the Gentoo [Bugzilla](https://bugs.gentoo.org) assigned to `docker AT gentoo DOT org`
or join and ask in the official
[IRC](http://webchat.freenode.net?channels=%23gentoo-containers&uio=d4) channel on the Freenode network.

## docker-overlay way

If you're looking for a `-bin` ebuild, a live ebuild, or a bleeding edge
ebuild, use the provided overlay, [docker-overlay](https://github.com/tianon/docker-overlay),
which can be added using `app-portage/layman`. The most accurate and
up-to-date documentation for properly installing and using the overlay
can be found in the [overlay README](https://github.com/tianon/docker-overlay/blob/master/README.md#using-this-overlay).

If any issues arise from this ebuild or the resulting binary, including
and especially missing kernel configuration flags or dependencies,
open an [issue](https://github.com/tianon/docker-overlay/issues) on
the `docker-overlay` repository or ping `tianon` directly in the `#docker`
IRC channel on the Freenode network.

## Installation

### Available USE flags

| USE Flag      | Default | Description |
| ------------- |:-------:|:------------|
| aufs          |         |Enables dependencies for the "aufs" graph driver, including necessary kernel flags.|
| btrfs         |         |Enables dependencies for the "btrfs" graph driver, including necessary kernel flags.|
| contrib       | Yes     |Install additional contributed scripts and components.|
| device-mapper | Yes     |Enables dependencies for the "devicemapper" graph driver, including necessary kernel flags.|
| doc           |         |Add extra documentation (API, Javadoc, etc). It is recommended to enable per package instead of globally.|
| lxc           |         |Enables dependencies for the "lxc" execution driver.|
| vim-syntax    |         |Pulls in related vim syntax scripts.|
| zsh-completion|         |Enable zsh completion support.|

USE flags are described in detail on [tianon's
blog](https://tianon.github.io/post/2014/05/17/docker-on-gentoo.html).

The package should properly pull in all the necessary dependencies and
prompt for all necessary kernel options.

    $ sudo emerge -av app-emulation/docker

>Note: Sometimes there is a disparity between the latest versions
>in the official **Gentoo tree** and the **docker-overlay**.
>Please be patient, and the latest version should propagate shortly.
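If you want non-default USE flags from the table above, you can record them
under `/etc/portage/package.use/` before emerging. A minimal sketch (the file
name `docker` and the chosen flags are only examples):

    # enable the btrfs graph driver and the lxc execution driver
    $ echo "app-emulation/docker btrfs lxc" | sudo tee -a /etc/portage/package.use/docker
    $ sudo emerge -av app-emulation/docker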
+

## Starting Docker

Ensure that you are running a kernel that includes all the necessary
modules and configuration for your chosen storage driver (device-mapper,
AUFS, or Btrfs, depending on which you've decided to use).

To use Docker, the `docker` daemon must be running as **root**.
To use Docker as a **non-root** user, add yourself to the **docker**
group by running the following command:

    $ sudo usermod -a -G docker user

### OpenRC

To start the `docker` daemon:

    $ sudo /etc/init.d/docker start

To start on system boot:

    $ sudo rc-update add docker default

### systemd

To start the `docker` daemon:

    $ sudo systemctl start docker

To start on system boot:

    $ sudo systemctl enable docker

If you need to add an HTTP Proxy, set a different directory or partition for the
Docker runtime files, or make other customizations, read our systemd article to
learn how to [customize your systemd Docker daemon options](/articles/systemd/).

## Uninstallation

To uninstall the Docker package:

    $ sudo emerge -C app-emulation/docker

To uninstall the Docker package and dependencies that are no longer needed:

    $ sudo emerge -cav app-emulation/docker

The above commands will not remove images, containers, volumes, or user created
configuration files on your host. If you wish to delete all images, containers,
and volumes run the following command:

    $ rm -rf /var/lib/docker

You must delete the user created configuration files manually.

diff --git a/docs/installation/google.md b/docs/installation/google.md
new file mode 100644
index 00000000..b507f41b
--- /dev/null
+++ b/docs/installation/google.md
@@ -0,0 +1,47 @@
+

# Google Cloud Platform

## QuickStart with Container-optimized Google Compute Engine images

1. Go to [Google Cloud Console][1] and create a new Cloud Project with
   [Compute Engine enabled][2].

2. Download and configure the [Google Cloud SDK][3] to use your
   project with the following commands:

        $ curl -sSL https://sdk.cloud.google.com | bash
        $ gcloud auth login
        $ gcloud config set project <project-id>

3. Start a new instance using the latest [Container-optimized image][4]
   (select a zone close to you and the desired instance size):

        $ gcloud compute instances create docker-playground \
          --image container-vm \
          --zone us-central1-a \
          --machine-type f1-micro

4. Connect to the instance using SSH:

        $ gcloud compute ssh --zone us-central1-a docker-playground
        docker-playground:~$ sudo docker run hello-world
        Hello from Docker.
        This message shows that your installation appears to be working correctly.
        ...

Read more about [deploying Containers on Google Cloud Platform][5].
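If you publish container ports on the instance, note that Compute Engine's
project firewall blocks inbound traffic by default. A sketch for allowing
HTTP through to the instance (the rule name `allow-http` is only an
illustrative assumption):

    $ gcloud compute firewall-rules create allow-http \
      --description "Allow incoming HTTP to Docker containers" \
      --allow tcp:80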
+

[1]: https://cloud.google.com/console
[2]: https://developers.google.com/compute/docs/signup
[3]: https://developers.google.com/cloud/sdk
[4]: https://developers.google.com/compute/docs/containers#container-optimized_google_compute_engine_images
[5]: https://developers.google.com/compute/docs/containers

diff --git a/docs/installation/images/bad_host.png b/docs/installation/images/bad_host.png
new file mode 100644
index 00000000..cdc78def
Binary files /dev/null and b/docs/installation/images/bad_host.png differ
diff --git a/docs/installation/images/cool_view.png b/docs/installation/images/cool_view.png
new file mode 100644
index 00000000..8eebf216
Binary files /dev/null and b/docs/installation/images/cool_view.png differ
diff --git a/docs/installation/images/good_host.png b/docs/installation/images/good_host.png
new file mode 100644
index 00000000..2a6e7c47
Binary files /dev/null and b/docs/installation/images/good_host.png differ
diff --git a/docs/installation/images/kitematic.png b/docs/installation/images/kitematic.png
new file mode 100644
index 00000000..5bb221cc
Binary files /dev/null and b/docs/installation/images/kitematic.png differ
diff --git a/docs/installation/images/linux_docker_host.svg b/docs/installation/images/linux_docker_host.svg
new file mode 100644
index 00000000..0ad7240b
--- /dev/null
+++ b/docs/installation/images/linux_docker_host.svg
@@ -0,0 +1,1195 @@
[SVG markup (image/svg+xml) omitted]
diff --git a/docs/installation/images/mac_docker_host.svg b/docs/installation/images/mac_docker_host.svg
new file mode 100644
index 00000000..a885a32c
--- /dev/null
+++ b/docs/installation/images/mac_docker_host.svg
@@ -0,0 +1,1243 @@
[SVG markup (image/svg+xml) omitted]
diff --git a/docs/installation/images/newsite_view.png b/docs/installation/images/newsite_view.png
new file mode 100644
index 00000000..27b6b1a4
Binary files /dev/null and b/docs/installation/images/newsite_view.png differ
diff --git a/docs/installation/images/osx-installer.png b/docs/installation/images/osx-installer.png
new file mode 100644
index 00000000..15eb4083
Binary files /dev/null and b/docs/installation/images/osx-installer.png differ
diff --git a/docs/installation/images/win_docker_host.svg b/docs/installation/images/win_docker_host.svg
new file mode 100644
index 00000000..eef284e7
--- /dev/null
+++ b/docs/installation/images/win_docker_host.svg
@@ -0,0 +1,1259 @@
[SVG markup (image/svg+xml) omitted]
diff --git a/docs/installation/images/windows-boot2docker-cmd.png b/docs/installation/images/windows-boot2docker-cmd.png
new file mode 100644
index 00000000..09e3206e
Binary files /dev/null and b/docs/installation/images/windows-boot2docker-cmd.png differ
diff --git a/docs/installation/images/windows-boot2docker-powershell.png b/docs/installation/images/windows-boot2docker-powershell.png
new file mode 100644
index 00000000..b1ef8967
Binary files /dev/null and b/docs/installation/images/windows-boot2docker-powershell.png differ
diff --git a/docs/installation/images/windows-boot2docker-start.png b/docs/installation/images/windows-boot2docker-start.png
new file mode 100644
index 00000000..1c13577e
Binary files /dev/null and b/docs/installation/images/windows-boot2docker-start.png differ
diff --git a/docs/installation/images/windows-installer.png b/docs/installation/images/windows-installer.png
new file mode 100644
index 00000000..7cf9b0d7
Binary files /dev/null and b/docs/installation/images/windows-installer.png differ
diff --git a/docs/installation/index.md b/docs/installation/index.md
new file mode 100644
index 00000000..b87ec9ad
--- /dev/null
+++ b/docs/installation/index.md
@@ -0,0 +1,33 @@
+

# Supported installation

Docker supports installation on the following:

* [Amazon EC2 Installation](amazon)
* [Arch Linux](archlinux)
* [Microsoft Azure platform](azure)
* [Installation from binaries](binaries)
* [CentOS](centos)
* [CRUX Linux](cruxlinux)
* [Debian](debian)
* [Fedora](fedora)
* [FrugalWare](frugalware)
* [Gentoo](gentoolinux)
* [Google Cloud Platform](google)
* [Install on Joyent Public Cloud](joyent)
* [Mac OS X](mac)
* [Oracle Linux](oracle)
* [Rackspace Cloud](rackspace)
* [Red Hat Enterprise Linux](rhel)
* [IBM SoftLayer](softlayer)
* [openSUSE and SUSE Linux Enterprise](SUSE)
* [Ubuntu](ubuntulinux)
* [Windows](windows)

diff --git a/docs/installation/joyent.md b/docs/installation/joyent.md
new file mode 100644
index 00000000..b135fa5e
--- /dev/null
+++ b/docs/installation/joyent.md
@@ -0,0 +1,29 @@
+

## Install on Joyent Public Cloud

1. Sign in to the [Joyent customer portal](https://my.joyent.com/).

2. [Create a Docker host](https://docs.joyent.com/jpc/managing-docker-containers/creating-a-docker-host).

## Start and manage containers

1. [Start containers in the web UI](https://docs.joyent.com/jpc/managing-docker-containers/starting-a-container).

2. [Configure the Docker CLI on your laptop](https://docs.joyent.com/jpc/managing-docker-containers/access-your-jpc-docker-hosts-from-the-docker-cli) to connect to the remote host to launch and manage containers.

3. SSH into the Docker host.

4. Launch containers using the Docker CLI.

## Where to go next

Continue with the [Docker user guide](/userguide/), read Joyent's [getting
started blog post](https://www.joyent.com/blog/first-steps-with-joyents-container-service),
and [full documentation](https://docs.joyent.com/jpc/managing-docker-containers).
\ No newline at end of file diff --git a/docs/installation/mac.md b/docs/installation/mac.md new file mode 100644 index 00000000..c6ae0b37 --- /dev/null +++ b/docs/installation/mac.md @@ -0,0 +1,432 @@ + + +# Mac OS X + +> **Note**: This release of Docker deprecates the Boot2Docker command line in +> favor of Docker Machine. Use the Docker Toolbox to install Docker Machine as +> well as the other Docker tools. + +You install Docker using Docker Toolbox. Docker Toolbox includes the following Docker tools: + +* Docker Machine for running the `docker-machine` binary +* Docker Engine for running the `docker` binary +* Docker Compose for running the `docker-compose` binary +* Kitematic, the Docker GUI +* a shell preconfigured for a Docker command-line environment +* Oracle VM VirtualBox + +Because the Docker daemon uses Linux-specific kernel features, you can't run +Docker natively in OS X. Instead, you must use `docker-machine` to create and +attach to a virtual machine (VM). This machine is a Linux VM that hosts Docker +for you on your Mac. + +**Requirements** + +Your Mac must be running OS X 10.8 "Mountain Lion" or newer to install the +Docker Toolbox. + +### Learn the key concepts before installing + +In a Docker installation on Linux, your physical machine is both the localhost +and the Docker host. In networking, localhost means your computer. The Docker +host is the computer on which the containers run. + +On a typical Linux installation, the Docker client, the Docker daemon, and any +containers run directly on your localhost. This means you can address ports on a +Docker container using standard localhost addressing such as `localhost:8000` or +`0.0.0.0:8376`. + +![Linux Architecture Diagram](/installation/images/linux_docker_host.svg) + +In an OS X installation, the `docker` daemon is running inside a Linux VM called +`default`. The `default` is a lightweight Linux VM made specifically to run +the Docker daemon on Mac OS X. The VM runs completely from RAM, is a small ~24MB +download, and boots in approximately 5s. + +![OSX Architecture Diagram](/installation/images/mac_docker_host.svg) + +In OS X, the Docker host address is the address of the Linux VM. When you start +the VM with `docker-machine` it is assigned an IP address. When you start a +container, the ports on a container map to ports on the VM. To see this in +practice, work through the exercises on this page. + + +### Installation + +If you have VirtualBox running, you must shut it down before running the +installer. + +1. Go to the [Docker Toolbox](https://www.docker.com/toolbox) page. + +2. Click the installer link to download. + +3. Install Docker Toolbox by double-clicking the package or by right-clicking +and choosing "Open" from the pop-up menu. + + The installer launches the "Install Docker Toolbox" dialog. + + ![Install Docker Toolbox](/installation/images/mac-welcome-page.png) + +4. Press "Continue" to install the toolbox. + + The installer presents you with options to customize the standard + installation. + + ![Standard install](/installation/images/mac-page-two.png) + + By default, the standard Docker Toolbox installation: + + * installs binaries for the Docker tools in `/usr/local/bin` + * makes these binaries available to all users + * updates any existing VirtualBox installation + + Change these defaults by pressing "Customize" or "Change + Install Location." + +5. Press "Install" to perform the standard installation. + + The system prompts you for your password. 
+ + ![Password prompt](/installation/images/mac-password-prompt.png) + +6. Provide your password to continue with the installation. + + When it completes, the installer provides you with some information you can + use to complete some common tasks. + + ![All finished](/installation/images/mac-page-finished.png) + +7. Press "Close" to exit. + + +## Running a Docker Container + +To run a Docker container, you: + +* create a new (or start an existing) Docker virtual machine +* switch your environment to your new VM +* use the `docker` client to create, load, and manage containers + +Once you create a machine, you can reuse it as often as you like. Like any +VirtualBox VM, it maintains its configuration between uses. + +There are two ways to use the installed tools, from the Docker Quickstart Terminal or +[from your shell](#from-your-shell). + +### From the Docker Quickstart Terminal + +1. Open the "Applications" folder or the "Launchpad". + +2. Find the Docker Quickstart Terminal and double-click to launch it. + + The application: + + * opens a terminal window + * creates a VM called `default` if it doesn't exists, starts the VM if it does + * points the terminal environment to this VM + + Once the launch completes, the Docker Quickstart Terminal reports: + + ![All finished](/installation/images/mac-success.png) + + Now, you can run `docker` commands. + +3. Verify your setup succeeded by running the `hello-world` container. + + $ docker run hello-world + Unable to find image 'hello-world:latest' locally + 511136ea3c5a: Pull complete + 31cbccb51277: Pull complete + e45a5af57b00: Pull complete + hello-world:latest: The image you are pulling has been verified. + Important: image verification is a tech preview feature and should not be + relied on to provide security. + Status: Downloaded newer image for hello-world:latest + Hello from Docker. + This message shows that your installation appears to be working correctly. + + To generate this message, Docker took the following steps: + 1. The Docker client contacted the Docker daemon. + 2. The Docker daemon pulled the "hello-world" image from the Docker Hub. + (Assuming it was not already locally available.) + 3. The Docker daemon created a new container from that image which runs the + executable that produces the output you are currently reading. + 4. The Docker daemon streamed that output to the Docker client, which sent it + to your terminal. + + To try something more ambitious, you can run an Ubuntu container with: + $ docker run -it ubuntu bash + + For more examples and ideas, visit: + http://docs.docker.com/userguide/ + + +A more typical way to interact with the Docker tools is from your regular shell command line. + +### From your shell + +This section assumes you are running a Bash shell. You may be running a +different shell such as C Shell but the commands are the same. + +1. Create a new Docker VM. + + $ docker-machine create --driver virtualbox default + Creating VirtualBox VM... + Creating SSH key... + Starting VirtualBox VM... + Starting VM... + To see how to connect Docker to this machine, run: docker-machine env default + + This creates a new `default` in VirtualBox. + + ![default](/installation/images/default.png) + + The command also creates a machine configuration in the + `~/.docker/machine/machines/default` directory. You only need to run the + `create` command once. Then, you can use `docker-machine` to start, stop, + query, and otherwise manage the VM from the command line. + +2. List your available machines. 
+ + $ docker-machine ls + NAME ACTIVE DRIVER STATE URL SWARM + default * virtualbox Running tcp://192.168.99.101:2376 + + If you have previously installed the deprecated Boot2Docker application or + run the Docker Quickstart Terminal, you may have a `dev` VM as well. When you + created `default`, the `docker-machine` command provided instructions + for learning how to connect the VM. + +3. Get the environment commands for your new VM. + + $ docker-machine env default + export DOCKER_TLS_VERIFY="1" + export DOCKER_HOST="tcp://192.168.99.101:2376" + export DOCKER_CERT_PATH="/Users/mary/.docker/machine/machines/default" + export DOCKER_MACHINE_NAME="default" + # Run this command to configure your shell: + # eval "$(docker-machine env default)" + +4. Connect your shell to the `default` machine. + + $ eval "$(docker-machine env default)" + +5. Run the `hello-world` container to verify your setup. + + $ docker run hello-world + + +## Learn about your Toolbox installation + +Toolbox installs the Docker Engine binary, the Docker binary on your system. When you +use the Docker Quickstart Terminal or create a `default` manually, Docker +Machine updates the `~/.docker/machine/machines/default` folder to your +system. This folder contains the configuration for the VM. + +You can create multiple VMs on your system with Docker Machine. So, you may have +more than one VM folder if you have more than one VM. To remove a VM, use the +`docker-machine rm ` command. + +## Migrate from Boot2Docker + +If you were using Boot2Docker previously, you have a pre-existing Docker +`boot2docker-vm` VM on your local system. To allow Docker Machine to manage +this older VM, you can migrate it. + +1. Open a terminal or the Docker CLI on your system. + +2. Type the following command. + + $ docker-machine create -d virtualbox --virtualbox-import-boot2docker-vm boot2docker-vm docker-vm + +3. Use the `docker-machine` command to interact with the migrated VM. + +The `docker-machine` subcommands are slightly different than the `boot2docker` +subcommands. The table below lists the equivalent `docker-machine` subcommand +and what it does: + +| `boot2docker` | `docker-machine` | `docker-machine` description | +|----------------|------------------|----------------------------------------------------------| +| init | create | Creates a new docker host. | +| up | start | Starts a stopped machine. | +| ssh | ssh | Runs a command or interactive ssh session on the machine.| +| save | - | Not applicable. | +| down | stop | Stops a running machine. | +| poweroff | stop | Stops a running machine. | +| reset | restart | Restarts a running machine. | +| config | inspect | Prints machine configuration details. | +| status | ls | Lists all machines and their status. | +| info | inspect | Displays a machine's details. | +| ip | ip | Displays the machine's ip address. | +| shellinit | env | Displays shell commands needed to configure your shell to interact with a machine | +| delete | rm | Removes a machine. | +| download | - | Not applicable. | +| upgrade | upgrade | Upgrades a machine's Docker client to the latest stable release. | + + +## Example of Docker on Mac OS X + +Work through this section to try some practical container tasks on a VM. At this +point, you should have a VM running and be connected to it through your shell. 
+
+## Example of Docker on Mac OS X
+
+Work through this section to try some practical container tasks on a VM. At this
+point, you should have a VM running and be connected to it through your shell.
+To verify this, run the following command:
+
+    $ docker-machine ls
+    NAME   ACTIVE   DRIVER       STATE     URL                         SWARM
+    dev    *        virtualbox   Running   tcp://192.168.99.100:2376
+
+The `ACTIVE` machine, in this case `dev`, is the one your environment is pointing to.
+
+### Access container ports
+
+1. Start an NGINX container on the `DOCKER_HOST`.
+
+        $ docker run -d -P --name web nginx
+
+   Normally, the `docker run` command starts a container, runs it, and then
+   exits. The `-d` flag keeps the container running in the background
+   after the `docker run` command completes. The `-P` flag publishes exposed ports from the
+   container to your local host; this lets you access them from your Mac.
+
+2. Display your running container with the `docker ps` command.
+
+        $ docker ps
+        CONTAINER ID   IMAGE          COMMAND                CREATED         STATUS         PORTS                                           NAMES
+        5fb65ff765e9   nginx:latest   "nginx -g 'daemon of   3 minutes ago   Up 3 minutes   0.0.0.0:49156->443/tcp, 0.0.0.0:49157->80/tcp   web
+
+   At this point, you can see `nginx` is running as a daemon.
+
+3. View just the container's ports.
+
+        $ docker port web
+        443/tcp -> 0.0.0.0:49156
+        80/tcp -> 0.0.0.0:49157
+
+   This tells you that the `web` container's port `80` is mapped to port
+   `49157` on your Docker host.
+
+4. Enter the `http://localhost:49157` address (`localhost` is `0.0.0.0`) in your browser:
+
+   ![Bad Address](/installation/images/bad_host.png)
+
+   This didn't work. The reason it doesn't work is that your `DOCKER_HOST` address is
+   not the localhost address (0.0.0.0) but is instead the address of
+   your Docker VM.
+
+5. Get the address of the `dev` VM.
+
+        $ docker-machine ip dev
+        192.168.59.103
+
+6. Enter the `http://192.168.59.103:49157` address in your browser:
+
+   ![Correct Addressing](/installation/images/good_host.png)
+
+   Success!
+
+7. To stop and then remove your running `nginx` container, do the following:
+
+        $ docker stop web
+        $ docker rm web
+
+### Mount a volume on the container
+
+When you start a container it automatically shares your `/Users/username` directory
+with the VM. You can use this share point to mount directories onto your container.
+The next exercise demonstrates how to do this.
+
+1. Change to your user `$HOME` directory.
+
+        $ cd $HOME
+
+2. Make a new `site` directory.
+
+        $ mkdir site
+
+3. Change into the `site` directory.
+
+        $ cd site
+
+4. Create a new `index.html` file.
+
+        $ echo "my new site" > index.html
+
+5. Start a new `nginx` container and replace the `html` folder with your `site` directory.
+
+        $ docker run -d -P -v $HOME/site:/usr/share/nginx/html \
+          --name mysite nginx
+
+6. Get the `mysite` container's port.
+
+        $ docker port mysite
+        80/tcp -> 0.0.0.0:49166
+        443/tcp -> 0.0.0.0:49165
+
+7. Open the site in a browser:
+
+   ![My site page](/installation/images/newsite_view.png)
+
+8. Try adding a page to your `$HOME/site` in real time.
+
+        $ echo "This is cool" > cool.html
+
+9. Open the new page in the browser.
+
+   ![Cool page](/installation/images/cool_view.png)
+
+10. Stop and then remove your running `mysite` container.
+
+        $ docker stop mysite
+        $ docker rm mysite
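+
+The `/Users` share is not specific to `nginx`; any container can mount a
+directory from it. For instance, you could list the files you just created from
+a throwaway container (an illustrative extra using the small `busybox` image):
+
+    $ docker run --rm -v $HOME/site:/data busybox ls /data
+    cool.html
+    index.html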
+
+## Upgrade Docker Toolbox
+
+To upgrade Docker Toolbox, download and re-run [the Docker Toolbox
+installer](https://docker.com/toolbox/).
+
+
+## Uninstall Docker Toolbox
+
+To uninstall, do the following:
+
+1. List your machines.
+
+        $ docker-machine ls
+        NAME                ACTIVE   DRIVER       STATE     URL                         SWARM
+        dev                 *        virtualbox   Running   tcp://192.168.99.100:2376
+        my-docker-machine            virtualbox   Stopped
+        default                      virtualbox   Stopped
+
+2. Remove each machine.
+
+        $ docker-machine rm dev
+        Successfully removed dev
+
+   Removing a machine deletes its VM from VirtualBox and from the
+   `~/.docker/machine/machines` directory.
+
+3. Remove the Docker Quickstart Terminal and Kitematic from your "Applications" folder.
+
+4. Remove the `docker`, `docker-compose`, and `docker-machine` commands from the `/usr/local/bin` folder.
+
+        $ rm /usr/local/bin/docker
+
+5. Delete the `~/.docker` folder from your system.
+
+
+## Learning more
+
+Use `docker-machine help` to list the full command line reference for Docker Machine. For more
+information about using SSH or SCP to access a VM, see [the Docker Machine
+documentation](https://docs.docker.com/machine/).
+
+You can continue with the [Docker User Guide](/userguide). If you are
+interested in using the Kitematic GUI, see the [Kitematic user
+guide](/kitematic/userguide/).
diff --git a/docs/installation/oracle.md b/docs/installation/oracle.md
new file mode 100644
index 00000000..1a008311
--- /dev/null
+++ b/docs/installation/oracle.md
@@ -0,0 +1,145 @@
+
+
+# Oracle Linux 6 and 7
+
+You do not require an Oracle Linux Support subscription to install Docker on
+Oracle Linux.
+
+*For Oracle Linux customers with an active support subscription:*
+Docker is available in either the `ol6_x86_64_addons` or `ol7_x86_64_addons`
+channel for Oracle Linux 6 and Oracle Linux 7 on the [Unbreakable Linux Network
+(ULN)](https://linux.oracle.com).
+
+*For Oracle Linux users without an active support subscription:*
+Docker is available in the appropriate `ol6_addons` or `ol7_addons` repository
+on [Oracle Public Yum](http://public-yum.oracle.com).
+
+Docker requires the use of the Unbreakable Enterprise Kernel Release 3 (3.8.13)
+or higher on Oracle Linux. This kernel supports the Docker btrfs storage engine
+on both Oracle Linux 6 and 7.
+
+Due to current Docker limitations, Docker is only able to run on the x86_64
+architecture.
+
+## To enable the *addons* channel via the Unbreakable Linux Network:
+
+1. Enable either the *ol6\_x86\_64\_addons* or *ol7\_x86\_64\_addons* channel
+via the ULN web interface.
+Consult the [Unbreakable Linux Network User's
+Guide](http://docs.oracle.com/cd/E52668_01/E39381/html/index.html) for
+documentation on subscribing to channels.
+
+## To enable the *addons* repository via Oracle Public Yum:
+
+The latest releases of Oracle Linux 6 and 7 are automatically configured to use
+the Oracle Public Yum repositories during installation. However, the *addons*
+repository is not enabled by default.
+
+To enable the *addons* repository:
+
+1. Edit either `/etc/yum.repos.d/public-yum-ol6.repo` or
+`/etc/yum.repos.d/public-yum-ol7.repo`
+and set `enabled=1` in the `[ol6_addons]` or the `[ol7_addons]` stanza.
+
+## Installation
+
+1. Ensure the appropriate *addons* channel or repository has been enabled.
+
+2. Use yum to install the Docker package:
+
+        $ sudo yum install docker
+
+## Starting Docker
+
+1. Now that it's installed, start the Docker daemon:
+
+    1. On Oracle Linux 6:
+
+            $ sudo service docker start
+
+    2. On Oracle Linux 7:
+
+            $ sudo systemctl start docker.service
+
+2. If you want the Docker daemon to start automatically at boot:
+
+    1. On Oracle Linux 6:
+
+            $ sudo chkconfig docker on
+    2. On Oracle Linux 7:
+
+            $ sudo systemctl enable docker.service
+
+**Done!**
+
+## Custom daemon options
+
+If you need to add an HTTP Proxy, set a different directory or partition for the
+Docker runtime files, or make other customizations, read our systemd article to
+learn how to [customize your systemd Docker daemon options](/articles/systemd/).
+
+## Using the btrfs storage engine
+
+Docker on Oracle Linux 6 and 7 supports the use of the btrfs storage engine.
+Before enabling btrfs support, ensure that `/var/lib/docker` is stored on a
+btrfs-based filesystem. Review [Chapter
+5](http://docs.oracle.com/cd/E37670_01/E37355/html/ol_btrfs.html) of the [Oracle
+Linux Administrator's Solution
+Guide](http://docs.oracle.com/cd/E37670_01/E37355/html/index.html) for details
+on how to create and mount btrfs filesystems.
+
+To enable btrfs support on Oracle Linux:
+
+1. Ensure that `/var/lib/docker` is on a btrfs filesystem.
+2. Edit `/etc/sysconfig/docker` and add `-s btrfs` to the `OTHER_ARGS` field.
+3. Restart the Docker daemon (`sudo service docker restart` on Oracle Linux 6,
+   or `sudo systemctl restart docker.service` on Oracle Linux 7).
+
+You can now continue with the [Docker User Guide](/userguide/).
+
+## Uninstallation
+
+To uninstall the Docker package:
+
+    $ sudo yum -y remove docker
+
+The above command will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+    $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
+
+## Known issues
+
+### Docker unmounts btrfs filesystem on shutdown
+
+If you're running Docker using the btrfs storage engine and you stop the Docker
+service, it unmounts the btrfs filesystem during the shutdown process. You
+should ensure the filesystem is mounted properly prior to restarting the Docker
+service.
+
+On Oracle Linux 7, you can use a `systemd.mount` definition and modify the
+Docker `systemd.service` to depend on the btrfs mount defined in systemd.
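+
+A minimal sketch of that approach (the unit file path and the device name are
+assumptions; systemd requires the unit file name to match the mount point):
+
+    # /etc/systemd/system/var-lib-docker.mount
+    [Unit]
+    Description=btrfs filesystem for /var/lib/docker
+
+    [Mount]
+    What=/dev/sdb1
+    Where=/var/lib/docker
+    Type=btrfs
+
+You would then add `Requires=var-lib-docker.mount` and `After=var-lib-docker.mount`
+to the `[Unit]` section of your Docker service file so the daemon only starts once
+the filesystem is mounted.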
+
+### SELinux support on Oracle Linux 7
+
+SELinux must be set to `Permissive` or `Disabled` in `/etc/sysconfig/selinux` to
+use the btrfs storage engine on Oracle Linux 7.
+
+## Further issues?
+
+If you have a current Basic or Premier Support Subscription for Oracle Linux,
+you can report any issues you have with the installation of Docker via a Service
+Request at [My Oracle Support](http://support.oracle.com).
+
+If you do not have an Oracle Linux Support Subscription, you can use the [Oracle
+Linux
+Forum](https://community.oracle.com/community/server_%26_storage_systems/linux/oracle_linux) for community-based support.
diff --git a/docs/installation/rackspace.md b/docs/installation/rackspace.md
new file mode 100644
index 00000000..07c04e87
--- /dev/null
+++ b/docs/installation/rackspace.md
@@ -0,0 +1,87 @@
+
+
+# Rackspace Cloud
+
+Installing Docker on Ubuntu provided by Rackspace is pretty
+straightforward, and you should mostly be able to follow the
+[*Ubuntu*](../ubuntulinux/#ubuntu-linux) installation guide.
+
+**However, there is one caveat:**
+
+If you are using any Linux not already shipping with the 3.8 kernel,
+you will need to install it, and this is a little more difficult on
+Rackspace.
+
+Rackspace boots its servers using GRUB's `menu.lst`
+and does not like non-`virtual` (e.g., Xen-compatible)
+kernels there, although they do work. As a result,
+`update-grub` does not have the expected effect, and
+you will need to set the kernel manually.
+
+**Do not attempt this on a production machine!**
+
+    # update apt
+    $ apt-get update
+
+    # install the new kernel
+    $ apt-get install linux-generic-lts-raring
+
+Great, now you have the kernel installed in `/boot/`; next you need to
+make it boot next time.
+
+    # find the exact names
+    $ find /boot/ -name '*3.8*'
+
+    # this should return some results
+
+Now you need to manually edit `/boot/grub/menu.lst`;
+you will find a section at the bottom with the existing options. Copy
+the top one and substitute the new kernel into that. Make sure the new
+kernel is on top, and double check that the kernel and initrd lines point to
+the right files.
+
+Take special care to double check the kernel and initrd entries.
+
+    # now edit /boot/grub/menu.lst
+    $ vi /boot/grub/menu.lst
+
+It will probably look something like this:
+
+    ## ## End Default Options ##
+
+    title              Ubuntu 12.04.2 LTS, kernel 3.8.x generic
+    root               (hd0)
+    kernel             /boot/vmlinuz-3.8.0-19-generic root=/dev/xvda1 ro quiet splash console=hvc0
+    initrd             /boot/initrd.img-3.8.0-19-generic
+
+    title              Ubuntu 12.04.2 LTS, kernel 3.2.0-38-virtual
+    root               (hd0)
+    kernel             /boot/vmlinuz-3.2.0-38-virtual root=/dev/xvda1 ro quiet splash console=hvc0
+    initrd             /boot/initrd.img-3.2.0-38-virtual
+
+    title              Ubuntu 12.04.2 LTS, kernel 3.2.0-38-virtual (recovery mode)
+    root               (hd0)
+    kernel             /boot/vmlinuz-3.2.0-38-virtual root=/dev/xvda1 ro quiet splash single
+    initrd             /boot/initrd.img-3.2.0-38-virtual
+
+Reboot the server (either via command line or console):
+
+    # reboot
+
+Verify the kernel was updated:
+
+    $ uname -a
+    # Linux docker-12-04 3.8.0-19-generic #30~precise1-Ubuntu SMP Wed May 1 22:26:36 UTC 2013 x86_64 x86_64 x86_64 GNU/Linux
+
+    # nice! 3.8.
+
+Now you can finish with the [*Ubuntu*](../ubuntulinux/#ubuntu-linux)
+instructions.
diff --git a/docs/installation/rhel.md b/docs/installation/rhel.md
new file mode 100644
index 00000000..af496f67
--- /dev/null
+++ b/docs/installation/rhel.md
@@ -0,0 +1,184 @@
+
+
+# Red Hat Enterprise Linux
+
+Docker is supported on the following versions of RHEL:
+
+- Red Hat Enterprise Linux 7
+
+This page instructs you to install using Docker-managed release packages and
+installation mechanisms. Using these packages ensures you get the latest release
+of Docker. If you wish to install using Red Hat-managed packages, consult your
+Red Hat release documentation for information on Red Hat's Docker support.
+
+## Prerequisites
+
+Docker requires a 64-bit installation regardless of your Red Hat version. Docker
+also requires your kernel to be 3.10 at minimum, which Red Hat 7 provides.
+
+To check your current kernel version, open a terminal and use `uname -r` to
+display your kernel version:
+
+    $ uname -r
+    3.10.0-229.el7.x86_64
+
+Finally, it is recommended that you fully update your system. Please keep in
+mind that your system should be fully patched to fix any potential kernel bugs.
+Any reported kernel bugs may have already been fixed on the latest kernel
+packages.
+
+## Install Docker Engine
+
+There are two ways to install Docker Engine. You can use `curl` with the `get.docker.com` site. This method runs an installation script which installs via the `yum` package manager. Or you can install with the `yum` package manager directly yourself.
+
+### Install with the script
+
+You use the same installation procedure for all versions of RHEL.
+
+1. Log into your machine as a user with `sudo` or `root` privileges.
+
+2. Make sure your existing yum packages are up-to-date.
+
+        $ sudo yum update
+3. Run the Docker installation script.
+
+        $ curl -sSL https://get.docker.com/ | sh
+
+4. Start the Docker daemon.
+
+        $ sudo service docker start
+
+5. Verify `docker` is installed correctly by running a test image in a container.
+
+        $ sudo docker run hello-world
+        Unable to find image 'hello-world:latest' locally
+        latest: Pulling from hello-world
+        a8219747be10: Pull complete
+        91c95931e552: Already exists
+        hello-world:latest: The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security.
+        Digest: sha256:aa03e5d0d5553b4c3473e89c8619cf79df368babd1.7.1cf5daeb82aab55838d
+        Status: Downloaded newer image for hello-world:latest
+        Hello from Docker.
+        This message shows that your installation appears to be working correctly.
+
+        To generate this message, Docker took the following steps:
+         1. The Docker client contacted the Docker daemon.
+         2. The Docker daemon pulled the "hello-world" image from the Docker Hub.
+            (Assuming it was not already locally available.)
+         3. The Docker daemon created a new container from that image which runs the
+            executable that produces the output you are currently reading.
+         4. The Docker daemon streamed that output to the Docker client, which sent it
+            to your terminal.
+
+        To try something more ambitious, you can run an Ubuntu container with:
+         $ docker run -it ubuntu bash
+
+        For more examples and ideas, visit:
+         http://docs.docker.com/userguide/
+
+### Install without the script
+
+1. Log into your machine as a user with `sudo` or `root` privileges.
+
+2. Make sure your existing yum packages are up-to-date.
+
+        $ sudo yum update
+
+3. Add the yum repo yourself.
+
+   For RHEL 7 run:
+
+        $ cat >/etc/yum.repos.d/docker.repo <<-EOF
+        [dockerrepo]
+        name=Docker Repository
+        baseurl=https://yum.dockerproject.org/repo/main/centos/7
+        enabled=1
+        gpgcheck=1
+        gpgkey=https://yum.dockerproject.org/gpg
+        EOF
+
+4. Install the Docker package.
+
+        $ sudo yum install docker-engine
+
+5. Start the Docker daemon.
+
+        $ sudo service docker start
+
+6. Verify `docker` is installed correctly by running a test image in a container.
+
+        $ sudo docker run hello-world
+
+## Create a docker group
+
+The `docker` daemon binds to a Unix socket instead of a TCP port. By default
+that Unix socket is owned by the user `root` and other users can access it with
+`sudo`. For this reason, the `docker` daemon always runs as the `root` user.
+
+To avoid having to use `sudo` when you use the `docker` command, create a Unix
+group called `docker` and add users to it. When the `docker` daemon starts, it
+makes the ownership of the Unix socket read/writable by the `docker` group.
+
+>**Warning**: The `docker` group is equivalent to the `root` user. For details
+>on how this impacts security in your system, see [*Docker Daemon Attack
+>Surface*](/articles/security/#docker-daemon-attack-surface).
+
+To create the `docker` group and add your user:
+
+1. Log into your machine as a user with `sudo` or `root` privileges.
+
+2. Create the `docker` group and add your user.
+
+        $ sudo usermod -aG docker your_username
+
+3. Log out and log back in.
+
+   This ensures your user is running with the correct permissions.
+
+4. Verify your work by running `docker` without `sudo`.
+
+        $ docker run hello-world
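+
+If the run fails with a permission error, it can help to confirm that your new
+session actually picked up the group change (a quick hedged check, not part of
+the original steps):
+
+    $ id -nG                        # your groups; "docker" should be listed
+    $ ls -l /var/run/docker.sock    # the socket's group owner should be "docker"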
+
+## Start the docker daemon at boot
+
+To ensure Docker starts when you boot your system, do the following:
+
+    $ sudo chkconfig docker on
+
+If you need to add an HTTP Proxy, set a different directory or partition for the
+Docker runtime files, or make other customizations, read our systemd article to
+learn how to [customize your systemd Docker daemon options](/articles/systemd/).
+
+
+## Uninstall
+
+You can uninstall the Docker software with `yum`.
+
+1. List the package you have installed.
+
+        $ yum list installed | grep docker
+        docker-engine.x86_64    1.7.1-0.1.el7    @/docker-engine-1.7.1-0.1.el7.x86_64
+
+2. Remove the package.
+
+        $ sudo yum -y remove docker-engine.x86_64
+
+   This command does not remove images, containers, volumes, or user created
+   configuration files on your host.
+
+3. To delete all images, containers, and volumes run the following command:
+
+        $ rm -rf /var/lib/docker
+
+4. Locate and delete any user-created configuration files.
diff --git a/docs/installation/softlayer.md b/docs/installation/softlayer.md
new file mode 100644
index 00000000..622d0b17
--- /dev/null
+++ b/docs/installation/softlayer.md
@@ -0,0 +1,36 @@
+
+
+# IBM SoftLayer
+
+1. Create an [IBM SoftLayer account](
+   https://www.softlayer.com/cloud-servers/).
+2. Log in to the [SoftLayer Customer Portal](
+   https://control.softlayer.com/).
+3. From the *Devices* menu select [*Device List*](https://control.softlayer.com/devices).
+4. Click *Order Devices* on the top right of the window below the menu bar.
+5. Under *Virtual Server* click [*Hourly*](https://manage.softlayer.com/Sales/orderHourlyComputingInstance).
+6. Create a new *SoftLayer Virtual Server Instance* (VSI) using the default
+   values for all the fields and choose:
+
+    - The desired location for *Datacenter*
+    - *Ubuntu Linux 12.04 LTS Precise Pangolin - Minimal Install (64 bit)*
+      for *Operating System*.
+
+7. Click the *Continue Your Order* button at the bottom right.
+8. Fill out VSI *hostname* and *domain*.
+9. Insert the required *User Metadata* and place the order.
+10. Then continue with the [*Ubuntu*](../ubuntulinux/#ubuntu-linux)
+    instructions.
+
+## What next?
+
+Continue with the [User Guide](/userguide/).
+
diff --git a/docs/installation/ubuntulinux.md b/docs/installation/ubuntulinux.md
new file mode 100644
index 00000000..e41c9e91
--- /dev/null
+++ b/docs/installation/ubuntulinux.md
@@ -0,0 +1,367 @@
+
+
+# Ubuntu
+
+Docker is supported on these Ubuntu operating systems:
+
+- Ubuntu Vivid 15.04
+- Ubuntu Trusty 14.04 (LTS)
+- Ubuntu Saucy 13.10
+- Ubuntu Precise 12.04 (LTS)
+
+This page instructs you to install using Docker-managed release packages and
+installation mechanisms. Using these packages ensures you get the latest release
+of Docker. If you wish to install using Ubuntu-managed packages, consult your
+Ubuntu documentation.
+
+## Prerequisites
+
+Docker requires a 64-bit installation regardless of your Ubuntu version.
+Additionally, your kernel must be 3.10 at minimum. The latest 3.10 minor version
+or a newer maintained version is also acceptable.
+
+Kernels older than 3.10 lack some of the features required to run Docker
+containers. These older versions are known to have bugs which cause data loss
+and frequently panic under certain conditions.
+
+To check your current kernel version, open a terminal and use `uname -r` to display
+your kernel version:
+
+    $ uname -r
+    3.11.0-15-generic
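+
+If you provision machines with scripts, the same check can gate the
+installation (a small sketch, not from the original docs; the 3.10 threshold
+comes from the prerequisites above):
+
+    #!/bin/sh
+    # Turn a release string such as "3.11.0-15-generic" into a comparable
+    # number such as 311, then compare against 3.10 (310).
+    current=$(uname -r | awk -F. '{ printf "%d%02d", $1, $2 }')
+    if [ "$current" -lt 310 ]; then
+        echo "kernel $(uname -r) is too old for Docker (need 3.10+)" >&2
+        exit 1
+    fi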
+
+>**Caution** Some Ubuntu OS versions **require a version higher than 3.10** to
+>run Docker, see the prerequisites on this page that apply to your Ubuntu
+>version.
+
+
+### For Vivid 15.04
+
+There are no prerequisites for this version.
+
+### For Trusty 14.04
+
+There are no prerequisites for this version.
+
+### For Precise 12.04 (LTS)
+
+For Ubuntu Precise, Docker requires the 3.13 kernel version. If your kernel
+version is older than 3.13, you must upgrade it. Refer to this table to see
+which packages are required for your environment:
+
+| Package | Description |
+|---------|-------------|
+| `linux-image-generic-lts-trusty` | Generic Linux kernel image. This kernel has AUFS built in. This is required to run Docker. |
+| `linux-headers-generic-lts-trusty` | Allows packages such as ZFS and VirtualBox guest additions which depend on them. If you didn't install the headers for your existing kernel, then you can skip these headers for the "trusty" kernel. If you're unsure, you should include this package for safety. |
+| `xserver-xorg-lts-trusty`, `libgl1-mesa-glx-lts-trusty` | Optional in non-graphical environments without Unity/Xorg. Required when running Docker on a machine with a graphical environment. To learn more about the reasons for these packages, read the installation instructions for backported kernels, specifically the LTS Enablement Stack; refer to note 5 under each version. |
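+
+If you are unsure whether any of these packages are already present, you can
+check before upgrading (a hedged one-off check; the package names are the ones
+from the table above):
+
+    $ uname -r                       # the kernel you are currently booted into
+    $ dpkg -l 'linux-*-lts-trusty'   # any backported kernel packages installed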
+
+To upgrade your kernel and install the additional packages, do the following:
+
+1. Open a terminal on your Ubuntu host.
+
+2. Update your package manager.
+
+        $ sudo apt-get update
+
+3. Install both the required and optional packages.
+
+        $ sudo apt-get install linux-image-generic-lts-trusty
+
+   Depending on your environment, you may install more as described in the preceding table.
+
+4. Reboot your host.
+
+        $ sudo reboot
+
+5. After your system reboots, go ahead and [install Docker](#installation).
+
+
+### For Saucy 13.10 (64 bit)
+
+Docker uses AUFS as the default storage backend. If you don't have this
+prerequisite installed, Docker's installation process adds it.
+
+## Installation
+
+Make sure you have installed the prerequisites for your Ubuntu version. Then,
+install Docker using the following:
+
+1. Log into your Ubuntu installation as a user with `sudo` privileges.
+
+2. Verify that you have `curl` installed.
+
+        $ which curl
+
+   If `curl` isn't installed, install it after updating your package manager:
+
+        $ sudo apt-get update
+        $ sudo apt-get install curl
+
+3. Get the latest Docker package.
+
+        $ curl -sSL https://get.docker.com/ | sh
+
+   The system prompts you for your `sudo` password. Then, it downloads and
+   installs Docker and its dependencies.
+
+   >**Note**: If your company is behind a filtering proxy, you may find that the
+   >`apt-key`
+   >command fails for the Docker repo during installation. To work around this,
+   >add the key directly using the following:
+   >
+   >       $ curl -sSL https://get.docker.com/gpg | sudo apt-key add -
+
+4. Verify `docker` is installed correctly.
+
+        $ sudo docker run hello-world
+
+   This command downloads a test image and runs it in a container.
+
+## Optional configurations for Docker on Ubuntu
+
+This section contains optional procedures for configuring your Ubuntu to work
+better with Docker.
+
+* [Create a docker group](#create-a-docker-group)
+* [Adjust memory and swap accounting](#adjust-memory-and-swap-accounting)
+* [Enable UFW forwarding](#enable-ufw-forwarding)
+* [Configure a DNS server for use by Docker](#configure-a-dns-server-for-use-by-docker)
+* [Configure Docker to start on boot](#configure-docker-to-start-on-boot)
+
+### Create a Docker group
+
+The `docker` daemon binds to a Unix socket instead of a TCP port. By default
+that Unix socket is owned by the user `root` and other users can access it with
+`sudo`. For this reason, the `docker` daemon always runs as the `root` user.
+
+To avoid having to use `sudo` when you use the `docker` command, create a Unix
+group called `docker` and add users to it. When the `docker` daemon starts, it
+makes the ownership of the Unix socket read/writable by the `docker` group.
+
+>**Warning**: The `docker` group is equivalent to the `root` user. For details
+>on how this impacts security in your system, see [*Docker Daemon Attack
+>Surface*](/articles/security/#docker-daemon-attack-surface).
+
+To create the `docker` group and add your user:
+
+1. Log into Ubuntu as a user with `sudo` privileges.
+
+   This procedure assumes you log in as the `ubuntu` user.
+
+2. Create the `docker` group and add your user.
+
+        $ sudo usermod -aG docker ubuntu
+
+3. Log out and log back in.
+
+   This ensures your user is running with the correct permissions.
+
+4. Verify your work by running `docker` without `sudo`.
+
+        $ docker run hello-world
+
+   If this fails with a message similar to this:
+
+        Cannot connect to the Docker daemon. Is 'docker daemon' running on this host?
+
+   Check that the `DOCKER_HOST` environment variable is not set for your shell.
+   If it is, unset it.
+
+### Adjust memory and swap accounting
+
+When users run Docker, they may see these messages when working with an image:
+
+    WARNING: Your kernel does not support cgroup swap limit. WARNING: Your
+    kernel does not support swap limit capabilities. Limitation discarded.
+
+To prevent these messages, enable memory and swap accounting on your
+system. Enabling memory and swap accounting does induce both a memory
+overhead and a performance degradation even when Docker is not in
+use. The memory overhead is about 1% of the total available
+memory. The performance degradation is roughly 10%.
+
+To enable memory and swap accounting on systems using GNU GRUB (GNU GRand Unified
+Bootloader), do the following:
+
+1. Log into Ubuntu as a user with `sudo` privileges.
+
+2. Edit the `/etc/default/grub` file.
+
+3. Set the `GRUB_CMDLINE_LINUX` value as follows:
+
+        GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"
+
+4. Save and close the file.
+
+5. Update GRUB.
+
+        $ sudo update-grub
+
+6. Reboot your system.
+
+
+### Enable UFW forwarding
+
+If you use [UFW (Uncomplicated Firewall)](https://help.ubuntu.com/community/UFW)
+on the same host as you run Docker, you'll need to do additional configuration.
+Docker uses a bridge to manage container networking. By default, UFW drops all
+forwarding traffic. As a result, for Docker to run when UFW is
+enabled, you must set UFW's forwarding policy appropriately.
+
+Also, UFW's default set of rules denies all incoming traffic. If you want to be able
+to reach your containers from another host then you should also allow incoming
+connections on the Docker port (default `2375`).
+
+To configure UFW and allow incoming connections on the Docker port:
+
+1. Log into Ubuntu as a user with `sudo` privileges.
+
+2. Verify that UFW is installed and enabled.
+
+        $ sudo ufw status
+
+3. Open the `/etc/default/ufw` file for editing.
+
+        $ sudo nano /etc/default/ufw
+
+4. Set the `DEFAULT_FORWARD_POLICY` policy to:
+
+        DEFAULT_FORWARD_POLICY="ACCEPT"
+
+5. Save and close the file.
+
+6. Reload UFW to use the new setting.
+
+        $ sudo ufw reload
+
+7. Allow incoming connections on the Docker port.
+
+        $ sudo ufw allow 2375/tcp
+
+### Configure a DNS server for use by Docker
+
+Systems that run Ubuntu or an Ubuntu derivative on the desktop typically use
+`127.0.0.1` as the default `nameserver` in the `/etc/resolv.conf` file.
+NetworkManager also sets up `dnsmasq` to use the real DNS servers of the
+connection and sets up `nameserver 127.0.0.1` in `/etc/resolv.conf`.
+
+When starting containers on desktop machines with these configurations, Docker
+users see this warning:
+
+    WARNING: Local (127.0.0.1) DNS resolver found in resolv.conf and containers
+    can't use it. Using default external servers : [8.8.8.8 8.8.4.4]
+
+The warning occurs because Docker containers can't use the local DNS nameserver.
+Instead, Docker defaults to using an external nameserver.
+
+To avoid this warning, you can specify a DNS server for use by Docker
+containers. Or, you can disable `dnsmasq` in NetworkManager. Though, disabling
+`dnsmasq` might make DNS resolution slower on some networks.
+
+To specify a DNS server for use by Docker:
+
+1. Log into Ubuntu as a user with `sudo` privileges.
+
+2. Open the `/etc/default/docker` file for editing.
+
+        $ sudo nano /etc/default/docker
+
+3. Add a setting for Docker.
+
+        DOCKER_OPTS="--dns 8.8.8.8"
+
+   Replace `8.8.8.8` with a local DNS server such as `192.168.1.1`. You can also
+   specify multiple DNS servers. Separate them with spaces, for example:
+
+        --dns 8.8.8.8 --dns 192.168.1.1
+
+   >**Warning**: If you're doing this on a laptop which connects to various
+   >networks, make sure to choose a public DNS server.
+
+4. Save and close the file.
+
+5. Restart the Docker daemon.
+
+        $ sudo restart docker
+
+
+**Or, as an alternative to the previous procedure,** disable `dnsmasq` in
+NetworkManager (this might slow your network).
+
+1. Open the `/etc/NetworkManager/NetworkManager.conf` file for editing.
+
+        $ sudo nano /etc/NetworkManager/NetworkManager.conf
+
+2. Comment out the `dns=dnsmasq` line:
+
+        # dns=dnsmasq
+
+3. Save and close the file.
+
+4. Restart both NetworkManager and Docker.
+
+        $ sudo restart network-manager
+        $ sudo restart docker
+
+### Configure Docker to start on boot
+
+Ubuntu uses `systemd` as its boot and service manager from `15.04` onwards and `upstart`
+for versions `14.10` and below.
+
+For `15.04` and up, to configure the `docker` daemon to start on boot, run:
+
+    $ sudo systemctl enable docker
+
+For `14.10` and below, the above installation method automatically configures `upstart`
+to start the docker daemon on boot.
+
+## Upgrade Docker
+
+To install the latest version of Docker with `curl`:
+
+    $ curl -sSL https://get.docker.com/ | sh
+
+## Uninstallation
+
+To uninstall the Docker package:
+
+    $ sudo apt-get purge docker-engine
+
+To uninstall the Docker package and dependencies that are no longer needed:
+
+    $ sudo apt-get autoremove --purge docker-engine
+
+The above commands will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+    $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
diff --git a/docs/installation/windows.md b/docs/installation/windows.md
new file mode 100644
index 00000000..0737fa57
--- /dev/null
+++ b/docs/installation/windows.md
@@ -0,0 +1,365 @@
+
+
+# Windows
+
+> **Note**: This release of Docker deprecates the Boot2Docker command line in
+> favor of Docker Machine. Use the Docker Toolbox to install Docker Machine as
+> well as the other Docker tools.
+
+You install Docker using Docker Toolbox. Docker Toolbox includes the following Docker tools:
+
+* Docker Machine for running the `docker-machine` binary
+* Docker Engine for running the `docker` binary
+* Kitematic, the Docker GUI
+* a shell preconfigured for a Docker command-line environment
+* Oracle VM VirtualBox
+
+Because the Docker daemon uses Linux-specific kernel features, you can't run
+Docker natively in Windows. Instead, you must use `docker-machine` to create and attach to a Docker VM on your machine. This VM hosts Docker for you on your Windows system.
+
+The Docker VM is a lightweight Linux virtual machine made specifically to run the
+Docker daemon on Windows. The VirtualBox VM runs completely from RAM, is a
+small ~24MB download, and boots in approximately 5s.
+
+## Requirements
+
+Your machine must be running Windows 7.1, 8/8.1 or newer to run Docker. Windows 10 is not currently supported. To find out what version of Windows you have:
+
+1. Right-click the Windows message and choose **System**.
+
+   ![Which version](/installation/images/win_ver.png)
+
+   If you aren't using a supported version, you could consider upgrading your
+   operating system.
+
+2. Make sure your Windows system supports Hardware Virtualization Technology and that virtualization is enabled.
+
+   #### For Windows 8 or 8.1
+
+   Choose **Start > Task Manager** and navigate to the **Performance** tab.
+   Under **CPU** you should see the following:
+
+   ![Release page](/installation/images/virtualization.png)
+
+   If virtualization is not enabled on your system, follow the manufacturer's instructions for enabling it.
+
+   #### For Windows 7
+
+   Run the Microsoft® Hardware-Assisted Virtualization Detection
+   Tool and follow the on-screen instructions.
+
+
+> **Note**: If you have Docker hosts running and you don't wish to do a Docker Toolbox
+installation, you can install the `docker.exe` using the *unofficial* Windows package
+manager Chocolatey. For information on how to do this, see [Docker package on
+Chocolatey](http://chocolatey.org/packages/docker).
+
+### Learn the key concepts before installing
+
+In a Docker installation on Linux, your machine is both the localhost and the
+Docker host. In networking, localhost means your computer. The Docker host is
+the machine on which the containers run.
+
+On a typical Linux installation, the Docker client, the Docker daemon, and any
+containers run directly on your localhost. This means you can address ports on a
+Docker container using standard localhost addressing such as `localhost:8000` or
+`0.0.0.0:8376`.
+
+![Linux Architecture Diagram](/installation/images/linux_docker_host.svg)
+
+In a Windows installation, the `docker` daemon is running inside a Linux virtual
+machine. You use the Windows Docker client to talk to the Docker host VM. Your
+Docker containers run inside this host.
+
+![Windows Architecture Diagram](/installation/images/win_docker_host.svg)
+
+In Windows, the Docker host address is the address of the Linux VM. When you
+start the VM with `docker-machine` it is assigned an IP address. When you start
+a container, the ports on a container map to ports on the VM. To see this in
+practice, work through the exercises on this page.
+
+
+### Installation
+
+If you have VirtualBox running, you must shut it down before running the
+installer.
+
+1. Go to the [Docker Toolbox](https://www.docker.com/toolbox) page.
+
+2. Click the installer link to download.
+
+3. Install Docker Toolbox by double-clicking the installer.
+
+   The installer launches the "Setup - Docker Toolbox" dialog.
+
+   ![Install Docker Toolbox](/installation/images/win-welcome.png)
+
+4. Press "Next" to install the toolbox.
+
+   The installer presents you with options to customize the standard
+   installation. By default, the standard Docker Toolbox installation:
+
+   * installs executables for the Docker tools in `C:\Program Files\Docker Toolbox`
+   * updates any existing VirtualBox installation
+   * adds a Docker Inc. folder to your program shortcuts
+   * updates your `PATH` environment variable
+   * adds desktop icons for the Docker Quickstart Terminal and Kitematic
+
+   This installation assumes the defaults are acceptable.
+
+5. Press "Next" until you reach the "Ready to Install" page.
+
+   The system prompts you for your password.
+
+   ![Install](/installation/images/win-page-6.png)
+
+6. Press "Install" to continue with the installation.
+
+   When it completes, the installer provides you with some information you can
+   use to complete some common tasks.
+
+   ![All finished](/installation/images/windows-finish.png)
+
+7. Press "Close" to exit.
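+
+Before you launch anything, you can confirm the tools landed on your `PATH` by
+querying their versions from an ordinary Command Prompt (a quick sanity check;
+this is not part of the installer flow):
+
+    C:\> docker --version
+    C:\> docker-machine --version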
+
+## Running a Docker Container
+
+To run a Docker container, you:
+
+* create a new (or start an existing) Docker virtual machine
+* switch your environment to your new VM
+* use the `docker` client to create, load, and manage containers
+
+Once you create a machine, you can reuse it as often as you like. Like any
+VirtualBox VM, it maintains its configuration between uses.
+
+There are several ways to use the installed tools: from the Docker Quickstart Terminal
+or from your shell, as the cmd.exe and PowerShell sections below describe.
+
+### From the Docker Quickstart Terminal
+
+1. Find the Docker Quickstart Terminal icon on your Desktop and double-click to launch it.
+
+   The application:
+
+   * opens a terminal window
+   * creates a VM named `default` if it doesn't exist, starts the VM if it does
+   * points the terminal environment to this VM
+
+   Once the launch completes, you can run `docker` commands.
+
+2. Verify your setup succeeded by running the `hello-world` container.
+
+        $ docker run hello-world
+        Unable to find image 'hello-world:latest' locally
+        511136ea3c5a: Pull complete
+        31cbccb51277: Pull complete
+        e45a5af57b00: Pull complete
+        hello-world:latest: The image you are pulling has been verified.
+        Important: image verification is a tech preview feature and should not be
+        relied on to provide security.
+        Status: Downloaded newer image for hello-world:latest
+        Hello from Docker.
+        This message shows that your installation appears to be working correctly.
+
+        To generate this message, Docker took the following steps:
+         1. The Docker client contacted the Docker daemon.
+         2. The Docker daemon pulled the "hello-world" image from the Docker Hub.
+            (Assuming it was not already locally available.)
+         3. The Docker daemon created a new container from that image which runs the
+            executable that produces the output you are currently reading.
+         4. The Docker daemon streamed that output to the Docker client, which sent it
+            to your terminal.
+
+        To try something more ambitious, you can run an Ubuntu container with:
+         $ docker run -it ubuntu bash
+
+        For more examples and ideas, visit:
+         http://docs.docker.com/userguide/
+
+
+## Using Docker from Windows Command Line Prompt (cmd.exe)
+
+1. Launch a Windows Command Line Prompt (cmd.exe).
+
+   The `docker-machine` command requires `ssh.exe` in your `PATH` environment
+   variable. This `.exe` is in the MsysGit `bin` folder.
+
+2. Add this to the `%PATH%` environment variable by running:
+
+        set PATH=%PATH%;"c:\Program Files (x86)\Git\bin"
+
+3. Create a new Docker VM.
+
+        docker-machine create --driver virtualbox my-default
+        Creating VirtualBox VM...
+        Creating SSH key...
+        Starting VirtualBox VM...
+        Starting VM...
+        To see how to connect Docker to this machine, run: docker-machine env my-default
+
+   The command also creates a machine configuration in the
+   `C:\USERS\USERNAME\.docker\machine\machines` directory. You only need to run the `create`
+   command once. Then, you can use `docker-machine` to start, stop, query, and
+   otherwise manage the VM from the command line.
+
+4. List your available machines.
+
+        C:\Users\mary> docker-machine ls
+        NAME         ACTIVE   DRIVER       STATE     URL                         SWARM
+        my-default   *        virtualbox   Running   tcp://192.168.99.101:2376
+
+   If you have previously installed the deprecated Boot2Docker application or
+   run the Docker Quickstart Terminal, you may have a `dev` VM as well.
+
+5. Get the environment commands for your new VM.
+
+        C:\Users\mary> docker-machine env --shell cmd my-default
+
+6. Connect your shell to the `my-default` machine.
+
+   Run the `SET` commands that the previous step printed, or apply them all at
+   once with a `FOR` loop (`cmd.exe` has no `eval`):
+
+        C:\Users\mary> FOR /f "tokens=*" %i IN ('docker-machine env --shell cmd my-default') DO %i
+
+7. Run the `hello-world` container to verify your setup.
+
+        C:\Users\mary> docker run hello-world
+
+## Using Docker from PowerShell
+
+1. Launch a Windows PowerShell window.
+
+2. Add `ssh.exe` to your PATH:
+
+        PS C:\Users\mary> $Env:Path = "${Env:Path};c:\Program Files (x86)\Git\bin"
+
+3. Create a new Docker VM.
+
+        PS C:\Users\mary> docker-machine create --driver virtualbox my-default
+
+4. List your available machines.
+
+        PS C:\Users\mary> docker-machine ls
+        NAME         ACTIVE   DRIVER       STATE     URL                         SWARM
+        my-default   *        virtualbox   Running   tcp://192.168.99.101:2376
+
+5. Get the environment commands for your new VM.
+
+        PS C:\Users\mary> docker-machine env --shell powershell my-default
+
+6. Connect your shell to the `my-default` machine by piping those commands to
+   `Invoke-Expression` (PowerShell has no `eval`):
+
+        PS C:\Users\mary> docker-machine env --shell powershell my-default | Invoke-Expression
+
+7. Run the `hello-world` container to verify your setup.
+
+        PS C:\Users\mary> docker run hello-world
+
+
+## Learn about your Toolbox installation
+
+Toolbox installs the Docker Engine binary in the `C:\Program Files\Docker
+Toolbox` directory. When you use the Docker Quickstart Terminal or create a
+`default` VM manually, Docker Machine creates and updates the
+`C:\USERS\USERNAME\.docker\machine\machines\default` folder on your
+system. This folder contains the configuration for the VM.
+
+You can create multiple VMs on your system with Docker Machine. So, you may have
+more than one VM folder if you have more than one VM. To remove a VM, use the
+`docker-machine rm <machine-name>` command.
+
+## Migrate from Boot2Docker
+
+If you were using Boot2Docker previously, you have a pre-existing Docker
+`boot2docker-vm` VM on your local system. To allow Docker Machine to manage
+this older VM, you can migrate it.
+
+1. Open a terminal or the Docker CLI on your system.
+
+2. Type the following command.
+
+        $ docker-machine create -d virtualbox --virtualbox-import-boot2docker-vm boot2docker-vm docker-vm
+
+3. Use the `docker-machine` command to interact with the migrated VM.
+
+The `docker-machine` subcommands are slightly different from the `boot2docker`
+subcommands. The table below lists the equivalent `docker-machine` subcommand
+and what it does:
+
+| `boot2docker` | `docker-machine` | `docker-machine` description                              |
+|---------------|------------------|-----------------------------------------------------------|
+| init          | create           | Creates a new docker host.                                 |
+| up            | start            | Starts a stopped machine.                                  |
+| ssh           | ssh              | Runs a command or interactive ssh session on the machine.  |
+| save          | -                | Not applicable.                                            |
+| down          | stop             | Stops a running machine.                                   |
+| poweroff      | stop             | Stops a running machine.                                   |
+| reset         | restart          | Restarts a running machine.                                |
+| config        | inspect          | Prints machine configuration details.                      |
+| status        | ls               | Lists all machines and their status.                       |
+| info          | inspect          | Displays a machine's details.                              |
+| ip            | ip               | Displays the machine's ip address.                         |
+| shellinit     | env              | Displays shell commands needed to configure your shell to interact with a machine |
+| delete        | rm               | Removes a machine.                                         |
+| download      | -                | Not applicable.                                            |
+| upgrade       | upgrade          | Upgrades a machine's Docker client to the latest stable release. |
+
+
+## Upgrade Docker Toolbox
+
+To upgrade Docker Toolbox, download and re-run [the Docker Toolbox
+installer](https://www.docker.com/toolbox).
+
+## Container port redirection
+
+If you are curious, the username for the Docker default user is `docker` and the
+password is `tcuser`.
+The latest version of `docker-machine` sets up a host-only
+network adapter which provides access to the container's ports.
+
+If you run a container with a published port:
+
+    $ docker run --rm -i -t -p 80:80 nginx
+
+Then you should be able to access that nginx server using the IP address
+reported to you using:
+
+    $ docker-machine ip
+
+Typically, the IP is 192.168.59.103, but it could get changed by VirtualBox's
+DHCP implementation.
+
+## Log in with PuTTY instead of using the CMD
+
+Docker Machine generates and uses the public/private key pair in your
+`%USERPROFILE%\.ssh` directory, so to log in you need to use the private key from
+this same directory. The private key needs to be converted into the format PuTTY
+uses. You can do this with
+[puttygen](http://www.chiark.greenend.org.uk/~sgtatham/putty/download.html):
+
+1. Open `puttygen.exe` and load ("File"->"Load" menu) the private key from
+   `%USERPROFILE%\.ssh\id_boot2docker`.
+
+2. Click "Save Private Key".
+
+3. Use the saved file to log in with PuTTY using `docker@127.0.0.1:2022`.
+
+## Uninstallation
+
+You can uninstall Docker Toolbox using Windows' standard process for removing
+programs. This process does not remove the `docker-install.exe` file. You must
+delete that file yourself.
+
+## Learn more
+
+You can continue with the [Docker User Guide](/userguide). If you are
+interested in using the Kitematic GUI, see the [Kitematic user
+guide](/kitematic/userguide/).
diff --git a/docs/introduction/understanding-docker.md b/docs/introduction/understanding-docker.md
new file mode 100644
index 00000000..d597c3ea
--- /dev/null
+++ b/docs/introduction/understanding-docker.md
@@ -0,0 +1,293 @@
+
+
+# Understand the architecture
+
+**What is Docker?**
+
+Docker is an open platform for developing, shipping, and running applications.
+Docker is designed to deliver your applications faster. With Docker you can
+separate your applications from your infrastructure and treat your
+infrastructure like a managed application. Docker helps you ship code faster,
+test faster, deploy faster, and shorten the cycle between writing code and
+running code.
+
+Docker does this by combining a lightweight container virtualization platform
+with workflows and tooling that help you manage and deploy your applications.
+
+At its core, Docker provides a way to run almost any application securely
+isolated in a container. The isolation and security allow you to run many
+containers simultaneously on your host. The lightweight nature of containers,
+which run without the extra load of a hypervisor, means you can get more out of
+your hardware.
+
+Surrounding the container virtualization are tooling and a platform which can
+help you in several ways:
+
+* getting your applications (and supporting components) into Docker containers
+* distributing and shipping those containers to your teams for further development
+  and testing
+* deploying those applications to your production environment,
+  whether it be in a local data center or the Cloud.
+
+## What can I use Docker for?
+
+*Faster delivery of your applications*
+
+Docker is perfect for helping you with the development lifecycle. Docker
+allows your developers to develop on local containers that contain your
+applications and services. It can then integrate into a continuous integration and
+deployment workflow.
+
+For example, your developers write code locally and share their development stack via
+Docker with their colleagues.
When they are ready, they push their code and the +stack they are developing onto a test environment and execute any required +tests. From the testing environment, you can then push the Docker images into +production and deploy your code. + +*Deploying and scaling more easily* + +Docker's container-based platform allows for highly portable workloads. Docker +containers can run on a developer's local host, on physical or virtual machines +in a data center, or in the Cloud. + +Docker's portability and lightweight nature also make dynamically managing +workloads easy. You can use Docker to quickly scale up or tear down applications +and services. Docker's speed means that scaling can be near real time. + +*Achieving higher density and running more workloads* + +Docker is lightweight and fast. It provides a viable, cost-effective alternative +to hypervisor-based virtual machines. This is especially useful in high density +environments: for example, building your own Cloud or Platform-as-a-Service. But +it is also useful for small and medium deployments where you want to get more +out of the resources you have. + +## What are the major Docker components? +Docker has two major components: + + +* Docker: the open source container virtualization platform. +* [Docker Hub](https://hub.docker.com): our Software-as-a-Service + platform for sharing and managing Docker containers. + + +> **Note:** Docker is licensed under the open source Apache 2.0 license. + +## What is Docker's architecture? +Docker uses a client-server architecture. The Docker *client* talks to the +Docker *daemon*, which does the heavy lifting of building, running, and +distributing your Docker containers. Both the Docker client and the daemon *can* +run on the same system, or you can connect a Docker client to a remote Docker +daemon. The Docker client and daemon communicate via sockets or through a +RESTful API. + +![Docker Architecture Diagram](/article-img/architecture.svg) + +### The Docker daemon +As shown in the diagram above, the Docker daemon runs on a host machine. The +user does not directly interact with the daemon, but instead through the Docker +client. + +### The Docker client +The Docker client, in the form of the `docker` binary, is the primary user +interface to Docker. It accepts commands from the user and communicates back and +forth with a Docker daemon. + +### Inside Docker +To understand Docker's internals, you need to know about three components: + +* Docker images. +* Docker registries. +* Docker containers. + +#### Docker images + +A Docker image is a read-only template. For example, an image could contain an Ubuntu +operating system with Apache and your web application installed. Images are used to create +Docker containers. Docker provides a simple way to build new images or update existing +images, or you can download Docker images that other people have already created. +Docker images are the **build** component of Docker. + +#### Docker registries +Docker registries hold images. These are public or private stores from which you +upload or download images. The public Docker registry is provided with the +[Docker Hub](http://hub.docker.com). It serves a huge collection of existing +images for your use. These can be images you create yourself or you can use +images that others have previously created. Docker registries are the +**distribution** component of Docker. + +#### Docker containers +Docker containers are similar to a directory. 
+A Docker container holds everything that
+is needed for an application to run. Each container is created from a Docker
+image. Docker containers can be run, started, stopped, moved, and deleted. Each
+container is an isolated and secure application platform. Docker containers are the
+**run** component of Docker.
+
+## So how does Docker work?
+
+So far, we've learned that:
+
+1. You can build Docker images that hold your applications.
+2. You can create Docker containers from those Docker images to run your
+   applications.
+3. You can share those Docker images via
+   [Docker Hub](https://hub.docker.com) or your own registry.
+
+Let's look at how these elements combine together to make Docker work.
+
+### How does a Docker image work?
+
+We've already seen that Docker images are read-only templates from which Docker
+containers are launched. Each image consists of a series of layers. Docker
+makes use of [union file systems](http://en.wikipedia.org/wiki/UnionFS) to
+combine these layers into a single image. Union file systems allow files and
+directories of separate file systems, known as branches, to be transparently
+overlaid, forming a single coherent file system.
+
+One of the reasons Docker is so lightweight is because of these layers. When you
+change a Docker image, for example when you update an application to a new version,
+a new layer gets built. Thus, rather than replacing the whole image or entirely
+rebuilding, as you may do with a virtual machine, only that layer is added or
+updated. Now you don't need to distribute a whole new image, just the update,
+making distributing Docker images faster and simpler.
+
+Every image starts from a base image, for example `ubuntu`, a base Ubuntu image,
+or `fedora`, a base Fedora image. You can also use images of your own as the
+basis for a new image, for example if you have a base Apache image you could use
+this as the base of all your web application images.
+
+> **Note:** Docker usually gets these base images from
+> [Docker Hub](https://hub.docker.com).
+
+Docker images are then built from these base images using a simple, descriptive
+set of steps we call *instructions*. Each instruction creates a new layer in our
+image. Instructions include actions like:
+
+* Run a command.
+* Add a file or directory.
+* Create an environment variable.
+* Specify what process to run when launching a container from this image.
+
+These instructions are stored in a file called a `Dockerfile`. Docker reads this
+`Dockerfile` when you request a build of an image, executes the instructions, and
+returns a final image.
+
+### How does a Docker registry work?
+
+The Docker registry is the store for your Docker images. Once you build a Docker
+image you can *push* it to a public registry such as the one provided by [Docker
+Hub](https://hub.docker.com) or to your own registry running behind your
+firewall.
+
+Using the Docker client, you can search for already published images and then
+pull them down to your Docker host to build containers from them.
+
+[Docker Hub](https://hub.docker.com) provides both public and private storage
+for images. Public storage is searchable and can be downloaded by anyone.
+Private storage is excluded from search results and only you and your users can
+pull images down and use them to build containers. You can [sign up for a storage plan
+here](https://hub.docker.com/plans).
+
+### How does a container work?
+
+A container consists of an operating system, user-added files, and metadata. As
+we've seen, each container is built from an image.
+That image tells Docker
+what the container holds, what process to run when the container is launched, and
+a variety of other configuration data. The Docker image is read-only. When
+Docker runs a container from an image, it adds a read-write layer on top of the
+image (using a union file system as we saw earlier) in which your application can
+then run.
+
+### What happens when you run a container?
+
+Either by using the `docker` binary or via the API, the Docker client tells the Docker
+daemon to run a container.
+
+    $ docker run -i -t ubuntu /bin/bash
+
+Let's break down this command. The Docker client is launched using the `docker`
+binary with the `run` option telling it to launch a new container. The bare
+minimum the Docker client needs to tell the Docker daemon to run the container
+is:
+
+* What Docker image to build the container from, here `ubuntu`, a base Ubuntu
+  image;
+* The command you want to run inside the container when it is launched,
+  here `/bin/bash`, to start the Bash shell inside the new container.
+
+So what happens under the hood when we run this command?
+
+In order, Docker does the following:
+
+- **Pulls the `ubuntu` image:** Docker checks for the presence of the `ubuntu`
+  image and, if it doesn't exist locally on the host, then Docker downloads it from
+  [Docker Hub](https://hub.docker.com). If the image already exists, then Docker
+  uses it for the new container.
+- **Creates a new container:** Once Docker has the image, it uses it to create a
+  container.
+- **Allocates a filesystem and mounts a read-write _layer_:** The container is created in
+  the file system and a read-write layer is added to the image.
+- **Allocates a network / bridge interface:** Creates a network interface that allows the
+  Docker container to talk to the local host.
+- **Sets up an IP address:** Finds and attaches an available IP address from a pool.
+- **Executes a process that you specify:** Runs your application; and
+- **Captures and provides application output:** Connects and logs standard input,
+  output, and errors for you to see how your application is running.
+
+You now have a running container! From here you can manage your container, interact with
+your application and then, when finished, stop and remove your container.
+
+## The underlying technology
+
+Docker is written in Go and makes use of several Linux kernel features to
+deliver the functionality we've seen.
+
+### Namespaces
+
+Docker takes advantage of a technology called `namespaces` to provide the
+isolated workspace we call the *container*. When you run a container, Docker
+creates a set of *namespaces* for that container.
+
+This provides a layer of isolation: each aspect of a container runs in its own
+namespace and does not have access outside it.
+
+Some of the namespaces that Docker uses are:
+
+ - **The `pid` namespace:** Used for process isolation (PID: Process ID).
+ - **The `net` namespace:** Used for managing network interfaces (NET:
+   Networking).
+ - **The `ipc` namespace:** Used for managing access to IPC
+   resources (IPC: InterProcess Communication).
+ - **The `mnt` namespace:** Used for managing mount-points (MNT: Mount).
+ - **The `uts` namespace:** Used for isolating kernel and version identifiers (UTS: Unix
+   Timesharing System).
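+
+You can see the `pid` namespace directly: a process listing taken inside a
+container shows only that container's processes (a hedged one-liner; any small
+image that ships `ps` works):
+
+    $ docker run --rm ubuntu ps -ef    # the command you ran appears as PID 1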
Control groups allow Docker to share
+available hardware resources among containers and, if required, set up limits
+and constraints, such as limiting the memory available to a specific container.
+
+### Union file systems
+Union file systems, or UnionFS, are file systems that operate by creating layers,
+making them very lightweight and fast. Docker uses union file systems to provide
+the building blocks for containers. Docker can make use of several union file system
+variants, including AUFS, btrfs, vfs, and DeviceMapper.
+
+### Container format
+Docker combines these components into a wrapper we call a container format. The
+default container format is called `libcontainer`. Docker also supports
+traditional Linux containers using [LXC](https://linuxcontainers.org/). In the
+future, Docker may support other container formats, for example, by integrating with
+BSD Jails or Solaris Zones.
+
+## Next steps
+### Installing Docker
+Visit the [installation section](/installation/#installation).
+
+### The Docker user guide
+[Learn Docker in depth](/userguide/).
+
 diff --git a/docs/misc/deprecated.md b/docs/misc/deprecated.md new file mode 100644 index 00000000..28ede419 --- /dev/null +++ b/docs/misc/deprecated.md @@ -0,0 +1,74 @@
+
+
+# Deprecated Features
+
+The following features are deprecated.
+
+### LXC built-in exec driver
+**Deprecated In Release: v1.8**
+
+**Target For Removal In Release: v1.10**
+
+The built-in LXC execution driver is deprecated in favor of an external implementation.
+The `--lxc-conf` flag and API fields will also be removed.
+
+### Old Command Line Options
+**Deprecated In Release: [v1.8.0](/release-notes/#docker-engine-1-8-0)**
+
+**Target For Removal In Release: v1.10**
+
+The flags `-d` and `--daemon` are deprecated in favor of the `daemon` subcommand:
+
+    docker daemon -H ...
+
+The following single-dash (`-opt`) variants of certain command line options
+are deprecated and replaced with double-dash options (`--opt`):
+
+    docker attach -nostdin
+    docker attach -sig-proxy
+    docker build -no-cache
+    docker build -rm
+    docker commit -author
+    docker commit -run
+    docker events -since
+    docker history -notrunc
+    docker images -notrunc
+    docker inspect -format
+    docker ps -beforeId
+    docker ps -notrunc
+    docker ps -sinceId
+    docker rm -link
+    docker run -cidfile
+    docker run -cpuset
+    docker run -dns
+    docker run -entrypoint
+    docker run -expose
+    docker run -link
+    docker run -lxc-conf
+    docker run -n
+    docker run -privileged
+    docker run -volumes-from
+    docker search -notrunc
+    docker search -stars
+    docker search -t
+    docker search -trusted
+    docker tag -force
+
+The following double-dash options are deprecated and have no replacement:
+
+    docker run --networking
+    docker ps --since-id
+    docker ps --before-id
+    docker search --trusted
+
+### Interacting with V1 registries
+
+Version 1.8.3 adds a flag (`--disable-legacy-registry=false`) which prevents the Docker daemon from performing `pull`, `push`, and `login` operations against v1 registries. Though disabled by default, this signals the intent to deprecate the v1 protocol.
 diff --git a/docs/misc/faq.md b/docs/misc/faq.md new file mode 100644 index 00000000..38a54f1a --- /dev/null +++ b/docs/misc/faq.md @@ -0,0 +1,273 @@
+
+
+# Frequently Asked Questions (FAQ)
+
+If you don't see your question here, feel free to submit new ones to
+. Or, you can fork [the
+repo](https://github.com/docker/docker) and contribute them yourself by editing
+the documentation sources.
+
+
+### How much does Docker cost?
+
+Docker is 100% free.
It is open source, so you can use it without paying.
+
+### What open source license are you using?
+
+We are using the Apache License Version 2.0, see it here:
+[https://github.com/docker/docker/blob/master/LICENSE](
+https://github.com/docker/docker/blob/master/LICENSE)
+
+### Does Docker run on Mac OS X or Windows?
+
+Docker currently runs only on Linux, but you can use VirtualBox to run Docker in
+a virtual machine on your box, and get the best of both worlds. Check out the
+[*Mac OS X*](../installation/mac/#macosx) and [*Microsoft
+Windows*](../installation/windows/#windows) installation guides. Docker Machine
+can run the small Linux distribution Boot2Docker inside virtual machines on
+these two operating systems.
+
+> **Note:** if you are using a remote Docker daemon, such as Boot2Docker,
+> then _do not_ type the `sudo` before the `docker` commands shown in the
+> documentation's examples.
+
+### How do containers compare to virtual machines?
+
+They are complementary. VMs are best used to allocate chunks of hardware
+resources. Containers operate at the process level, which makes them very
+lightweight and perfect as a unit of software delivery.
+
+### What does Docker add to just plain LXC?
+
+Docker is not a replacement for LXC. "LXC" refers to capabilities of the Linux
+kernel (specifically namespaces and control groups) which allow sandboxing
+processes from one another, and controlling their resource allocations. On top
+of this low-level foundation of kernel features, Docker offers a high-level tool
+with several powerful functionalities:
+
+ - *Portable deployment across machines.* Docker defines a format for bundling
+ an application and all its dependencies into a single object which can be
+ transferred to any Docker-enabled machine, and executed there with the
+ guarantee that the execution environment exposed to the application will be the
+ same. LXC implements process sandboxing, which is an important prerequisite
+ for portable deployment, but that alone is not enough for portable deployment.
+ If you sent me a copy of your application installed in a custom LXC
+ configuration, it would almost certainly not run on my machine the way it does
+ on yours, because it is tied to your machine's specific configuration:
+ networking, storage, logging, distro, etc. Docker defines an abstraction for
+ these machine-specific settings, so that the exact same Docker container can
+ run - unchanged - on many different machines, with many different
+ configurations.
+
+ - *Application-centric.* Docker is optimized for the deployment of
+ applications, as opposed to machines. This is reflected in its API, user
+ interface, design philosophy, and documentation. By contrast, the `lxc` helper
+ scripts focus on containers as lightweight machines - basically servers that
+ boot faster and need less RAM. We think there's more to containers than just
+ that.
+
+ - *Automatic build.* Docker includes [*a tool for developers to automatically
+ assemble a container from their source
+ code*](../reference/builder/#dockerbuilder), with full control over application
+ dependencies, build tools, packaging, etc. They are free to use `make`, `maven`,
+ `chef`, `puppet`, `salt`, Debian packages, RPMs, source tarballs, or any
+ combination of the above, regardless of the configuration of the machines.
+
+ - *Versioning.* Docker includes git-like capabilities for tracking successive
+ versions of a container, inspecting the diff between versions, committing new
+ versions, rolling back, etc.
The history also includes how a container was
+ assembled and by whom, so you get full traceability from the production server
+ all the way back to the upstream developer. Docker also implements incremental
+ uploads and downloads, similar to `git pull`, so new versions of a container
+ can be transferred by only sending diffs.
+
+ - *Component re-use.* Any container can be used as a [*"base image"*](
+ ../terms/image/#base-image-def) to create more specialized components. This can
+ be done manually or as part of an automated build. For example you can prepare
+ the ideal Python environment, and use it as a base for 10 different
+ applications. Your ideal Postgresql setup can be re-used for all your future
+ projects. And so on.
+
+ - *Sharing.* Docker has access to a public registry [on Docker Hub](https://registry.hub.docker.com/)
+ where thousands of people have uploaded useful containers: anything from Redis,
+ CouchDB, Postgres to IRC bouncers to Rails app servers to Hadoop to base images
+ for various Linux distros. The
+ [*registry*](../reference/api/registry_index_spec/#registryindexspec) also
+ includes an official "standard library" of useful containers maintained by the
+ Docker team. The registry itself is open-source, so anyone can deploy their own
+ registry to store and transfer private containers, for internal server
+ deployments for example.
+
+ - *Tool ecosystem.* Docker defines an API for automating and customizing the
+ creation and deployment of containers. There are a huge number of tools
+ integrating with Docker to extend its capabilities. PaaS-like deployment
+ (Dokku, Deis, Flynn), multi-node orchestration (Maestro, Salt, Mesos, Openstack
+ Nova), management dashboards (docker-ui, Openstack Horizon, Shipyard),
+ configuration management (Chef, Puppet), continuous integration (Jenkins,
+ Strider, Travis), etc. Docker is rapidly establishing itself as the standard
+ for container-based tooling.
+
+### What is the difference between a Docker container and a VM?
+
+There's a great StackOverflow answer [showing the differences](
+http://stackoverflow.com/questions/16047306/how-is-docker-io-different-from-a-normal-virtual-machine).
+
+### Do I lose my data when the container exits?
+
+Not at all! Any data that your application writes to disk gets preserved in its
+container until you explicitly delete the container. The file system for the
+container persists even after the container halts.
+
+### How far do Docker containers scale?
+
+Some of the largest server farms in the world today are based on containers.
+Large web deployments like Google and Twitter, and platform providers such as
+Heroku and dotCloud, all run on container technology, at a scale of hundreds of
+thousands or even millions of containers running in parallel.
+
+### How do I connect Docker containers?
+
+Currently, the recommended way to link containers is via the link primitive. You
+can see details of how to [work with links here](/userguide/dockerlinks).
+
+Also useful for more flexible service portability is the [Ambassador linking
+pattern](/articles/ambassador_pattern_linking/).
+
+### How do I run more than one process in a Docker container?
+
+Any capable process supervisor such as [supervisord](
+http://supervisord.org/), runit, s6, or daemontools can do the trick. Docker
+will start up the process management daemon, which will then fork to run
+additional processes. As long as the process manager daemon continues to run,
+the container will continue to run as well.
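+
+For instance, a minimal sketch of such an image (assuming a Debian/Ubuntu base
+where `supervisor` is available as a package, and a hypothetical
+`supervisord.conf` that defines the processes you want to run) might look like:
+
+    FROM ubuntu:14.04
+    # Install the supervisor process manager
+    RUN apt-get update && apt-get install -y supervisor
+    # Add your own configuration defining the managed processes
+    COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
+    # Run supervisord in the foreground so the container stays up
+    CMD ["/usr/bin/supervisord", "-n"]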
You can see a more substantial example
+[that uses supervisord here](/articles/using_supervisord/).
+
+### What platforms does Docker run on?
+
+Linux:
+
+ - Ubuntu 12.04, 13.04 et al.
+ - Fedora 19/20+
+ - RHEL 6.5+
+ - CentOS 6+
+ - Gentoo
+ - ArchLinux
+ - openSUSE 12.3+
+ - CRUX 3.0+
+
+Cloud:
+
+ - Amazon EC2
+ - Google Compute Engine
+ - Rackspace
+
+### How do I report a security issue with Docker?
+
+You can learn about the project's [security policy](https://www.docker.com/security/)
+and report security issues to the [security mailbox](mailto:security@docker.com).
+
+### Why do I need to sign my commits to Docker with the DCO?
+
+Please read [our blog post](
+http://blog.docker.com/2014/01/docker-code-contributions-require-developer-certificate-of-origin/) on the introduction of the DCO.
+
+### When building an image, should I prefer system libraries or bundled ones?
+
+*This is a summary of a discussion on the [docker-dev mailing list](
+https://groups.google.com/forum/#!topic/docker-dev/L2RBSPDu1L0).*
+
+Virtually all programs depend on third-party libraries. Most frequently, they
+will use dynamic linking and some kind of package dependency system, so that when
+multiple programs need the same library, it is installed only once.
+
+Some programs, however, will bundle their third-party libraries, because they
+rely on very specific versions of those libraries. For instance, Node.js bundles
+OpenSSL; MongoDB bundles V8 and Boost (among others).
+
+When creating a Docker image, is it better to use the bundled libraries, or
+should you build those programs so that they use the default system libraries
+instead?
+
+The key point about system libraries is not about saving disk or memory space.
+It is about security. All major distributions take security seriously, by
+having dedicated security teams, following up closely with published
+vulnerabilities, and disclosing advisories themselves. (Look at the [Debian
+Security Information](https://www.debian.org/security/) for an example of those
+procedures.) Upstream developers, however, do not always implement similar
+practices.
+
+Before setting up a Docker image to compile a program from source, if you want
+to use bundled libraries, you should check if the upstream authors provide a
+convenient way to announce security vulnerabilities, and if they update their
+bundled libraries in a timely manner. If they don't, you are exposing yourself
+(and the users of your image) to security vulnerabilities.
+
+Likewise, before using packages built by others, you should check if the
+channels providing those packages implement similar security best practices.
+Downloading and installing an "all-in-one" .deb or .rpm sounds great at first,
+except if you have no way to figure out that it contains a copy of the OpenSSL
+library vulnerable to the [Heartbleed](http://heartbleed.com/) bug.
+
+### Why is `DEBIAN_FRONTEND=noninteractive` discouraged in Dockerfiles?
+
+When building Docker images on Debian and Ubuntu you may have seen errors like:
+
+    unable to initialize frontend: Dialog
+
+These errors don't stop the image from being built but inform you that the
+installation process tried to open a dialog box, but was unable to. Generally,
+these errors are safe to ignore.
+
+Some people circumvent these errors by changing the `DEBIAN_FRONTEND`
+environment variable inside the Dockerfile using:
+
+    ENV DEBIAN_FRONTEND=noninteractive
+
+This prevents the installer from opening dialog boxes during installation, which
+stops the errors.
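+
+As a minimal sketch of a safer alternative (assuming a Debian-based image), you
+can scope the variable to a single `RUN` instruction so it never becomes part of
+the image's environment:
+
+    # The variable applies only to this one command, not to the image itself
+    RUN DEBIAN_FRONTEND=noninteractive apt-get update && \
+        DEBIAN_FRONTEND=noninteractive apt-get install -y vim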
+
+While changing `DEBIAN_FRONTEND` globally may sound like a good idea, it *may* have side effects. The
+`DEBIAN_FRONTEND` environment variable will be inherited by all images and
+containers built from your image, effectively changing their behavior. People
+using those images will run into problems when installing software
+interactively, because installers will not show any dialog boxes.
+
+Because of this, and because setting `DEBIAN_FRONTEND` to `noninteractive` is
+mainly a 'cosmetic' change, we *discourage* changing it.
+
+If you *really* need to change its setting, make sure to change it back to its
+[default value](https://www.debian.org/releases/stable/i386/ch05s03.html.en)
+afterwards.
+
+### Why do I get `Connection reset by peer` when making a request to a service running in a container?
+
+Typically, this message is returned if the service is bound only to the
+localhost address (`127.0.0.1`) inside the container. As a result, requests
+coming to the container from outside are dropped. To correct this problem,
+change the service's configuration so that it listens on all interfaces
+(`0.0.0.0`) and accepts requests from all IPs. If you aren't sure
+how to do this, check the documentation for your OS.
+
+
+### Where can I find more answers?
+
+You can find more answers on:
+
+
+- [Docker user mailing list](https://groups.google.com/d/forum/docker-user)
+- [Docker developer mailing list](https://groups.google.com/d/forum/docker-dev)
+- [IRC, docker on freenode](irc://chat.freenode.net#docker)
+- [GitHub](https://github.com/docker/docker)
+- [Ask questions on Stack Overflow](http://stackoverflow.com/search?q=docker)
+- [Join the conversation on Twitter](http://twitter.com/docker)
+
+Looking for something else to read? Check out the [User Guide](/userguide/).
 diff --git a/docs/misc/index.md b/docs/misc/index.md new file mode 100644 index 00000000..a3a0a36f --- /dev/null +++ b/docs/misc/index.md @@ -0,0 +1,120 @@
+
+
+# About Docker
+
+**Develop, Ship and Run Any Application, Anywhere**
+
+[**Docker**](https://www.docker.com) is a platform for developers and sysadmins
+to develop, ship, and run applications. Docker lets you quickly assemble
+applications from components and eliminates the friction that can come when
+shipping code. Docker lets you get your code tested and deployed into production
+as fast as possible.
+
+Docker consists of:
+
+* The Docker Engine - our lightweight and powerful open source container
+  virtualization technology combined with a workflow for building
+  and containerizing your applications.
+* [Docker Hub](https://hub.docker.com) - our SaaS service for
+  sharing and managing your application stacks.
+
+## Why Docker?
+
+*Faster delivery of your applications*
+
+* We want your environment to work better. Docker containers,
+  and the workflow that comes with them, help your developers,
+  sysadmins, QA folks, and release engineers work together to get your code
+  into production and make it useful. We've created a standard
+  container format that lets developers care about their applications
+  inside containers while sysadmins and operators can work on running the
+  container in your deployment. This separation of duties streamlines and
+  simplifies the management and deployment of code.
+* We make it easy to build new containers, enable rapid iteration of
+  your applications, and increase the visibility of changes. This
+  helps everyone in your organization understand how an application works
+  and how it is built.
+* Docker containers are lightweight and fast!
Containers have
+  sub-second launch times, reducing the cycle
+  time of development, testing, and deployment.
+
+*Deploy and scale more easily*
+
+* Docker containers run (almost) everywhere. You can deploy
+  containers on desktops, physical servers, virtual machines, into
+  data centers, and up to public and private clouds.
+* Since Docker runs on so many platforms, it's easy to move your
+  applications around. You can easily move an application from a
+  testing environment into the cloud and back whenever you need.
+* Docker's lightweight containers also make scaling up and
+  down fast and easy. You can quickly launch more containers when
+  needed and then shut them down easily when they're no longer needed.
+
+*Get higher density and run more workloads*
+
+* Docker containers don't need a hypervisor, so you can pack more of
+  them onto your hosts. This means you get more value out of every
+  server and can potentially reduce what you spend on equipment and
+  licenses.
+
+*Faster deployment makes for easier management*
+
+* As Docker speeds up your workflow, it gets easier to make lots
+  of small changes instead of huge, big bang updates. Smaller
+  changes mean reduced risk and more uptime.
+
+## About this guide
+
+The [Understanding Docker section](introduction/understanding-docker.md) will help you:
+
+ - See how Docker works at a high level
+ - Understand the architecture of Docker
+ - Discover Docker's features
+ - See how Docker compares to virtual machines
+ - See some common use cases
+
+### Installation guides
+
+The [installation section](/installation/#installation) will show you how to
+install Docker on a variety of platforms.
+
+
+### Docker user guide
+
+To learn about Docker in more detail and to answer questions about usage and
+implementation, check out the [Docker User Guide](/userguide/).
+
+## Release notes
+
+A summary of the changes in each release in the current series can now be found
+on the separate [Release Notes page](/release-notes/).
+
+## Feature Deprecation Policy
+
+As changes are made to Docker, there may be times when existing features
+will need to be removed or replaced with newer features. Before an existing
+feature is removed, it will be labeled as "deprecated" within the documentation
+and will remain in Docker for, usually, at least two releases. After that time
+it may be removed.
+
+Users are expected to take note of the list of deprecated features each
+release and plan their migration away from those features, and (if applicable)
+towards the replacement features as soon as possible.
+
+The complete list of deprecated features can be found on the
+[Deprecated Features page](deprecated).
+
+## Licensing
+
+Docker is licensed under the Apache License, Version 2.0. See
+[LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full
+license text.
+
 diff --git a/docs/misc/release-notes.md b/docs/misc/release-notes.md new file mode 100644 index 00000000..5104cd40 --- /dev/null +++ b/docs/misc/release-notes.md @@ -0,0 +1,161 @@
+
+
+# Deprecated Features
+
+To see the complete list of deprecated features, please see the
+[Deprecated Features](deprecated) page.
+
+# Removed Features
+
+The following features have been removed in this release:
+
+* None!
+
+# Release notes version 1.6.0
+(2015-04-16)
+
+You can view release notes for earlier versions of Docker by selecting the
+desired version from the drop-down list at the top right of this page.
For the
+formal release announcement, see [the Docker
+blog](https://blog.docker.com/2015/04/docker-release-1-6/).
+
+
+
+## Docker Engine 1.6.0 features
+
+For a complete list of engine patches, fixes, and other improvements, see the
+[merge PR on GitHub](https://github.com/docker/docker/pull/11635). You'll also
+find [a changelog in the project
+repository](https://github.com/docker/docker/blob/master/CHANGELOG.md).
+
+
+| Feature | Description |
+|------------------------------|------------------------------------------------------------------------------------------------------------|
+| Container and Image Labels | Labels allow you to attach user-defined metadata to containers and images that can be used by your tools. For additional information on using labels, see [Apply custom metadata](https://docs.docker.com/userguide/labels-custom-metadata/#add-labels-to-images-the-label-instruction) in the documentation. |
+| Windows Client preview | The Windows Client can be used just like the Mac OS X client is today with a remote host. Our testing infrastructure was scaled out to accommodate Windows Client testing on every PR to the Engine. See the Azure blog for [details on using this new client](http://azure.microsoft.com/blog/2015/04/16/docker-client-for-windows-is-now-available). |
+| Logging drivers | The new logging driver follows the exec driver and storage driver concepts already available in Engine today. There is a new `--log-driver` option for the `docker run` command. See the `run` reference for a [description of how to use this option](https://docs.docker.com/reference/run/#logging-drivers-log-driver). |
+| Image digests | When you pull, build, or run images, you specify them in the form `namespace/repository:tag`, or even just `repository`. In this release, you are now able to pull, run, build, and refer to images by a new content addressable identifier called a “digest” with the syntax `namespace/repo@digest`. See the command line reference for [examples of using the digest](https://docs.docker.com/reference/commandline/cli/#listing-image-digests). |
+| Custom cgroups | Containers are made from a combination of namespaces, capabilities, and cgroups. Docker already supports custom namespaces and capabilities. Additionally, in this release we’ve added support for custom cgroups. Using the `--cgroup-parent` flag, you can pass a specific `cgroup` to run a container in. See [the command line reference for more information](https://docs.docker.com/reference/commandline/cli/#create). |
+| Ulimits | You can now specify the default `ulimit` settings for all containers when configuring the daemon. For example: `docker -d --default-ulimit nproc=1024:2048`. See [Default Ulimits](https://docs.docker.com/reference/commandline/cli/#default-ulimits) in this documentation. |
+| Commit and import Dockerfile | You can now make changes to images on the fly without having to re-build the entire image. The `commit --change` and `import --change` options allow you to apply standard changes to a new image. These are expressed in the Dockerfile syntax and used to modify the image. For details on how to use these, see the [commit](https://docs.docker.com/reference/commandline/cli/#commit) and [import](https://docs.docker.com/reference/commandline/cli/#import) documentation. |
+
+### Known issues in Engine
+
+This section lists significant known issues present in Docker as of release date.
+For an exhaustive list of issues, see [the issues list on the project
+repository](https://github.com/docker/docker/issues/).
+
+* *Unexpected File Permissions in Containers*
+An idiosyncrasy in AUFS prevented permissions from propagating predictably
+between upper and lower layers. This caused issues with accessing private
+keys, database instances, etc. This issue was closed in this release:
+[GitHub Issue 783](https://github.com/docker/docker/issues/783).
+
+
+* *Docker Hub incompatible with Safari 8*
+Docker Hub had multiple issues displaying on Safari 8, the default browser for
+OS X 10.10 (Yosemite). Most notably, changes in the way Safari handled cookies
+meant that the user was repeatedly logged out.
+Recently, Safari fixed the bug that was causing all the issues. Upgrade to
+Safari 8.0.5, which was just released, and see if that fixes your
+issues. You might have to flush your cookies if it doesn't work right away.
+For more information, see the [Docker forum
+post](https://forums.docker.com/t/new-safari-in-yosemite-issue/300).
+
+## Docker Registry 2.0 features
+
+This release includes Registry 2.0. The Docker Registry is a central server for
+pushing and pulling images. In this release, it was completely rewritten in Go
+around a new set of distribution APIs.
+
+- **Webhook notifications**: You can now configure the Registry to send Webhooks
+when images are pushed. Spin off a CI build, send a notification to IRC –
+whatever you want! Included in the documentation is a detailed [notification
+specification](https://docs.docker.com/registry/notifications/).
+
+- **Native TLS support**: This release makes it easier to secure a registry with
+TLS. This documentation includes [expanded examples of secure
+deployments](https://docs.docker.com/registry/deploying/).
+
+- **New Distribution APIs**: This release includes an expanded set of new
+distribution APIs. You can read the [detailed specification
+here](https://docs.docker.com/registry/spec/api/).
+
+
+## Docker Compose 1.2
+
+For a complete list of compose patches, fixes, and other improvements, see the
+[changelog in the project
+repository](https://github.com/docker/compose/blob/master/CHANGES.md). The
+project also publishes a [set of release
+notes](https://github.com/docker/compose/releases/tag/1.2.0) on GitHub.
+
+- **extends**: You can use the `extends` keyword to share configuration between
+services. With `extends`, you can refer to a service defined
+elsewhere and include its configuration in a locally-defined service, while also
+adding or overriding configuration as necessary. The documentation describes
+[how to use extends in your
+configuration](https://docs.docker.com/compose/extends/#extending-services-in-compose).
+
+- **Relative directory handling may cause breaking change**: Compose now treats
+directories passed to build, filenames passed to `env_file`, and volume host
+paths passed to volumes as relative to the configuration file's directory.
+Previously, they were treated as relative to the directory where you were
+running `docker-compose`. In the majority of cases, the location of the
+configuration file and where you ran `docker-compose` were the same directory.
+Now, you can use the `-f|--file` argument to specify a configuration file in
+another directory.
+
+
+## Docker Swarm 0.2
+
+You'll find the [release for download on
+GitHub](https://github.com/docker/swarm/releases/tag/v0.2.0) and [the
+documentation here](https://docs.docker.com/swarm/). This release includes the
+following features:
+
+- **Spread strategy**: A new strategy for scheduling containers on your cluster
+which evenly spreads them over available nodes.
+- **More Docker commands supported**: More progress has been made towards
+supporting the complete Docker API, such as pulling and inspecting images.
+- **Clustering drivers**: There are no third-party drivers yet, but the
+first steps have been made towards making a pluggable driver interface that will
+make it possible to use Swarm with clustering systems such as Mesos.
+
+
+## Docker Machine 0.2 Pre-release
+
+You'll find the [release for download on
+GitHub](https://github.com/docker/machine/releases) and [the documentation
+here](https://docs.docker.com/machine/). For a complete list of machine changes
+see [the changelog in the project
+repository](https://github.com/docker/machine/blob/master/CHANGES.md#020-2015-03-22).
+
+- **Cleaner driver interface**: It is now much easier to write drivers for providers.
+- **More reliable and consistent provisioning**: Provisioning servers is now
+handled centrally by Machine instead of letting each driver individually do it.
+- **Regenerate TLS certificates**: A new command has been added to regenerate a
+host’s TLS certificates as a good security practice and in case a host’s IP
+address changes.
+
+## Docker Hub Enterprise & Commercially Supported Docker Engine
+
+See the [DHE and CS Docker Engine release notes](docker-hub-enterprise/release-notes.md).
 diff --git a/docs/misc/search.md b/docs/misc/search.md new file mode 100644 index 00000000..277ba26e --- /dev/null +++ b/docs/misc/search.md @@ -0,0 +1,16 @@
+
+
+# Search
+
+*Please activate JavaScript to enable the search functionality.*
+
+## How To Search
+
+From here you can search these documents. Enter your search words into
+the box below and click "search". Note that the search function will
+automatically search for all of the words. Pages containing fewer words
+won't appear in the result list.
 diff --git a/docs/project/advanced-contributing.md b/docs/project/advanced-contributing.md new file mode 100644 index 00000000..04d50862 --- /dev/null +++ b/docs/project/advanced-contributing.md @@ -0,0 +1,159 @@
+
+
+# Advanced contributing
+
+In this section, you learn about the more advanced contributions you can make.
+They are advanced because they have a more involved workflow or require greater
+programming experience. Don't be scared off, though: if you like to stretch and
+challenge yourself, this is the place for you.
+
+This section gives generalized instructions for advanced contributions. You'll
+read about the workflow, but you won't find specific descriptions of commands.
+Your goal should be to understand the processes described.
+
+At this point, you should have read and worked through the earlier parts of
+the project contributor guide. You should also have
+made at least one project contribution.
+
+## Refactor or cleanup proposal
+
+A refactor or cleanup proposal changes Docker's internal structure without
+altering the external behavior. To make this type of proposal:
+
+1. Fork `docker/docker`.
+
+2. Make your changes in a feature branch.
+
+3. Sync and rebase with `master` as you work.
+
+4. Run the full test suite.
+
+5. Submit your code through a pull request (PR).
+
+   The PR's title should have the format:
+
+   **Cleanup:** _short title_
+
+   If your changes require logic changes, note that in your request.
+
+6. Work through Docker's review process until merge.
+
+
+## Design proposal
+
+A design proposal solves a problem or adds a feature to the Docker software.
+The process for submitting design proposals requires two pull requests, one
+for the design and one for the implementation.
+
+![Simple process](/project/images/proposal.png)
+
+The important thing to notice is that both the design pull request and the
+implementation pull request go through a review. In other words, there is a
+considerable time commitment in a design proposal; so, you might want to pair
+with someone on design work.
+
+The following provides greater detail on the process:
+
+1. Come up with an idea.
+
+   Ideas usually come from limitations users feel working with a product. So,
+   take some time to really use Docker. Try it on different platforms; explore
+   how it works with different web applications. Go to some community events
+   and find out what other users want.
+
+2. Review existing issues and proposals to make sure no other user is proposing a similar idea.
+
+   The design proposals are all online in our GitHub pull requests.
+
+3. Talk to the community about your idea.
+
+   We have lots of community forums
+   where you can get feedback on your idea. Float your idea in a forum or two
+   to get some commentary going on it.
+
+4. Fork `docker/docker` and clone the repo to your local host.
+
+5. Create a new Markdown file in the area you wish to change.
+
+   For example, if you want to redesign our daemon, create a new file under the
+   `daemon/` folder.
+
+6. Name the file descriptively, for example `redesign-daemon-proposal.md`.
+
+7. Write a proposal for your change into the file.
+
+   This is a Markdown file that describes your idea. Your proposal
+   should include information like:
+
+   * Why is this change needed or what are the use cases?
+   * What are the requirements this change should meet?
+   * What are some ways to design/implement this feature?
+   * Which design/implementation do you think is best and why?
+   * What are the risks or limitations of your proposal?
+
+   This is your chance to convince people your idea is sound.
+
+8. Submit your proposal in a pull request to `docker/docker`.
+
+   The title should have the format:
+
+   **Proposal:** _short title_
+
+   The body of the pull request should include a brief summary of your change
+   and then say something like "_See the file for a complete description_".
+
+9. Refine your proposal through review.
+
+   The maintainers and the community review your proposal. You'll need to
+   answer questions and sometimes explain or defend your approach. This is a
+   chance for everyone to both teach and learn.
+
+10. Pull request accepted.
+
+    Your request may also be rejected. Not every idea is a good fit for Docker.
+    Let's assume, though, that your proposal succeeded.
+
+11. Implement your idea.
+
+    Implementation uses all the standard practices of any contribution.
+
+    * fork `docker/docker`
+    * create a feature branch
+    * sync frequently back to master
+    * test as you go and full test before a PR
+
+    If you run into issues, the community is there to help.
+
+12. When you have a complete implementation, submit a pull request back to `docker/docker`.
+
+13. Review and iterate on your code.
+
+    If you are making a large code change, you can expect greater scrutiny
+    during this phase.
+
+14. Acceptance and merge!
+
+## About the advanced process
+
+Docker is a large project. Our core team gets a great many design proposals.
+Design proposal discussions can span days, weeks, and longer. The number of
+comments can reach the hundreds. In that situation, following the discussion
+flow and the decisions reached is crucial.
+
+Making a pull request with a design proposal simplifies this process:
+* you can leave comments on specific design proposal lines
+* replies to a line are easy to track
+* as a proposal changes and is updated, pages reset as line items resolve
+* GitHub maintains the entire history
+
+While proposals in pull requests do not end up merged into a master repository, they provide a convenient tool for managing the design process.
 diff --git a/docs/project/coding-style.md b/docs/project/coding-style.md new file mode 100644 index 00000000..65a48612 --- /dev/null +++ b/docs/project/coding-style.md @@ -0,0 +1,103 @@
+
+
+# Coding style checklist
+
+This checklist summarizes the material you worked through in [make a
+code contribution](/project/make-a-contribution) and [advanced
+contributing](/project/advanced-contributing). The checklist applies to both
+program code and documentation code.
+
+## Change and commit code
+
+* Fork the `docker/docker` repository.
+
+* Make changes on your fork in a feature branch. Name your branch `XXXX-something`
+  where `XXXX` is the issue number you are working on.
+
+* Run `gofmt -s -w file.go` on each changed file before
+  committing your changes. Most editors have plug-ins that do this automatically.
+
+* Run `golint` on each changed file before
+  committing your changes.
+
+* Update the documentation when creating or modifying features.
+
+* Commits that fix or close an issue should reference it in the commit message with
+  `Closes #XXXX` or `Fixes #XXXX`. Mentions help by automatically closing the
+  issue on a merge.
+
+* After every commit, run the test suite and ensure it is passing.
+
+* Sync and rebase frequently as you code to keep up with `docker` master.
+
+* Set your `git` signature and make sure you sign each commit.
+
+* Do not add yourself to the `AUTHORS` file. This file is autogenerated from the
+  Git history.
+
+## Tests and testing
+
+* Submit unit tests for your changes.
+
+* Make use of the built-in Go test framework.
+
+* Use existing Docker test files (`name_test.go`) for inspiration.
+
+* Run the full test suite on your
+  branch before submitting a pull request.
+
+* Run `make docs` to build the documentation and then check it locally.
+
+* Use an online grammar
+  checker or similar to test your documentation changes for clarity,
+  concision, and correctness.
+
+## Pull requests
+
+* Sync and cleanly rebase on top of Docker's `master` without multiple branches
+  mixed into the PR.
+
+* Before the pull request, squash your commits into logical units of work using
+  `git rebase -i` and `git push -f`.
+
+* Include documentation changes in the same commit so that a revert would
+  remove all traces of the feature or fix.
+
+* Reference each issue in your pull request description (`#XXXX`).
+
+## Respond to pull requests reviews
+
+* Docker maintainers use LGTM (**l**ooks-**g**ood-**t**o-**m**e) in PR comments
+  to indicate acceptance.
+
+* Code review comments may be added to your pull request. Discuss, then make
+  the suggested modifications and push additional commits to your feature
+  branch.
+
+* Incorporate changes on your feature branch and push to your fork. This
+  automatically updates your open pull request.
+
+* Post a comment after pushing to alert reviewers to PR changes; pushing a
+  change does not send notifications.
+
+* A change requires LGTMs from an absolute majority of the maintainers of an
+  affected component. For example, if you change `docs/` and `registry/` code,
+  an absolute majority of the `docs/` and the `registry/` maintainers must
+  approve your PR.
+
+## Merges after pull requests
+
+* After a merge, [a master build](https://master.dockerproject.org/) is
+  available almost immediately.
+
+* If you made a documentation change, you can see it at
+  [docs.master.dockerproject.org](http://docs.master.dockerproject.org/).
 diff --git a/docs/project/create-pr.md b/docs/project/create-pr.md new file mode 100644 index 00000000..dacb39b0 --- /dev/null +++ b/docs/project/create-pr.md @@ -0,0 +1,138 @@
+
+
+# Create a pull request (PR)
+
+A pull request (PR) sends your changes to the Docker maintainers for review. You
+create a pull request on GitHub. A pull request "pulls" changes from your forked
+repository into the `docker/docker` repository.
+
+You can see the
+list of active pull requests to Docker on GitHub.
+
+## Check your work
+
+Before you create a pull request, check your work.
+
+1. In a terminal window, go to the root of your `docker-fork` repository.
+
+       $ cd ~/repos/docker-fork
+
+2. Check out your feature branch.
+
+       $ git checkout 11038-fix-rhel-link
+       Switched to branch '11038-fix-rhel-link'
+
+3. Run the full test suite on your branch.
+
+       $ make test
+
+   All the tests should pass. If they don't, find out why and correct the
+   situation.
+
+4. Optionally, if you modified the documentation, build it:
+
+       $ make docs
+
+5. Commit and push any changes that result from your checks.
+
+## Rebase your branch
+
+Always rebase and squash your commits before making a pull request.
+
+1. Check out your feature branch in your local `docker-fork` repository.
+
+   This is the branch associated with your request.
+
+2. Fetch any last minute changes from `docker/docker`.
+
+       $ git fetch upstream master
+       From github.com:docker/docker
+        * branch master -> FETCH_HEAD
+
+3. Start an interactive rebase.
+
+       $ git rebase -i upstream/master
+
+4. Rebase opens an editor with a list of commits.
+
+       pick 1a79f55 Tweak some of the other text for grammar
+       pick 53e4983 Fix a link
+       pick 3ce07bb Add a new line about RHEL
+
+5. Replace the `pick` keyword with `squash` on all but the first commit.
+
+       pick 1a79f55 Tweak some of the other text for grammar
+       squash 53e4983 Fix a link
+       squash 3ce07bb Add a new line about RHEL
+
+   After you save the changes and quit from the editor, git starts
+   the rebase, reporting the progress along the way. Sometimes
+   your changes can conflict with the work of others. If git
+   encounters a conflict, it stops the rebase, and prints guidance
+   for how to correct the conflict.
+
+6. Edit and save your commit message.
+
+       $ git commit -s
+
+   Make sure your message includes your signature.
+
+7. Force push any changes to your fork on GitHub.
+
+       $ git push -f origin 11038-fix-rhel-link
+
+## Create a PR on GitHub
+
+You create and manage PRs on GitHub:
+
+1. Open your browser to your fork on GitHub.
+
+   You should see the latest activity from your branch.
+
+   ![Latest commits](/project/images/latest_commits.png)
+
+
+2. Click "Compare & pull request."
+
+   The system displays the pull request dialog.
+
+   ![PR dialog](/project/images/to_from_pr.png)
+
+   The pull request compares your changes to the `master` branch on the
+   `docker/docker` repository.
+
+3. Edit the dialog's description and add a reference to the issue you are fixing.
+
+   GitHub helps you out by searching for the issue as you type.
+
+   ![Fixes issue](/project/images/fixes_num.png)
+
+4. Scroll down and verify the PR contains the commits and changes you expect.
+
+   For example, is the file count correct? Are the changes in the files what
+   you expect?
+
+   ![Commits](/project/images/commits_expected.png)
+
+5. Press "Create pull request".
+
+   The system creates the request and opens it for you in the `docker/docker`
+   repository.
+
+   ![Pull request made](/project/images/pull_request_made.png)
+
+
+## Where to go next
+
+Congratulations, you've created your first pull request to Docker. The next
+step is for you to learn how to [participate in your PR's
+review](/project/review-pr/).
 diff --git a/docs/project/doc-style.md b/docs/project/doc-style.md new file mode 100644 index 00000000..1f2ebe1c --- /dev/null +++ b/docs/project/doc-style.md @@ -0,0 +1,283 @@
+
+
+# Docker documentation: style & grammar conventions
+
+## Style standards
+
+Over time, different publishing communities have written standards for the style
+and grammar they prefer in their publications. These standards are called
+[style guides](http://en.wikipedia.org/wiki/Style_guide). Generally, Docker’s
+documentation uses the standards described in the
+[Associated Press's (AP) style guide](http://en.wikipedia.org/wiki/AP_Stylebook).
+If a question about syntactical, grammatical, or lexical practice comes up,
+refer to the AP guide first. If you don’t have a copy of (or online subscription
+to) the AP guide, you can almost always find an answer to a specific question by
+searching the web. If you can’t find an answer, please ask a
+[maintainer](https://github.com/docker/docker/blob/master/docs/MAINTAINERS) and
+we will find the answer.
+
+That said, please don't get too hung up on using correct style. We'd rather have
+you submit good information that doesn't conform to the guide than no
+information at all. Docker's tech writers are always happy to help you with the
+prose, and we promise not to judge or use a red pen!
+
+> **Note:**
+> The documentation is written with paragraphs wrapped at 80 column lines to
+> make it easier for terminal use. You can probably set up your favorite text
+> editor to do this automatically for you.
+
+### Prose style
+
+In general, try to write simple, declarative prose. We prefer short,
+single-clause sentences and brief three-to-five sentence paragraphs. Try to
+choose vocabulary that is straightforward and precise. Avoid creating new terms,
+using obscure terms or, in particular, using a lot of jargon. For example, use
+"use" instead of "leverage".
+
+That said, don’t feel like you have to write for localization or for
+English-as-a-second-language (ESL) speakers specifically.
Assume you are writing
+for an ordinary speaker of English with a basic university education. If your
+prose is simple, clear, and straightforward it will translate readily.
+
+One way to think about this is to assume Docker’s users are generally university
+educated and read at at least a "16th" grade level (meaning they have a
+university degree). You can use a [readability
+tester](https://readability-score.com/) to help guide your judgment. For
+example, the readability score for the phrase "Containers should be ephemeral"
+is around the 13th grade level (first year at university), and so is acceptable.
+
+In all cases, we prefer clear, concise communication over stilted, formal
+language. Don't feel like you have to write documentation that "sounds like
+technical writing."
+
+### Metaphor and figurative language
+
+One exception to the "don’t write directly for ESL" rule is to avoid the use of
+metaphor or other
+[figurative language](http://en.wikipedia.org/wiki/Literal_and_figurative_language) to
+describe things. There are too many cultural and social issues that can prevent
+a reader from correctly interpreting a metaphor.
+
+## Specific conventions
+
+Below are some specific recommendations (and a few deviations) from AP style
+that we use in our docs.
+
+### Contractions
+
+As long as your prose does not become too slangy or informal, it's perfectly
+acceptable to use contractions in our documentation. Make sure to use
+apostrophes correctly.
+
+### Use of dashes in a sentence
+
+Dashes refer to the en dash (–) and the em dash (—). Dashes can be used to
+separate parenthetical material.
+
+Usage example: This is an example of a Docker client – which uses the Big Widget
+to run – and does x, y, and z.
+
+Use dashes cautiously and consider whether commas or parentheses would work just
+as well. We always emphasize short, succinct sentences.
+
+More info from the always handy [Grammar Girl site](http://www.quickanddirtytips.com/education/grammar/dashes-parentheses-and-commas).
+
+### Pronouns
+
+It's okay to use first and second person pronouns, especially if it lets you
+avoid a passive construction. Specifically, always use "we" to
+refer to Docker and "you" to refer to the user. For example, "We built the
+`exec` command so you can resize a TTY session." That said, in general, try to
+write simple, imperative sentences that avoid the use of pronouns altogether.
+Say "Now, enter your SSH key" rather than "You can now enter your SSH key."
+
+As much as possible, avoid using gendered pronouns ("he" and "she", etc.).
+Either recast the sentence so the pronoun is not needed or, less preferably,
+use "they" instead. If you absolutely can't get around using a gendered pronoun,
+pick one and stick to it. Which one you choose is up to you. One common
+convention is to use the pronoun of the author's gender, but if you prefer to
+default to "he" or "she", that's fine too.
+
+### Capitalization
+
+#### In general
+
+Only proper nouns should be capitalized in body text. In general, strive to be
+as strict as possible in applying this rule. Avoid using capitals for emphasis
+or to denote "specialness".
+
+The word "Docker" should always be capitalized when referring to either the
+company or the technology. The only exception is when the term appears in a code
+sample.
+
+#### Starting sentences
+
+Because code samples should always be written exactly as they would appear
+on-screen, you should avoid starting sentences with a code sample.
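+
+For example, rather than writing "`docker ps` lists your running containers,"
+recast the sentence as "The `docker ps` command lists your running containers."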
+
+#### In headings
+
+Headings take sentence capitalization, meaning that only the first letter is
+capitalized (and words that would normally be capitalized in a sentence, e.g.,
+"Docker"). Do not use Title Case (i.e., capitalizing every word) for headings. Generally, we adhere to [AP style
+for titles](http://www.quickanddirtytips.com/education/grammar/capitalizing-titles).
+
+### Periods
+
+We prefer one space after a period at the end of a sentence, not two.
+
+See [lists](#lists) below for how to punctuate list items.
+
+### Abbreviations and acronyms
+
+* Exempli gratia (e.g.) and id est (i.e.): these should always have periods and
+are always followed by a comma.
+
+* Acronyms are pluralized by simply adding "s", e.g., PCs, OSs.
+
+* On first use on a given page, the complete term should be used, with the
+abbreviation or acronym in parentheses. E.g., Red Hat Enterprise Linux (RHEL).
+The exception is common, non-technical acronyms like AKA or ASAP. Note that
+acronyms other than i.e. and e.g. are capitalized.
+
+* Other than "e.g." and "i.e." (as discussed above), acronyms do not take
+periods: PC, not P.C.
+
+
+### Lists
+
+When writing lists, keep the following in mind:
+
+Use bullets when the items being listed are independent of each other and the
+order of presentation is not important.
+
+Use numbers for steps that have to happen in order or if you have mentioned the
+list in introductory text. For example, if you wrote "There are three config
+settings available for SSL, as follows:", you would number each config setting
+in the subsequent list.
+
+In all lists, if an item is a complete sentence, it should end with a
+period. Otherwise, we prefer no terminal punctuation for list items.
+Each item in a list should start with a capital letter.
+
+### Numbers
+
+Write out numbers in body text and titles from one to ten. From 11 on, use numerals.
+
+### Notes
+
+Use notes sparingly and only to bring things to the reader's attention that are
+critical or otherwise deserving of being called out from the body text. Please
+format all notes as follows:
+
+    > **Note:**
+    > One line of note text
+    > another line of note text
+
+### Avoid excess use of "i.e."
+
+Minimize your use of "i.e.". It can add an unnecessary interpretive burden on
+the reader. Avoid writing "This is a thing, i.e., it is like this". Just
+say what it is: "This thing is …"
+
+### Preferred usages
+
+#### Login vs. log in
+
+A "login" is a noun (one word), as in "Enter your login". "Log in" is a compound
+verb (two words), as in "Log in to the terminal".
+
+### Oxford comma
+
+One way in which we differ from AP style is that Docker’s docs use the [Oxford
+comma](http://en.wikipedia.org/wiki/Serial_comma) in all cases. That’s our
+position on this controversial topic; we won't change our mind, and that’s that!
+
+### Code and UI text styling
+
+We require `code font` styling (monospace, sans-serif) for all text that refers
+to a command or other input or output from the CLI. This includes file paths
+(e.g., `/etc/hosts/docker.conf`). If you enclose text in backticks (`), Markdown
+styles the text as code.
+
+Text from a CLI should be quoted verbatim, even if it contains errors or its
+style contradicts this guide. You can add "(sic)" after the quote to indicate
+the errors are in the quote and are not errors in our docs.
+
+Text taken from a GUI (e.g., menu text or button text) should appear in "double
+quotes". The text should take the exact same capitalization, etc. as appears in
+the GUI.
E.g., Click "Continue" to save the settings. + +Text that refers to a keyboard command or hotkey is capitalized (e.g., Ctrl-D). + +When writing CLI examples, give the user hints by making the examples resemble +exactly what they see in their shell: + +* Indent shell examples by 4 spaces so they get rendered as code blocks. +* Start typed commands with `$ ` (dollar space), so that they are easily + differentiated from program output. +* Program output has no prefix. +* Comments begin with # (hash space). +* In-container shell commands, begin with `$$ ` (dollar dollar space). + +Please test all code samples to ensure that they are correct and functional so +that users can successfully cut-and-paste samples directly into the CLI. + +## Pull requests + +The pull request (PR) process is in place so that we can ensure changes made to +the docs are the best changes possible. A good PR will do some or all of the +following: + +* Explain why the change is needed +* Point out potential issues or questions +* Ask for help from experts in the company or the community +* Encourage feedback from core developers and others involved in creating the + software being documented. + +Writing a PR that is singular in focus and has clear objectives will encourage +all of the above. Done correctly, the process allows reviewers (maintainers and +community members) to validate the claims of the documentation and identify +potential problems in communication or presentation. + +### Commit messages + +In order to write clear, useful commit messages, please follow these +[recommendations](http://robots.thoughtbot.com/5-useful-tips-for-a-better-commit-message). + +## Links + +For accessibility and usability reasons, avoid using phrases such as "click +here" for link text. Recast your sentence so that the link text describes the +content of the link, as we did in the +["Commit messages" section](#commit-messages) above. + +You can use relative links (../linkeditem) to link to other pages in Docker's +documentation. + +## Graphics + +When you need to add a graphic, try to make the file-size as small as possible. +If you need help reducing file-size of a high-resolution image, feel free to +contact us for help. +Usually, graphics should go in the same directory as the .md file that +references them, or in a subdirectory for images if one already exists. + +The preferred file format for graphics is PNG, but GIF and JPG are also +acceptable. + +If you are referring to a specific part of the UI in an image, use +call-outs (circles and arrows or lines) to highlight what you’re referring to. +Line width for call-outs should not exceed five pixels. The preferred color for +call-outs is red. + +Be sure to include descriptive alt-text for the graphic. This greatly helps +users with accessibility issues. + +Lastly, be sure you have permission to use any included graphics. \ No newline at end of file diff --git a/docs/project/find-an-issue.md b/docs/project/find-an-issue.md new file mode 100644 index 00000000..590795c2 --- /dev/null +++ b/docs/project/find-an-issue.md @@ -0,0 +1,246 @@ + + + + + +# Find and claim an issue + +On this page, you choose what you want to work on. As a contributor you can work +on whatever you want. If you are new to contributing, you should start by +working with our known issues. + +## Understand the issue types + +An existing issue is something reported by a Docker user. As issues come in, +our maintainers triage them. Triage is its own topic. 
For now, it is important
+for you to know that triage includes ranking issues according to difficulty.
+
+Triaged issues have one of these labels:
+
+| Label | Experience level guideline |
+|----------------|----------------------------|
+| exp/beginner | You have made fewer than 10 contributions in your lifetime to any open source project. |
+| exp/novice | You have made more than 10 contributions to an open source project or at least 5 contributions to Docker. |
+| exp/proficient | You have made more than 5 contributions to Docker which amount to at least 200 code lines or 1000 documentation lines. |
+| exp/expert | You have made less than 20 commits to Docker which amount to 500-1000 code lines or 1000-3000 documentation lines. |
+| exp/master | You have made more than 20 commits to Docker and greater than 1000 code lines or 3000 documentation lines. |
+
+As the table states, these labels are meant as guidelines. You might have
+written a whole plugin for Docker in a personal project and never contributed to
+Docker. With that kind of experience, you could take on an exp/expert or exp/master level task.
+
+## Claim a beginner or novice issue
+
+In this section, you find and claim an open documentation issue.
+
+
+1. Go to the `docker/docker` repository.
+
+2. Click on the "Issues" link.
+
+   A list of the open issues appears.
+
+   ![Open issues](/project/images/issue_list.png)
+
+3. Look for the exp/beginner items on the list.
+
+4. Click on the "labels" dropdown and select exp/beginner.
+
+   The system filters to show only open exp/beginner issues.
+
+5. Open an issue that interests you.
+
+   The comments on the issues can tell you both the problem and the potential
+   solution.
+
+6. Make sure that no other user has chosen to work on the issue.
+
+   We don't allow external contributors to assign issues to themselves. So, you
+   need to read the comments to find if a user claimed the issue by leaving a
+   `#dibs` comment on the issue.
+
+7. When you find an open issue that both interests you and is unclaimed, add a
+`#dibs` comment.
+
+   ![Easy issue](/project/images/easy_issue.png)
+
+   This example uses issue 11038. Your issue # will be different depending on
+   what you claimed. After a moment, Gordon, the Docker bot, changes the issue
+   status to claimed.
+
+8. Make a note of the issue number; you'll need it later.
+
+## Sync your fork and create a new branch
+
+If you have followed along in this guide, you forked the `docker/docker`
+repository. Maybe that was an hour ago or a few days ago. In any case, before
+you start working on your issue, sync your repository with the upstream
+`docker/docker` master. Syncing ensures your repository has the latest
+changes.
+
+To sync your repository:
+
+1. Open a terminal on your local host.
+
+2. Change directory to the `docker-fork` root.
+
+       $ cd ~/repos/docker-fork
+
+3. Check out the master branch.
+
+       $ git checkout master
+       Switched to branch 'master'
+       Your branch is up-to-date with 'origin/master'.
+
+   Recall that `origin/master` is a branch on your remote GitHub repository.
+
+4. Make sure you have the upstream remote `docker/docker` by listing them.
+
+       $ git remote -v
+       origin https://github.com/moxiegirl/docker.git (fetch)
+       origin https://github.com/moxiegirl/docker.git (push)
+       upstream https://github.com/docker/docker.git (fetch)
+       upstream https://github.com/docker/docker.git (push)
+
+   If the `upstream` is missing, add it.
+
+       $ git remote add upstream https://github.com/docker/docker.git
+
+5. Fetch all the changes from the `upstream master` branch.
+
+       $ git fetch upstream master
+       remote: Counting objects: 141, done.
+       remote: Compressing objects: 100% (29/29), done.
+       remote: Total 141 (delta 52), reused 46 (delta 46), pack-reused 66
+       Receiving objects: 100% (141/141), 112.43 KiB | 0 bytes/s, done.
+       Resolving deltas: 100% (79/79), done.
+       From github.com:docker/docker
+        * branch master -> FETCH_HEAD
+
+   This command gets all the changes from the `master` branch belonging to
+   the `upstream` remote.
+
+6. Rebase your local master with the `upstream/master`.
+
+       $ git rebase upstream/master
+       First, rewinding head to replay your work on top of it...
+       Fast-forwarded master to upstream/master.
+
+   This command applies all the commits from the upstream master to your local
+   master.
+
+7. Check the status of your local branch.

        $ git status
        On branch master
        Your branch is ahead of 'origin/master' by 38 commits.
          (use "git push" to publish your local commits)
        nothing to commit, working directory clean

    Your local repository now has all the changes from the `upstream` remote. You
    need to push the changes to your own remote fork, which is `origin master`.

8. Push the rebased master to `origin master`.

        $ git push origin master
        Username for 'https://github.com': moxiegirl
        Password for 'https://moxiegirl@github.com':
        Counting objects: 223, done.
        Compressing objects: 100% (38/38), done.
        Writing objects: 100% (69/69), 8.76 KiB | 0 bytes/s, done.
        Total 69 (delta 53), reused 47 (delta 31)
        To https://github.com/moxiegirl/docker.git
           8e107a9..5035fa1  master -> master

9. Create a new feature branch to work on your issue.

    Your branch name should have the format `XXXX-descriptive` where `XXXX` is
    the issue number you are working on. For example:

        $ git checkout -b 11038-fix-rhel-link
        Switched to a new branch '11038-fix-rhel-link'

    Your branch should be up-to-date with the `upstream/master`. Why? Because you
    branched off a freshly synced master. Let's check this anyway in the next
    step.

10. Rebase your branch from upstream/master.

        $ git rebase upstream/master
        Current branch 11038-fix-rhel-link is up to date.

    At this point, your local branch, your remote repository, and the Docker
    repository all have identical code. You are ready to make changes for your
    issue.


## Where to go next

At this point, you know what you want to work on and you have a branch to do
your work in. Go on to the next section to learn [how to work on your
changes](/project/work-issue/).

diff --git a/docs/project/get-help.md b/docs/project/get-help.md
new file mode 100644
index 00000000..27494b06
--- /dev/null
+++ b/docs/project/get-help.md
@@ -0,0 +1,153 @@

# Where to chat or get help

There are several communications channels you can use to chat with Docker
community members and developers.

| Channel | Details |
|---------|---------|
| Internet Relay Chat (IRC) | IRC is a direct line to our most knowledgeable Docker users. Join the `#docker` and `#docker-dev` channels on `irc.freenode.net`. IRC was first created in 1988, so it is a rich chat protocol, but it can overwhelm new users. You can search our chat archives. Read our IRC quickstart guide below for an easy way to get started. |
| Google Groups | There are two groups. Docker-user is for people using Docker containers. The docker-dev group is for contributors and other people contributing to the Docker project. |
| Twitter | You can follow Docker's Twitter account to get updates on our products. You can also tweet us questions or just share blogs or stories. |
| Stack Overflow | Stack Overflow has over 7,000 Docker questions listed. We regularly monitor Docker questions, and so do many other knowledgeable Docker users. |
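
If you prefer a standalone IRC client to the web chat covered in the quickstart
below, the setup boils down to the same two commands in almost any client. For
example, with `irssi` (an illustrative choice; this guide doesn't require any
particular client):

    /connect irc.freenode.net
    /join #docker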

## IRC Quickstart

IRC can also be overwhelming for new users. This quickstart shows you
the easiest way to connect to IRC.

1. In your browser, open http://webchat.freenode.net

    ![Login screen](/project/images/irc_connect.png)


2. Fill out the form.

| Field | Value |
|-------|-------|
| Nickname | The short name you want to be known as in IRC. |
| Channels | `#docker` |
| reCAPTCHA | Use the value provided. |

3. Click "Connect".

    The system connects you to chat. You'll see a lot of text. At the bottom of
    the display is a command line. Just above the command line, the system asks
    you to register.

    ![Login screen](/project/images/irc_after_login.png)


4. In the command line, register your nickname.

        /msg NickServ REGISTER password youremail@example.com

    ![Login screen](/project/images/register_nic.png)

    The IRC system sends an email to the address you
    enter. The email contains instructions for completing your registration.

5. Open your mail client and look for the email.

    ![Login screen](/project/images/register_email.png)

6. Back in the browser, complete the registration according to the email.

        /msg NickServ VERIFY REGISTER moxiegirl_ acljtppywjnr

7. Join the `#docker` channel using the following command.

        /j #docker

    You can also join the `#docker-dev` channel.

        /j #docker-dev

8. To ask the channel a question, just type messages in the command line.

    ![Login screen](/project/images/irc_chat.png)

9. To quit, close the browser window.


### Tips and learning more about IRC

Next time you return to log into chat, you'll need to re-enter your password
on the command line using this command:

    /msg NickServ identify <password>

If you forget or lose your password, see the FAQ on
freenode.net to learn how to recover it.

This quickstart was meant to get you up and into IRC very quickly. If you find
IRC useful, there is a lot more to learn. Drupal, another open source project,
actually has written a lot of good documentation about using IRC for their
project (thanks, Drupal!).

diff --git a/docs/project/images/box.png b/docs/project/images/box.png
new file mode 100644
index 00000000..642385ae
Binary files /dev/null and b/docs/project/images/box.png differ
diff --git a/docs/project/images/branch-sig.png b/docs/project/images/branch-sig.png
new file mode 100644
index 00000000..b30f007b
Binary files /dev/null and b/docs/project/images/branch-sig.png differ
diff --git a/docs/project/images/checked.png b/docs/project/images/checked.png
new file mode 100644
index 00000000..93ab2be9
Binary files /dev/null and b/docs/project/images/checked.png differ
diff --git a/docs/project/images/commits_expected.png b/docs/project/images/commits_expected.png
new file mode 100644
index 00000000..d3d8b1e3
Binary files /dev/null and b/docs/project/images/commits_expected.png differ
diff --git a/docs/project/images/contributor-edit.png b/docs/project/images/contributor-edit.png
new file mode 100644
index 00000000..52737d7b
Binary files /dev/null and b/docs/project/images/contributor-edit.png differ
diff --git a/docs/project/images/copy_url.png b/docs/project/images/copy_url.png
new file mode 100644
index 00000000..a715019e
Binary files /dev/null and b/docs/project/images/copy_url.png differ
diff --git a/docs/project/images/easy_issue.png b/docs/project/images/easy_issue.png
new file mode 100644
index 00000000..6d346bcd
Binary files /dev/null and b/docs/project/images/easy_issue.png differ
diff --git a/docs/project/images/existing_issue.png b/docs/project/images/existing_issue.png
new file mode 100644
index 00000000..6757e60b
Binary files /dev/null and b/docs/project/images/existing_issue.png differ
diff --git a/docs/project/images/existing_issue.snagproj b/docs/project/images/existing_issue.snagproj
new file mode 100644
index 00000000..05ae2b0c
Binary files /dev/null and b/docs/project/images/existing_issue.snagproj differ
diff --git
a/docs/project/images/fixes_num.png b/docs/project/images/fixes_num.png new file mode 100644 index 00000000..df52f27f Binary files /dev/null and b/docs/project/images/fixes_num.png differ diff --git a/docs/project/images/fork_docker.png b/docs/project/images/fork_docker.png new file mode 100644 index 00000000..f7c557cd Binary files /dev/null and b/docs/project/images/fork_docker.png differ diff --git a/docs/project/images/fresh_container.png b/docs/project/images/fresh_container.png new file mode 100644 index 00000000..7f69f2d3 Binary files /dev/null and b/docs/project/images/fresh_container.png differ diff --git a/docs/project/images/git_bash.png b/docs/project/images/git_bash.png new file mode 100644 index 00000000..153fd2fb Binary files /dev/null and b/docs/project/images/git_bash.png differ diff --git a/docs/project/images/give_try.png b/docs/project/images/give_try.png new file mode 100644 index 00000000..c0495276 Binary files /dev/null and b/docs/project/images/give_try.png differ diff --git a/docs/project/images/gordon.jpeg b/docs/project/images/gordon.jpeg new file mode 100644 index 00000000..8a0df7d4 Binary files /dev/null and b/docs/project/images/gordon.jpeg differ diff --git a/docs/project/images/in_room.png b/docs/project/images/in_room.png new file mode 100644 index 00000000..4fdec81b Binary files /dev/null and b/docs/project/images/in_room.png differ diff --git a/docs/project/images/include_gcc.png b/docs/project/images/include_gcc.png new file mode 100644 index 00000000..e48f50cd Binary files /dev/null and b/docs/project/images/include_gcc.png differ diff --git a/docs/project/images/irc_after_login.png b/docs/project/images/irc_after_login.png new file mode 100644 index 00000000..79496c80 Binary files /dev/null and b/docs/project/images/irc_after_login.png differ diff --git a/docs/project/images/irc_chat.png b/docs/project/images/irc_chat.png new file mode 100644 index 00000000..6266020f Binary files /dev/null and b/docs/project/images/irc_chat.png differ diff --git a/docs/project/images/irc_connect.png b/docs/project/images/irc_connect.png new file mode 100644 index 00000000..f411aabc Binary files /dev/null and b/docs/project/images/irc_connect.png differ diff --git a/docs/project/images/irc_login.png b/docs/project/images/irc_login.png new file mode 100644 index 00000000..a7a1dc7e Binary files /dev/null and b/docs/project/images/irc_login.png differ diff --git a/docs/project/images/issue_list.png b/docs/project/images/issue_list.png new file mode 100644 index 00000000..c0aefdb4 Binary files /dev/null and b/docs/project/images/issue_list.png differ diff --git a/docs/project/images/latest_commits.png b/docs/project/images/latest_commits.png new file mode 100644 index 00000000..791683a5 Binary files /dev/null and b/docs/project/images/latest_commits.png differ diff --git a/docs/project/images/list_example.png b/docs/project/images/list_example.png new file mode 100644 index 00000000..a306e6e7 Binary files /dev/null and b/docs/project/images/list_example.png differ diff --git a/docs/project/images/locate_branch.png b/docs/project/images/locate_branch.png new file mode 100644 index 00000000..b865cfcb Binary files /dev/null and b/docs/project/images/locate_branch.png differ diff --git a/docs/project/images/path_variable.png b/docs/project/images/path_variable.png new file mode 100644 index 00000000..52f197a5 Binary files /dev/null and b/docs/project/images/path_variable.png differ diff --git a/docs/project/images/proposal.png b/docs/project/images/proposal.png new file mode 
100644 index 00000000..250781a7 Binary files /dev/null and b/docs/project/images/proposal.png differ diff --git a/docs/project/images/proposal.snagproj b/docs/project/images/proposal.snagproj new file mode 100644 index 00000000..c9ad49d0 Binary files /dev/null and b/docs/project/images/proposal.snagproj differ diff --git a/docs/project/images/pull_request_made.png b/docs/project/images/pull_request_made.png new file mode 100644 index 00000000..d51a1a75 Binary files /dev/null and b/docs/project/images/pull_request_made.png differ diff --git a/docs/project/images/red_notice.png b/docs/project/images/red_notice.png new file mode 100644 index 00000000..8839723a Binary files /dev/null and b/docs/project/images/red_notice.png differ diff --git a/docs/project/images/register_email.png b/docs/project/images/register_email.png new file mode 100644 index 00000000..02ef4cd2 Binary files /dev/null and b/docs/project/images/register_email.png differ diff --git a/docs/project/images/register_nic.png b/docs/project/images/register_nic.png new file mode 100644 index 00000000..16cf05a3 Binary files /dev/null and b/docs/project/images/register_nic.png differ diff --git a/docs/project/images/three_running.png b/docs/project/images/three_running.png new file mode 100644 index 00000000..cf6d25f2 Binary files /dev/null and b/docs/project/images/three_running.png differ diff --git a/docs/project/images/three_terms.png b/docs/project/images/three_terms.png new file mode 100644 index 00000000..7caa6ac6 Binary files /dev/null and b/docs/project/images/three_terms.png differ diff --git a/docs/project/images/to_from_pr.png b/docs/project/images/to_from_pr.png new file mode 100644 index 00000000..8dd6638e Binary files /dev/null and b/docs/project/images/to_from_pr.png differ diff --git a/docs/project/images/windows-env-vars.png b/docs/project/images/windows-env-vars.png new file mode 100644 index 00000000..68d00e90 Binary files /dev/null and b/docs/project/images/windows-env-vars.png differ diff --git a/docs/project/images/windows-mingw.png b/docs/project/images/windows-mingw.png new file mode 100644 index 00000000..b1d15e6b Binary files /dev/null and b/docs/project/images/windows-mingw.png differ diff --git a/docs/project/make-a-contribution.md b/docs/project/make-a-contribution.md new file mode 100644 index 00000000..a6d7fcaf --- /dev/null +++ b/docs/project/make-a-contribution.md @@ -0,0 +1,41 @@ + + +# Understand how to contribute + +Contributing is a process where you work with Docker maintainers and the +community to improve Docker. The maintainers are experienced contributors +who specialize in one or more Docker components. Maintainers play a big role +in reviewing contributions. + +There is a formal process for contributing. We try to keep our contribution +process simple so you'll want to contribute frequently. + + +## The basic contribution workflow + +In this guide, you work through Docker's basic contribution workflow by fixing a +single *beginner* issue in the `docker/docker` repository. The workflow +for fixing simple issues looks like this: + +![Simple process](/project/images/existing_issue.png) + +All Docker repositories have code and documentation. You use this same workflow +for either content type. For example, you can find and fix doc or code issues. +Also, you can propose a new Docker feature or propose a new Docker tutorial. + +Some workflow stages do have slight differences for code or documentation +contributions. When you reach that point in the flow, we make sure to tell you. 
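
For orientation, here is a minimal, hypothetical sketch of the command-line side
of that workflow. It assumes you have already forked `docker/docker` on GitHub
under `YOUR_ACCOUNT` and that issue 11038 is the issue you claimed; later
sections walk through each step in detail:

    # Clone your fork and point an "upstream" remote at docker/docker.
    $ git clone https://github.com/YOUR_ACCOUNT/docker.git docker-fork
    $ cd docker-fork
    $ git remote add upstream https://github.com/docker/docker.git

    # Branch for the issue, commit with a sign-off, and push to your fork.
    $ git checkout -b 11038-fix-rhel-link
    $ git commit -s -m "Fix broken RHEL link"
    $ git push origin 11038-fix-rhel-link

From the pushed branch, you then open a pull request against `docker/docker`
on GitHub.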

## Where to go next

Now that you know a little about the contribution process, go to the next section
to [find an issue you want to work on](/project/find-an-issue/).

diff --git a/docs/project/review-pr.md b/docs/project/review-pr.md
new file mode 100644
index 00000000..841431b4
--- /dev/null
+++ b/docs/project/review-pr.md
@@ -0,0 +1,141 @@

# Participate in the PR review

Creating a pull request is nearly the end of the contribution process. At this
point, your code is reviewed both by our continuous integration (CI) systems and
by our maintainers.

The CI system is an automated system. The maintainers are human beings who also
work on Docker. You need to understand and work with both the "bots" and the
"beings" to review your contribution.


## How we process your review

The first to review your pull request is Gordon. Gordon is fast. He checks your
pull request (PR) for common problems like a missing signature. If Gordon finds a
problem, he'll send an email through your GitHub user account:

![Gordon](/project/images/gordon.jpeg)

Our build bot system starts building your changes while Gordon sends any emails.

The build system double-checks your work by compiling your code with Docker's master
code. Building includes running the same tests you ran locally. If you forgot
to run tests or missed something in fixing problems, the automated build is our
safety check.

After Gordon and the bots, the "beings" review your work. Docker maintainers look
at your pull request and comment on it. The shortest comment you might see is
`LGTM` which means **l**ooks-**g**ood-**t**o-**m**e. If you get an `LGTM`, that
is a good thing: you passed that review.

For complex changes, maintainers may ask you questions or ask you to change
something about your submission. All maintainer comments on a PR go to the
email address associated with your GitHub account. Any GitHub user who
"participates" in a PR receives an email too. Participating means creating or
commenting on a PR.

Our maintainers are very experienced Docker users and open source contributors.
So, they value your time and will try to work efficiently with you by keeping
their comments specific and brief. If they ask you to make a change, you'll
need to update your pull request with additional changes.

## Update an existing pull request

To update your existing pull request:

1. Checkout the PR branch in your local `docker-fork` repository.

    This is the branch associated with your request.

2. Change one or more files and then stage your changes.

    The command syntax is:

        git add <path_or_filename>

3. Commit the change.

        $ git commit --amend

    Git opens an editor containing your last commit message.

4. Adjust your last commit message to reflect this new change.

        Added a new sentence per Arnaud's suggestion

        Signed-off-by: Mary Anthony <mary@docker.com>

        # Please enter the commit message for your changes. Lines starting
        # with '#' will be ignored, and an empty message aborts the commit.
        # On branch 11038-fix-rhel-link
        # Your branch is up-to-date with 'origin/11038-fix-rhel-link'.
        #
        # Changes to be committed:
        #	modified:   docs/installation/mac.md
        #	modified:   docs/installation/rhel.md

5. Force push the change to your origin.

    The command syntax is:

        git push -f origin <branch_name>

6. Open your browser to your pull request on GitHub.

    You should see your pull request now contains your newly pushed code.

7. Add a comment to your pull request.

    GitHub only notifies PR participants when you comment. For example, you can
    mention that you updated your PR. Your comment alerts the maintainers that
    you made an update.

A change requires LGTMs from an absolute majority of an affected component's
maintainers. For example, if you change `docs/` and `registry/` code, an
absolute majority of the `docs/` and the `registry/` maintainers must approve
your PR. Once you get approval, we merge your pull request into Docker's
`master` code branch.

## After the merge

It can take time to see a merged pull request in Docker's official release.
A master build is available almost immediately though. Docker builds and
updates its development binaries after each merge to `master`.

1. Browse to https://master.dockerproject.org/.

2. Look for the binary appropriate to your system.

3. Download and run the binary.

    You might want to run the binary in a container though. This
    will keep your local host environment clean.

4. View any documentation changes at docs.master.dockerproject.org.

Once you've verified everything merged, feel free to delete your feature branch
from your fork. For information on how to do this,
see the GitHub help on deleting branches.

## Where to go next

At this point, you have completed all the basic tasks in our contributors guide.
If you enjoyed contributing, let us know by completing another beginner
issue or two. We really appreciate the help.

If you are very experienced and want to make a major change, go on to
[learn about advanced contributing](/project/advanced-contributing).

diff --git a/docs/project/set-up-dev-env.md b/docs/project/set-up-dev-env.md
new file mode 100644
index 00000000..4c70d18f
--- /dev/null
+++ b/docs/project/set-up-dev-env.md
@@ -0,0 +1,426 @@

# Work with a development container

In this section, you learn to develop like a member of Docker's core team.
The `docker` repository includes a `Dockerfile` at its root. This file defines
Docker's development environment. The `Dockerfile` lists the environment's
dependencies: system libraries and binaries, Go environment, Go dependencies,
etc.

Docker's development environment is itself, ultimately, a Docker container.
You use the `docker` repository and its `Dockerfile` to create a Docker image,
run a Docker container, and develop code in the container. Docker itself builds,
tests, and releases new Docker versions using this container.

If you followed the procedures that
set up Git for contributing, you should have a fork of the `docker/docker`
repository. You also created a branch called `dry-run-test`. In this section,
you continue working with your fork on this branch.

## Clean your host of Docker artifacts

Docker developers run the latest stable release of the Docker software (with
Docker Machine if their machine is Mac OS X). They clean their local
hosts of unnecessary Docker artifacts such as stopped containers or unused
images. Cleaning unnecessary artifacts isn't strictly necessary, but it is
good practice, so it is included here.

To remove unnecessary artifacts:

1. Verify that you have no unnecessary containers running on your host.

        $ docker ps

    You should see something similar to the following:

        CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES

    There are no running containers on this host. If you have running but unused
    containers, stop and then remove them with the `docker stop` and `docker rm`
    commands.

2. Verify that your host has no dangling images.

        $ docker images

    You should see something similar to the following:

        REPOSITORY          TAG                 IMAGE ID            CREATED             VIRTUAL SIZE

    This host has no images. You may have one or more _dangling_ images. A
    dangling image is not used by a running container and is not an ancestor of
    another image on your system. A fast way to remove dangling images is
    the following:

        $ docker rmi -f $(docker images -q -a -f dangling=true)

    This command uses `docker images` to list all images (`-a` flag) by numeric
    IDs (`-q` flag) and filter them to find dangling images (`-f dangling=true`).
    Then, the `docker rmi` command forcibly (`-f` flag) removes
    the resulting list. To remove just one image, use the `docker rmi ID`
    command.


## Build an image

If you followed the last procedure, your host is clean of unnecessary images
and containers. In this section, you build an image from the Docker development
environment.

1. Open a terminal.

    Mac users, use `boot2docker status` to make sure Boot2Docker is running. You
    may need to run `eval "$(boot2docker shellinit)"` to initialize your shell
    environment.

2. Change into the root of your forked repository.

        $ cd ~/repos/docker-fork

    If you are following along with this guide, you created a `dry-run-test`
    branch when you set up Git for
    contributing.

3. Ensure you are on your `dry-run-test` branch.

        $ git checkout dry-run-test

    If you get a message that the branch doesn't exist, add the `-b` flag
    (`git checkout -b dry-run-test`) so the
    command both creates the branch and checks it out.

4. Compile your development environment container into an image.

        $ docker build -t dry-run-test .

    The `docker build` command returns informational messages as it runs. The
    first build may take a few minutes to create an image. Using the
    instructions in the `Dockerfile`, the build may need to download source and
    other images. A successful build returns a final status message similar to
    the following:

        Successfully built 676815d59283

5. List your Docker images again.

        $ docker images

    You should see something similar to this:

        REPOSITORY          TAG                 IMAGE ID            CREATED              VIRTUAL SIZE
        dry-run-test        latest              663fbee70028        About a minute ago
        ubuntu              trusty              2d24f826cb16        2 days ago           188.3 MB
        ubuntu              trusty-20150218.1   2d24f826cb16        2 days ago           188.3 MB
        ubuntu              14.04               2d24f826cb16        2 days ago           188.3 MB
        ubuntu              14.04.2             2d24f826cb16        2 days ago           188.3 MB
        ubuntu              latest              2d24f826cb16        2 days ago           188.3 MB

    Locate your new `dry-run-test` image in the list. You should also see a
    number of `ubuntu` images. The build process creates these. They are the
    ancestors of your new Docker development image. When you next rebuild your
    image, the build process reuses these ancestor images if they exist.

    Keeping the ancestor images improves the build performance. When you rebuild
    the child image, the build process uses the local ancestors rather than
    retrieving them from the Hub. The build process gets new ancestors only if
    Docker Hub has updated versions.

## Start a container and run a test

At this point, you have created a new Docker development environment image. Now,
you'll use this image to create a Docker container to develop in. Then, you'll
build and run a `docker` binary in your container.

1. Open two additional terminals on your host.

    At this point, you'll have about three terminals open.

    ![Multiple terminals](/project/images/three_terms.png)

    Mac OS X users, make sure you run `eval "$(boot2docker shellinit)"` in any new
    terminals.

2. In a terminal, create a new container from your `dry-run-test` image.

        $ docker run --privileged --rm -ti dry-run-test /bin/bash
        root@5f8630b873fe:/go/src/github.com/docker/docker#

    The command creates a container from your `dry-run-test` image. It opens an
    interactive terminal (`-ti`) running a `/bin/bash` shell. The
    `--privileged` flag gives the container access to kernel features and
    devices. This flag allows you to run a container in a container.
    Finally, the `--rm` flag instructs Docker to remove the container when you
    exit the `/bin/bash` shell.

    The container includes the source of your image repository in the
    `/go/src/github.com/docker/docker` directory. Try listing the contents to
    verify they are the same as those of your `docker-fork` repo.

    ![List example](/project/images/list_example.png)


3. Investigate your container a bit.

    If you do a `go version`, you'll find the `go` language is part of the
    container.

        root@31ed86e9ddcf:/go/src/github.com/docker/docker# go version
        go version go1.4.2 linux/amd64

    Similarly, if you do a `docker version`, you'll find the container
    has no `docker` binary.

        root@31ed86e9ddcf:/go/src/github.com/docker/docker# docker version
        bash: docker: command not found

    You will create one in the next steps.

4. From the `/go/src/github.com/docker/docker` directory, make a `docker` binary
with the `make.sh` script.

        root@5f8630b873fe:/go/src/github.com/docker/docker# hack/make.sh binary

    You only call `hack/make.sh` to build a binary _inside_ a Docker
    development container as you are now. On your host, you'll use `make`
    commands (more about this later).

    As it makes the binary, the `make.sh` script reports the build's progress.
    When the command completes successfully, you should see the following
    output:

        ---> Making bundle: binary (in bundles/1.5.0-dev/binary)
        Created binary: /go/src/github.com/docker/docker/bundles/1.5.0-dev/binary/docker-1.5.0-dev

5. List all the contents of the `binary` directory.

        root@5f8630b873fe:/go/src/github.com/docker/docker# ls bundles/1.5.0-dev/binary/
        docker  docker-1.5.0-dev  docker-1.5.0-dev.md5  docker-1.5.0-dev.sha256

    You should see that the `binary` directory, just as it sounds, contains the
    newly built binaries.


6. Copy the `docker` binary to the `/usr/bin` of your container.

        root@5f8630b873fe:/go/src/github.com/docker/docker# cp bundles/1.5.0-dev/binary/docker /usr/bin

7. Inside your container, check your Docker version.

        root@5f8630b873fe:/go/src/github.com/docker/docker# docker --version
        Docker version 1.5.0-dev, build 6e728fb

    Inside the container, you are running a development version. This is the version
    on the current branch. It reflects the value of the `VERSION` file at the
    root of your `docker-fork` repository.

8. Start a `docker` daemon running inside your container.

        root@5f8630b873fe:/go/src/github.com/docker/docker# docker daemon -D

    The `-D` flag starts the daemon in debug mode. You'll find this useful
    when debugging your code.

9. Bring up one of the terminals on your local host.


10. List your containers and look for the container running the `dry-run-test` image.

        $ docker ps
        CONTAINER ID        IMAGE                 COMMAND                CREATED             STATUS              PORTS               NAMES
        474f07652525        dry-run-test:latest   "hack/dind /bin/bash   14 minutes ago      Up 14 minutes                           tender_shockley

    In this example, the container's name is `tender_shockley`; yours will be
    different.

11. From the terminal, start another shell on your Docker development container.

        $ docker exec -it tender_shockley bash

    At this point, you have two terminals both with a shell open into your
    development container. One terminal is running a debug session. The other
    terminal is displaying a `bash` prompt.

12. At the prompt, test the Docker client by running the `hello-world` container.

        root@9337c96e017a:/go/src/github.com/docker/docker# docker run hello-world

    You should see the image load and return. Meanwhile, you
    can see the calls made via the debug session in your other terminal.

    ![List example](/project/images/three_running.png)


## Restart a container with your source

At this point, you have experienced the "Docker inception" technique. That is,
you have:

* built a Docker image from the Docker repository
* created and started a Docker development container from that image
* built a Docker binary inside of your Docker development container
* launched a `docker` daemon using your newly compiled binary
* called the `docker` client to run a `hello-world` container inside
  your development container

When you really get to developing code though, you'll want to iterate code
changes and builds inside the container. For that you need to mount your local
Docker repository source into your Docker container. Try that now.

1. If you haven't already, exit out of the bash shells in your running Docker
container.

    If you have followed this guide exactly, exiting out of your bash shells stops
    the running container. You can use the `docker ps` command to verify the
    development container is stopped. All of your terminals should be at the
    local host prompt.

2. Choose a terminal and make sure you are in your `docker-fork` repository.

        $ pwd
        /Users/mary/go/src/github.com/moxiegirl/docker-fork

    Your location will be different because it reflects your environment.

3. Create a container using `dry-run-test`, but this time, mount your repository
onto the `/go` directory inside the container.

        $ docker run --privileged --rm -ti -v `pwd`:/go/src/github.com/docker/docker dry-run-test /bin/bash

    When you pass `pwd`, `docker` resolves it to your current directory.

4. From inside the container, list your `binary` directory.

        root@074626fc4b43:/go/src/github.com/docker/docker# ls bundles/1.5.0-dev/binary
        ls: cannot access binary: No such file or directory

    Your `dry-run-test` image does not retain any of the changes you made inside
    the container. This is the expected behavior for a container.

5. In a fresh terminal on your local host, change to the `docker-fork` root.

        $ cd ~/repos/docker-fork/

6. Create a fresh binary, but this time, use the `make` command.

        $ make BINDDIR=. binary

    The `BINDDIR` flag is only necessary on Mac OS X, but it won't hurt to pass
    it on the Linux command line. The `make` command, like the `make.sh` script
    inside the container, reports its progress. When the make succeeds, it
    returns the location of the new binary.


7. Back in the terminal running the container, list your `binary` directory.

        root@074626fc4b43:/go/src/github.com/docker/docker# ls bundles/1.5.0-dev/binary
        docker  docker-1.5.0-dev  docker-1.5.0-dev.md5  docker-1.5.0-dev.sha256

    The compiled binaries created from your repository on your local host are
    now available inside your running Docker development container.

8. Repeat the steps you ran in the previous procedure.

    * copy the binary inside the development container using
      `cp bundles/1.5.0-dev/binary/docker /usr/bin`
    * start `docker daemon -D` to launch the Docker daemon inside the container
    * run `docker ps` on local host to get the development container's name
    * connect to your running container `docker exec -it container_name bash`
    * use the `docker run hello-world` command to create and run a container
      inside your development container

## Where to go next

Congratulations, you have successfully achieved Docker inception. At this point,
you've set up your development environment and verified almost all the essential
processes you need to contribute. Of course, before you start contributing,
[you'll need to learn one more piece of the development environment, the test
framework](/project/test-and-docs/).

diff --git a/docs/project/set-up-git.md b/docs/project/set-up-git.md
new file mode 100644
index 00000000..5d3ac856
--- /dev/null
+++ b/docs/project/set-up-git.md
@@ -0,0 +1,248 @@

# Configure Git for contributing

Work through this page to configure Git and a repository you'll use throughout
the Contributor Guide. The work you do further in the guide depends on the work
you do here.

## Fork and clone the Docker code

Before contributing, you first fork the Docker code repository. A fork copies
a repository at a particular point in time. GitHub tracks for you where a fork
originates.

As you make contributions, you change your fork's code. When you are ready,
you make a pull request back to the original Docker repository. If you aren't
familiar with this workflow, don't worry, this guide walks you through all the
steps.

To fork and clone Docker:

1. Open a browser and log into GitHub with your account.

2. Go to the docker/docker repository.

3. Click the "Fork" button in the upper right corner of the GitHub interface.

    ![Branch Signature](/project/images/fork_docker.png)

    GitHub forks the repository to your GitHub account. The original
    `docker/docker` repository becomes a new fork `YOUR_ACCOUNT/docker` under
    your account.

4. Copy your fork's clone URL from GitHub.

    GitHub allows you to use HTTPS or SSH protocols for clones. You can use the
    `git` command line or clients like Subversion to clone a repository.

    ![Copy clone URL](/project/images/copy_url.png)

    This guide assumes you are using the HTTPS protocol and the `git` command
    line. If you are comfortable with SSH and some other tool, feel free to use
    that instead. You'll need to convert what you see in the guide to what is
    appropriate to your tool.

5. Open a terminal window on your local host and change to your home directory.

        $ cd ~

    In Windows, you'll work in your Docker Quickstart Terminal window instead of
    Powershell or a `cmd` window.

6. Create a `repos` directory.

        $ mkdir repos

7. Change into your `repos` directory.

        $ cd repos

8. Clone the fork to your local host into a repository called `docker-fork`.

        $ git clone https://github.com/moxiegirl/docker.git docker-fork

    Naming your local repo `docker-fork` should help make these instructions
    easier to follow; experienced coders don't typically change the name.

9. Change directory into your new `docker-fork` directory.

        $ cd docker-fork

    Take a moment to familiarize yourself with the repository's contents. List
    the contents; a sample listing follows below.
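
The exact entries depend on the Docker version you cloned, so treat this sample
as illustrative rather than definitive:

    $ ls
    AUTHORS    CONTRIBUTING.md    Dockerfile    LICENSE    MAINTAINERS
    Makefile   README.md          api           docs       hack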

## Set your signature and an upstream remote

When you contribute to Docker, you must certify you agree with the
Developer Certificate of Origin.
You indicate your agreement by signing your `git` commits like this:

    Signed-off-by: Pat Smith <pat.smith@email.com>

To create a signature, you configure your username and email address in Git.
You can set these globally or locally on just your `docker-fork` repository.
You must sign with your real name. We don't accept anonymous contributions or
contributions through pseudonyms.

As you change code in your fork, you'll want to keep it in sync with the changes
others make in the `docker/docker` repository. To make syncing easier, you'll
also add a _remote_ called `upstream` that points to `docker/docker`. A remote
is just another project version hosted on the internet or network.

To configure your username, email, and add a remote:

1. Change to the root of your `docker-fork` repository.

        $ cd docker-fork

2. Set your `user.name` for the repository.

        $ git config --local user.name "FirstName LastName"

3. Set your `user.email` for the repository.

        $ git config --local user.email "emailname@mycompany.com"

4. Set your local repo to track changes upstream, on the `docker` repository.

        $ git remote add upstream https://github.com/docker/docker.git

5. Check the result in your `git` configuration.

        $ git config --local -l
        core.repositoryformatversion=0
        core.filemode=true
        core.bare=false
        core.logallrefupdates=true
        remote.origin.url=https://github.com/moxiegirl/docker.git
        remote.origin.fetch=+refs/heads/*:refs/remotes/origin/*
        branch.master.remote=origin
        branch.master.merge=refs/heads/master
        user.name=Mary Anthony
        user.email=mary@docker.com
        remote.upstream.url=https://github.com/docker/docker.git
        remote.upstream.fetch=+refs/heads/*:refs/remotes/upstream/*

    To list just the remotes, use:

        $ git remote -v
        origin    https://github.com/moxiegirl/docker.git (fetch)
        origin    https://github.com/moxiegirl/docker.git (push)
        upstream  https://github.com/docker/docker.git (fetch)
        upstream  https://github.com/docker/docker.git (push)

## Create and push a branch

As you change code in your fork, make your changes on a repository branch.
The branch name should reflect what you are working on. In this section, you
create a branch, make a change, and push it up to your fork.

This branch is just for testing your config for this guide. The changes are part
of a dry run, so the branch name will be dry-run-test. To create and push
the branch to your fork on GitHub:

1. Open a terminal and go to the root of your `docker-fork`.

        $ cd docker-fork

2. Create a `dry-run-test` branch.

        $ git checkout -b dry-run-test

    This command creates the branch and switches the repository to it.

3. Verify you are in your new branch.

        $ git branch
        * dry-run-test
          master

    The current branch has an * (asterisk) marker. So, these results show you
    are on the right branch.

4. Create a `TEST.md` file in the repository's root.

        $ touch TEST.md

5. Edit the file and add your email and location.

    ![Add your information](/project/images/contributor-edit.png)

    You can use any text editor you are comfortable with.

6. Save and close the file.

7. Check the status of your branch.

        $ git status
        On branch dry-run-test
        Untracked files:
          (use "git add <file>..." to include in what will be committed)

            TEST.md

        nothing added to commit but untracked files present (use "git add" to track)

    You've only changed the one file. It is untracked so far by git.

8. Add your file.

        $ git add TEST.md

    That is the only _staged_ file. Staged is a fancy word for work that Git is
    tracking.

9. Sign and commit your change.

        $ git commit -s -m "Making a dry run test."
        [dry-run-test 6e728fb] Making a dry run test
         1 file changed, 1 insertion(+)
         create mode 100644 TEST.md

    Commit messages should have a short summary sentence of no more than 50
    characters. Optionally, you can also include a more detailed explanation
    after the summary. Separate the summary from any explanation with an empty
    line.

10. Push your changes to GitHub.

        $ git push --set-upstream origin dry-run-test
        Username for 'https://github.com': moxiegirl
        Password for 'https://moxiegirl@github.com':

    Git prompts you for your GitHub username and password. Then, the command
    returns a result.

        Counting objects: 13, done.
        Compressing objects: 100% (2/2), done.
        Writing objects: 100% (3/3), 320 bytes | 0 bytes/s, done.
        Total 3 (delta 1), reused 0 (delta 0)
        To https://github.com/moxiegirl/docker.git
         * [new branch]      dry-run-test -> dry-run-test
        Branch dry-run-test set up to track remote branch dry-run-test from origin.

11. Open your browser to GitHub.

12. Navigate to your Docker fork.

13. Make sure the `dry-run-test` branch exists, that it has your commit, and the
commit is signed.

    ![Branch Signature](/project/images/branch-sig.png)

## Where to go next

Congratulations, you have finished configuring both your local host environment
and Git for contributing. In the next section you'll [learn how to set up and
work in a Docker development container](/project/set-up-dev-env/).

diff --git a/docs/project/software-req-win.md b/docs/project/software-req-win.md
new file mode 100644
index 00000000..ef722005
--- /dev/null
+++ b/docs/project/software-req-win.md
@@ -0,0 +1,265 @@

# Get the required software for Windows

This page explains how to get the software you need to use a Windows Server
2012 or Windows 8 machine for Docker development. Before you begin contributing
you must have:

- a GitHub account
- Git for Windows (msysGit)
- TDM-GCC, a compiler suite for Windows
- MinGW (tar and xz)
- Go language

> **Note**: This installation procedure refers to the `C:\` drive. If your
system's main drive is `D:\` you'll need to substitute that in where
appropriate in these instructions.

### Get a GitHub account

To contribute to the Docker project, you will need a GitHub account. A free account is
fine. All the Docker project repositories are public and visible to everyone.

You should also have some experience using both the GitHub application and `git`
on the command line.

## Install Git for Windows

Git for Windows includes several tools including msysGit, which is a build
environment. The environment contains the tools you need for development such as
Git and a Git Bash shell.

1. Browse to the [Git for Windows](https://msysgit.github.io/) download page.

2. Click **Download**.

    Windows prompts you to save the file to your machine.

3. Run the saved file.

    The system displays the **Git Setup** wizard.

4. Click the **Next** button to move through the wizard and accept all the defaults.

5. Click **Finish** when you are done.
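
As a quick, optional sanity check (the version string below is illustrative and
will differ on your machine), open the newly installed **Git Bash** and confirm
that `git` is on your path:

    $ git --version
    git version 1.9.5.msysgit.1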

## Installing TDM-GCC

TDM-GCC is a compiler suite for Windows. You'll use this suite to compile the
Docker Go code as you develop.

1. Browse to the
   [tdm-gcc download page](http://tdm-gcc.tdragon.net/download).

2. Click on the latest 64-bit version of the package.

    Windows prompts you to save the file to your machine.

3. Set up the suite by running the downloaded file.

    The system opens the **TDM-GCC Setup** wizard.

4. Click **Create**.

5. Click the **Next** button to move through the wizard and accept all the defaults.

6. Click **Finish** when you are done.


## Installing MinGW (tar and xz)

MinGW is a minimalist port of the GNU Compiler Collection (GCC). In this
procedure, you first download and install the MinGW installation manager. Then,
you use the manager to install the `tar` and `xz` tools from the collection.

1. Browse to MinGW
   [SourceForge](http://sourceforge.net/projects/mingw/).

2. Click **Download**.

    Windows prompts you to save the file to your machine.

3. Run the downloaded file.

    The system opens the **MinGW Installation Manager Setup Tool**.

4. Choose **Install** to install the MinGW Installation Manager.

5. Press **Continue**.

    The system installs and then opens the MinGW Installation Manager.

6. Press **Continue** after the install completes to open the manager.

7. Select **All Packages > MSYS Base System** from the left hand menu.

    The system displays the available packages.

8. Click on the **msys-tar bin** package and choose **Mark for Installation**.

9. Click on the **msys-xz bin** package and choose **Mark for Installation**.

10. Select **Installation > Apply Changes** to install the selected packages.

    The system displays the **Schedule of Pending Actions Dialog**.

    ![windows-mingw](/project/images/windows-mingw.png)

11. Press **Apply**.

    MinGW installs the packages for you.

12. Close the dialog and the MinGW Installation Manager.


## Set up your environment variables

You'll need to add the compiler to your `Path` environment variable.

1. Open the **Control Panel**.

2. Choose **System and Security > System**.

3. Click the **Advanced system settings** link in the sidebar.

    The system opens the **System Properties** dialog.

4. Select the **Advanced** tab.

5. Click **Environment Variables**.

    The system opens the **Environment Variables** dialog.

6. Locate the **System variables** area and scroll to the **Path**
   variable.

    ![windows-mingw](/project/images/path_variable.png)

7. Click **Edit** to edit the variable (you can also double-click it).

    The system opens the **Edit System Variable** dialog.

8. Make sure the `Path` includes `C:\TDM-GCC64\bin`.

    ![include gcc](/project/images/include_gcc.png)

    If you don't see `C:\TDM-GCC64\bin`, add it.

9. Press **OK** to close this dialog.

10. Press **OK** twice to close out of the remaining dialogs.

## Install Go and cross-compile it

In this section, you install the Go language. Then, you build the source so
that it can cross-compile for `linux/amd64` architectures.

1. Open the [Go Language download](http://golang.org/dl/) page in your browser.

2. Locate and click the latest `.msi` installer.

    The system prompts you to save the file.

3. Run the installer.

    The system opens the **Go Programming Language Setup** dialog.

4. Select all the defaults to install.

5. Press **Finish** to close the installation dialog.

6. Start a command prompt.

7. Change to the Go `src` directory.

        cd c:\Go\src

8. Set the following Go variables.

        c:\Go\src> set GOOS=linux
        c:\Go\src> set GOARCH=amd64

9. Compile the source.

        c:\Go\src> make.bat

    Compiling the source also adds a number of variables to your Windows environment.

## Get the Docker repository

In this step, you start a Git `bash` terminal and get the Docker source code
from GitHub.

1. Locate the **Git Bash** program and start it.

    Recall that **Git Bash** came with the Git for Windows installation. **Git
    Bash**, just as it sounds, allows you to run a Bash terminal on Windows.

    ![Git Bash](/project/images/git_bash.png)

2. Change to the root directory.

        $ cd /c/

3. Make a `gopath` directory.

        $ mkdir gopath

4. Go get the `docker/docker` repository.

        $ go.exe get github.com/docker/docker
        package github.com/docker/docker
        	imports github.com/docker/docker
        	imports github.com/docker/docker: no buildable Go source files in C:\gopath\src\github.com\docker\docker

    In the next steps, you create environment variables for your Go paths.

5. Open the **Control Panel** on your system.

6. Choose **System and Security > System**.

7. Click the **Advanced system settings** link in the sidebar.

    The system opens the **System Properties** dialog.

8. Select the **Advanced** tab.

9. Click **Environment Variables**.

    The system opens the **Environment Variables** dialog.

10. Locate the **System variables** area and scroll to the **Path**
    variable.

11. Click **New**.

    Now you are going to create some new variables. You'll create these paths
    in the next procedure, but you can set the variables now.

12. Enter `GOPATH` for the **Variable Name**.

13. For the **Variable Value** enter the following:

        C:\gopath;C:\gopath\src\github.com\docker\docker\vendor


14. Press **OK** to close this dialog.

    The system adds `GOPATH` to the list of **System Variables**.

15. Press **OK** twice to close out of the remaining dialogs.


## Where to go next

In the next section, you'll [learn how to set up and configure Git for
contributing to Docker](/project/set-up-git/).
\ No newline at end of file

diff --git a/docs/project/software-required.md b/docs/project/software-required.md
new file mode 100644
index 00000000..cbe10a83
--- /dev/null
+++ b/docs/project/software-required.md
@@ -0,0 +1,99 @@

# Get the required software for Linux or OS X

This page explains how to get the software you need to use a Linux or OS X
machine for Docker development. Before you begin contributing you must have:

* a GitHub account
* `git`
* `make`
* `docker`

You'll notice that `go`, the language that Docker is written in, is not listed.
That's because you don't need it installed; Docker's development environment
provides it for you. You'll learn more about the development environment later.

### Get a GitHub account

To contribute to the Docker project, you will need a GitHub account. A free account is
fine. All the Docker project repositories are public and visible to everyone.

You should also have some experience using both the GitHub application and `git`
on the command line.

### Install git

Install `git` on your local system. You can check if `git` is already on your
system and properly installed with the following command:

    $ git --version


This documentation is written using `git` version 2.2.2. Your version may be
different depending on your OS.

### Install make

Install `make`.
You can check if `make` is on your system with the following
command:

    $ make -v

This documentation is written using GNU Make 3.81. Your version may be different
depending on your OS.

### Install or upgrade Docker

If you haven't already, install the Docker software using the
instructions for your operating system.
If you have an existing installation, check your version and make sure you have
the latest Docker.

To check if `docker` is already installed on Linux:

    $ docker --version
    Docker version 1.5.0, build a8a31ef

On Mac OS X or Windows, you should have installed Boot2Docker which includes
Docker. You'll need to verify both Boot2Docker and Docker. This
documentation was written on OS X using the following versions.

    $ boot2docker version
    Boot2Docker-cli version: v1.5.0
    Git commit: ccd9032

    $ docker --version
    Docker version 1.5.0, build a8a31ef

## Linux users and sudo

This guide assumes you have added your user to the `docker` group on your system.
To check, list the group's contents:

    $ getent group docker
    docker:x:999:ubuntu

If the command returns no matches, you have two choices. You can preface this
guide's `docker` commands with `sudo` as you work. Alternatively, you can add
your user to the `docker` group as follows:

    $ sudo usermod -aG docker ubuntu

You must log out and log back in for this modification to take effect.


## Where to go next

In the next section, you'll [learn how to set up and configure Git for
contributing to Docker](/project/set-up-git/).

diff --git a/docs/project/test-and-docs.md b/docs/project/test-and-docs.md
new file mode 100644
index 00000000..554de7d7
--- /dev/null
+++ b/docs/project/test-and-docs.md
@@ -0,0 +1,340 @@

# Run tests and test documentation

Contributing includes testing your changes. If you change the Docker code, you
may need to add a new test or modify an existing one. Your contribution could
even be adding tests to Docker. For this reason, you need to know a little
about Docker's test infrastructure.

Many contributors contribute documentation only. Or, a contributor makes a code
contribution that changes how Docker behaves and that change needs
documentation. For these reasons, you also need to know how to build, view, and
test the Docker documentation.

In this section, you run tests in the `dry-run-test` branch of your Docker
fork. If you have followed along in this guide, you already have this branch.
If you don't have this branch, you can create it or simply use another of your
branches.

## Understand testing at Docker

Docker tests use the Go language's test framework. In this framework, files
whose names end in `_test.go` contain test code; you'll find test files like
this throughout the Docker repo. Use these files for inspiration when writing
your own tests. For information on Go's test framework, see Go's testing package
documentation and the go test help.

You are responsible for _unit testing_ your contribution when you add new or
change existing Docker code. A unit test is a piece of code that invokes a
single, small piece of code (a _unit of work_) to verify the unit works as
expected.

Depending on your contribution, you may need to add _integration tests_. These
are tests that combine two or more work units into one component. These work
units each have unit tests and then, together, integration tests that test the
interface between the components.
The `integration` and `integration-cli`
directories in the Docker repository contain integration test code.

Testing is its own specialty. If you aren't familiar with testing techniques,
there is a lot of information available to you on the Web. For now, you should
understand that the Docker maintainers may ask you to write a new test or
change an existing one.

### Run tests on your local host

Before submitting any code change, you should run the entire Docker test suite.
The `Makefile` contains a target for the entire test suite. The target's name
is simply `test`. The `Makefile` contains several targets for testing:

| Target               | What this target does                                     |
|----------------------|-----------------------------------------------------------|
| test                 | Run all the tests.                                        |
| test-unit            | Run just the unit tests.                                  |
| test-integration-cli | Run the tests for the integration command line interface. |
| test-docker-py       | Run the tests for the Docker API client.                  |
| docs-test            | Run the documentation test build.                         |
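
For example, to run only the unit tests from the root of your repository, you
would invoke the corresponding target from the table above (a sketch; any of
the other targets can be substituted the same way):

    $ make test-unit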

Run the entire test suite on your current repository:

1. Open a terminal on your local host.

2. Change to the root of your Docker repository.

        $ cd docker-fork

3. Make sure you are in your development branch.

        $ git checkout dry-run-test

4. Run the `make test` command.

        $ make test

    This command does several things; it creates a container temporarily for
    testing. Inside that container, the `make`:

    * creates a new binary
    * cross-compiles all the binaries for the various operating systems
    * runs all the tests in the system

    It can take several minutes to run all the tests. When they complete
    successfully, you see the output conclude with something like this:


        [PASSED]: top - sleep process should be listed in privileged mode
        [PASSED]: version - verify that it works and that the output is properly formatted
        PASS
        coverage: 70.8% of statements
        ---> Making bundle: test-docker-py (in bundles/1.5.0-dev/test-docker-py)
        +++ exec docker daemon --debug --host unix:///go/src/github.com/docker/docker/bundles/1.5.0-dev/test-docker-py/docker.sock --storage-driver vfs --exec-driver native --pidfile /go/src/github.com/docker/docker/bundles/1.5.0-dev/test-docker-py/docker.pid
        .................................................................
        ----------------------------------------------------------------------
        Ran 65 tests in 89.266s


### Run test targets inside the development container

If you are working inside a Docker development container, you use the
`hack/make.sh` script to run tests. The `hack/make.sh` script doesn't
have a single target that runs all the tests. Instead, you provide a single
command line with multiple targets that does the same thing.

Try this now.

1. Open a terminal and change to the `docker-fork` root.

2. Start a Docker development image.

    If you are following along with this guide, you should have a
    `dry-run-test` image.

        $ docker run --privileged --rm -ti -v `pwd`:/go/src/github.com/docker/docker dry-run-test /bin/bash

3. Run the tests using the `hack/make.sh` script.

        root@5f8630b873fe:/go/src/github.com/docker/docker# hack/make.sh dynbinary binary cross test-unit test-integration-cli test-docker-py

    The tests run just as they did within your local host.


Of course, you can also run a subset of these targets. For example, to run
just the unit tests:

    root@5f8630b873fe:/go/src/github.com/docker/docker# hack/make.sh dynbinary binary cross test-unit

Most test targets require that you build these precursor targets first:
`dynbinary binary cross`


## Running individual or multiple named tests

We use [gocheck](https://labix.org/gocheck) for our integration-cli tests.
You can use the `TESTFLAGS` environment variable to run a single test. The
flag's value is passed as arguments to the `go test` command. For example, from
your local host you can run the `TestBuild` test with this command:

    $ TESTFLAGS='-check.f DockerSuite.TestBuild*' make test-integration-cli

To run the same test inside your Docker development container, you do this:

    root@5f8630b873fe:/go/src/github.com/docker/docker# TESTFLAGS='-check.f TestBuild*' hack/make.sh binary test-integration-cli

## If tests under Boot2Docker fail due to disk space errors

Running the tests requires about 2GB of memory.
If you are running your container on bare metal, that is, you are not running
with Boot2Docker, your Docker development container is able to take the memory
it requires directly from your local host.

If you are running Docker using Boot2Docker, the VM uses 2048MB by default.
This means you can exceed the memory of your VM when running tests in a
Boot2Docker environment. When the test suite runs out of memory, it returns
errors similar to the following:

    server.go:1302 Error: Insertion failed because database is full: database or
    disk is full

    utils_test.go:179: Error copy: exit status 1 (cp: writing
    '/tmp/docker-testd5c9-[...]': No space left on device

To increase the memory on your VM, you need to reinitialize the Boot2Docker VM
with new memory settings.

1. Stop all running containers.

2. View the current memory setting.

        $ boot2docker info
        {
            "Name": "boot2docker-vm",
            "UUID": "491736fd-4075-4be7-a6f5-1d4cdcf2cc74",
            "Iso": "/Users/mary/.boot2docker/boot2docker.iso",
            "State": "running",
            "CPUs": 8,
            "Memory": 2048,
            "VRAM": 8,
            "CfgFile": "/Users/mary/VirtualBox VMs/boot2docker-vm/boot2docker-vm.vbox",
            "BaseFolder": "/Users/mary/VirtualBox VMs/boot2docker-vm",
            "OSType": "",
            "Flag": 0,
            "BootOrder": null,
            "DockerPort": 0,
            "SSHPort": 2022,
            "SerialFile": "/Users/mary/.boot2docker/boot2docker-vm.sock"
        }

3. Delete your existing `boot2docker` profile.

        $ boot2docker delete

4. Reinitialize `boot2docker` and specify a higher memory.

        $ boot2docker init -m 5555

5. Verify the memory was reset.

        $ boot2docker info

6. Restart your container and try your test again.

## Testing just the Windows client

This explains how to test the Windows client on a Windows server set up as a
development environment. You'll use the **Git Bash** shell that came with the
Git for Windows installation. **Git Bash**, just as it sounds, allows you to
run a Bash terminal on Windows.

1. If you don't have one, start a Git Bash terminal.

    ![Git Bash](/project/images/git_bash.png)

2. Change to the `docker` source directory.

        $ cd /c/gopath/src/github.com/docker/docker

3. Set `DOCKER_CLIENTONLY` as follows:

        $ export DOCKER_CLIENTONLY=1

    This ensures you are building only the client binary instead of both the
    binary and the daemon.

4. Set `DOCKER_TEST_HOST` to the `tcp://IP_ADDRESS:2376` value; substitute your
machine's actual IP address, for example:

        $ export DOCKER_TEST_HOST=tcp://203.0.113.200:2376

5. Make the binary and run the tests:

        $ hack/make.sh binary test-integration-cli

    Many tests are skipped on Windows for various reasons. You can see which
    tests were skipped by re-running the make and passing in the
    `TESTFLAGS='-test.v'` value.

You can now choose to make changes to the Docker source or the tests. If you
make any changes, just run these commands again.

## Build and test the documentation

The Docker documentation source files are under `docs`. The content is
written using extended Markdown. We use the static generator MkDocs to build
Docker's documentation. Of course, you don't need to install this generator
to build the documentation; it is included in a container.

You should always check your documentation for grammar and spelling. The best
way to do this is with an online grammar checker.

When you change a documentation source file, you should test your change
locally to make sure your content is there and any links work correctly.
You +can build the documentation from the local host. The build starts a container +and loads the documentation into a server. As long as this container runs, you +can browse the docs. + +1. In a terminal, change to the root of your `docker-fork` repository. + + $ cd ~/repos/docker-fork + +2. Make sure you are in your feature branch. + + $ git status + On branch dry-run-test + Your branch is up-to-date with 'origin/dry-run-test'. + nothing to commit, working directory clean + +3. Build the documentation. + + $ make docs + + When the build completes, you'll see a final output message similar to the + following: + + Successfully built ee7fe7553123 + docker run --rm -it -e AWS_S3_BUCKET -e NOCACHE -p 8000:8000 "docker-docs:dry-run-test" mkdocs serve + Running at: http://0.0.0.0:8000/ + Live reload enabled. + Hold ctrl+c to quit. + +4. Enter the URL in your browser. + + If you are using Docker Machine, replace the default localhost address + (0.0.0.0) with your DOCKERHOST value. You can get this value at any time by + entering `docker-machine ip ` at the command line. + +5. Once in the documentation, look for the red notice to verify you are seeing the correct build. + + ![Beta documentation](/project/images/red_notice.png) + +6. Navigate to your new or changed document. + +7. Review both the content and the links. + +8. Return to your terminal and exit out of the running documentation container. + + +## Where to go next + +Congratulations, you have successfully completed the basics you need to +understand the Docker test framework. In the next steps, you use what you have +learned so far to [contribute to Docker by working on an +issue](/project/make-a-contribution/). diff --git a/docs/project/who-written-for.md b/docs/project/who-written-for.md new file mode 100644 index 00000000..95676938 --- /dev/null +++ b/docs/project/who-written-for.md @@ -0,0 +1,63 @@ + + +# README first + +This section of the documentation contains a guide for Docker users who want to +contribute code or documentation to the Docker project. As a community, we +share rules of behavior and interaction. Make sure you are familiar with the community guidelines before continuing. + +## Where and what you can contribute + +The Docker project consists of not just one but several repositories on GitHub. +So, in addition to the `docker/docker` repository, there is the +`docker/compose` repo, the `docker/machine` repo, and several more. +Contribute to any of these and you contribute to the Docker project. + +Not all Docker repositories use the Go language. Also, each repository has its +own focus area. So, if you are an experienced contributor, think about +contributing to a Docker repository that has a language or a focus area you are +familiar with. + +If you are new to the open source community, to Docker, or to formal +programming, you should start out contributing to the `docker/docker` +repository. Why? Because this guide is written for that repository specifically. + +Finally, code or documentation isn't the only way to contribute. You can report +an issue, add to discussions in our community channel, write a blog post, or +take a usability test. You can even propose your own type of contribution. +Right now we don't have a lot written about this yet, so just email + if this type of contributing interests you. + +## A turtle is involved + +![Gordon](/project/images/gordon.jpeg) + +Enough said. 
## How to use this guide

This is written for the distracted, the overworked, the sloppy reader with
fair `git` skills and a failing memory for the GitHub GUI. The guide attempts
to explain how to use the Docker environment as precisely, predictably, and
procedurally as possible.

Users who are new to the Docker development environment should start by
setting up their environment. Then, they should try a simple code change.
After that, they should find something to work on or propose a totally new
change.

If you are a programming prodigy, you still may find this documentation
useful. Please feel free to skim past information you find obvious or boring.

## How to get started

Start by [getting the software you need to contribute](/project/software-required/). diff --git a/docs/project/work-issue.md b/docs/project/work-issue.md new file mode 100644 index 00000000..6f13c2c1 --- /dev/null +++ b/docs/project/work-issue.md @@ -0,0 +1,200 @@

# Work on your issue

The work you do for your issue depends on the specific issue you picked.
This section gives you a step-by-step workflow. Where appropriate, it provides
command examples.

However, this is a generalized workflow; depending on your issue, you may
repeat steps or even skip some. How much time the work takes depends on you:
you could spend days, or just 30 minutes.

## How to work on your local branch

Follow this workflow as you work:

1. Review the appropriate style guide.

    If you are changing code, review the coding style guide. Changing
    documentation? Review the documentation style guide.

2. Make changes in your feature branch.

    You created your feature branch in the last section. Here you use the
    development container. If you are making a code change, you can mount your
    source into a development container and iterate that way. For
    documentation alone, you can work on your local host.

    Make sure you don't change files in the `vendor` directory and its
    subdirectories; they contain third-party dependency code. Review the
    earlier section on working with a container if you forgot the details.

3. Test your changes as you work.

    If you have followed along with the guide, you know the `make test` target
    runs the entire test suite and `make docs` builds the documentation. If
    you forgot the other test targets, see the documentation for testing both
    code and documentation.

4. For code changes, add unit tests if appropriate.

    If you add new functionality or change existing functionality, you should
    also add a unit test. Use the existing test files for inspiration. Aren't
    sure if you need tests? Skip this step; you can add them later in the
    process if necessary.

5. Format your source files correctly.
| File type | How to format |
|---|---|
| `.go` | Format `.go` files using the `gofmt` command. For example, if you edited the `docker.go` file, you would format the file with `gofmt -s -w docker.go`. Most file editors have a plugin to format for you; check your editor's documentation. |
| `.md` and non-`.go` files | Wrap lines to 80 characters. |
6. List your changes.

        $ git status
        On branch 11038-fix-rhel-link
        Changes not staged for commit:
          (use "git add <file>..." to update what will be committed)
          (use "git checkout -- <file>..." to discard changes in working directory)

            modified:   docs/installation/mac.md
            modified:   docs/installation/rhel.md

    The `status` command lists what changed in the repository. Make sure you
    see the changes you expect.

7. Add your change to Git.

        $ git add docs/installation/mac.md
        $ git add docs/installation/rhel.md

8. Commit your changes, making sure you use the `-s` flag to sign your work.

        $ git commit -s -m "Fixing RHEL link"

9. Push your change to your repository.

        $ git push origin 11038-fix-rhel-link
        Username for 'https://github.com': moxiegirl
        Password for 'https://moxiegirl@github.com':
        Counting objects: 60, done.
        Compressing objects: 100% (7/7), done.
        Writing objects: 100% (7/7), 582 bytes | 0 bytes/s, done.
        Total 7 (delta 6), reused 0 (delta 0)
        To https://github.com/moxiegirl/docker.git
         * [new branch]      11038-fix-rhel-link -> 11038-fix-rhel-link
        Branch 11038-fix-rhel-link set up to track remote branch 11038-fix-rhel-link from origin.

## Review your branch on GitHub

After you push a new branch, you should verify it on GitHub:

1. Open your browser to GitHub.

2. Go to your Docker fork.

3. Select your branch from the dropdown.

    ![Find branch](/project/images/locate_branch.png)

4. Use the "Compare" button to compare the differences between your branch
   and master.

    Depending on how long you've been working on your branch, your branch may
    be behind Docker's upstream repository.

5. Review the commits.

    Make sure your branch only shows the work you've done.

## Pull and rebase frequently

You should pull and rebase frequently as you work.

1. Return to the terminal on your local machine and check out your
   feature branch in your local `docker-fork` repository.

2. Fetch any last-minute changes from `docker/docker`.

        $ git fetch upstream master
        From github.com:docker/docker
         * branch            master     -> FETCH_HEAD

3. Start an interactive rebase.

        $ git rebase -i upstream/master

4. Rebase opens an editor with a list of commits.

        pick 1a79f55 Tweak some of the other text for grammar
        pick 53e4983 Fix a link
        pick 3ce07bb Add a new line about RHEL

5. Replace the `pick` keyword with `squash` on all but the first commit.

        pick 1a79f55 Tweak some of the other text for grammar
        squash 53e4983 Fix a link
        squash 3ce07bb Add a new line about RHEL

    After you save the changes and quit from the editor, git starts
    the rebase, reporting the progress along the way. Sometimes
    your changes can conflict with the work of others. If git
    encounters a conflict, it stops the rebase and prints guidance
    for how to correct the conflict.

6. Edit and save your commit message.

        $ git commit -s

    Make sure your message includes your signature.

7. Force push any changes to your fork on GitHub.

        $ git push -f origin 11038-fix-rhel-link

## Where to go next

At this point, you should understand how to work on an issue. In the next
section, you [learn how to make a pull request](/project/create-pr/). diff --git a/docs/reference/api/README.md b/docs/reference/api/README.md new file mode 100644 index 00000000..ec1cbcb2 --- /dev/null +++ b/docs/reference/api/README.md @@ -0,0 +1,9 @@

This directory holds the authoritative specifications of APIs defined and
implemented by Docker.
Currently this includes: + + * The remote API by which a docker node can be queried over HTTP + * The registry API by which a docker node can download and upload + images for storage and sharing + * The index search API by which a docker node can search the public + index for images to download + * The docker.io OAuth and accounts API which 3rd party services can + use to access account information diff --git a/docs/reference/api/_static/io_oauth_authorization_page.png b/docs/reference/api/_static/io_oauth_authorization_page.png new file mode 100644 index 00000000..455d631e Binary files /dev/null and b/docs/reference/api/_static/io_oauth_authorization_page.png differ diff --git a/docs/reference/api/docker-io_api.md b/docs/reference/api/docker-io_api.md new file mode 100644 index 00000000..f798d3e3 --- /dev/null +++ b/docs/reference/api/docker-io_api.md @@ -0,0 +1,14 @@ + + +# Docker Hub API + +This API is deprecated as of 1.7. To view the old version, see the [Docker Hub API](https://docs.docker.com/v1.7/reference/api/docker-io_api/) in the 1.7 documentation. + diff --git a/docs/reference/api/docker_io_accounts_api.md b/docs/reference/api/docker_io_accounts_api.md new file mode 100644 index 00000000..fd9b2787 --- /dev/null +++ b/docs/reference/api/docker_io_accounts_api.md @@ -0,0 +1,276 @@ + + +# docker.io accounts API + +## Get a single user + +`GET /api/v1.1/users/:username/` + +Get profile info for the specified user. + +Parameters: + +- **username** – username of the user whose profile info is being + requested. + +Request Headers: + +- **Authorization** – required authentication credentials of + either type HTTP Basic or OAuth Bearer Token. + +Status Codes: + +- **200** – success, user data returned. +- **401** – authentication error. +- **403** – permission error, authenticated user must be the user + whose data is being requested, OAuth access tokens must have + `profile_read` scope. +- **404** – the specified username does not exist. + +**Example request**: + + GET /api/v1.1/users/janedoe/ HTTP/1.1 + Host: www.docker.io + Accept: application/json + Authorization: Basic dXNlcm5hbWU6cGFzc3dvcmQ= + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "id": 2, + "username": "janedoe", + "url": "https://www.docker.io/api/v1.1/users/janedoe/", + "date_joined": "2014-02-12T17:58:01.431312Z", + "type": "User", + "full_name": "Jane Doe", + "location": "San Francisco, CA", + "company": "Success, Inc.", + "profile_url": "https://docker.io/", + "gravatar_url": "https://secure.gravatar.com/avatar/0212b397124be4acd4e7dea9aa357.jpg?s=80&r=g&d=mm" + "email": "jane.doe@example.com", + "is_active": true + } + +## Update a single user + +`PATCH /api/v1.1/users/:username/` + +Update profile info for the specified user. + +Parameters: + +- **username** – username of the user whose profile info is being + updated. + +Json Parameters: + +- **full_name** (*string*) – (optional) the new name of the user. +- **location** (*string*) – (optional) the new location. +- **company** (*string*) – (optional) the new company of the user. +- **profile_url** (*string*) – (optional) the new profile url. +- **gravatar_email** (*string*) – (optional) the new Gravatar + email address. + +Request Headers: + +- **Authorization** – required authentication credentials of + either type HTTP Basic or OAuth Bearer Token. +- **Content-Type** – MIME Type of post data. JSON, url-encoded + form data, etc. + +Status Codes: + +- **200** – success, user data updated. 
+- **400** – post data validation error. +- **401** – authentication error. +- **403** – permission error, authenticated user must be the user + whose data is being updated, OAuth access tokens must have + `profile_write` scope. +- **404** – the specified username does not exist. + +**Example request**: + + PATCH /api/v1.1/users/janedoe/ HTTP/1.1 + Host: www.docker.io + Accept: application/json + Authorization: Basic dXNlcm5hbWU6cGFzc3dvcmQ= + + { + "location": "Private Island", + "profile_url": "http://janedoe.com/", + "company": "Retired", + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "id": 2, + "username": "janedoe", + "url": "https://www.docker.io/api/v1.1/users/janedoe/", + "date_joined": "2014-02-12T17:58:01.431312Z", + "type": "User", + "full_name": "Jane Doe", + "location": "Private Island", + "company": "Retired", + "profile_url": "http://janedoe.com/", + "gravatar_url": "https://secure.gravatar.com/avatar/0212b397124be4acd4e7dea9aa357.jpg?s=80&r=g&d=mm" + "email": "jane.doe@example.com", + "is_active": true + } + +## List email addresses for a user + +`GET /api/v1.1/users/:username/emails/` + +List email info for the specified user. + +Parameters: + +- **username** – username of the user whose profile info is being + updated. + +Request Headers: + +- **Authorization** – required authentication credentials of + either type HTTP Basic or OAuth Bearer Token + +Status Codes: + +- **200** – success, user data updated. +- **401** – authentication error. +- **403** – permission error, authenticated user must be the user + whose data is being requested, OAuth access tokens must have + `email_read` scope. +- **404** – the specified username does not exist. + +**Example request**: + + GET /api/v1.1/users/janedoe/emails/ HTTP/1.1 + Host: www.docker.io + Accept: application/json + Authorization: Bearer zAy0BxC1wDv2EuF3tGs4HrI6qJp6KoL7nM + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "email": "jane.doe@example.com", + "verified": true, + "primary": true + } + ] + +## Add email address for a user + +`POST /api/v1.1/users/:username/emails/` + +Add a new email address to the specified user's account. The email +address must be verified separately, a confirmation email is not +automatically sent. + +Json Parameters: + +- **email** (*string*) – email address to be added. + +Request Headers: + +- **Authorization** – required authentication credentials of + either type HTTP Basic or OAuth Bearer Token. +- **Content-Type** – MIME Type of post data. JSON, url-encoded + form data, etc. + +Status Codes: + +- **201** – success, new email added. +- **400** – data validation error. +- **401** – authentication error. +- **403** – permission error, authenticated user must be the user + whose data is being requested, OAuth access tokens must have + `email_write` scope. +- **404** – the specified username does not exist. + +**Example request**: + + POST /api/v1.1/users/janedoe/emails/ HTTP/1.1 + Host: www.docker.io + Accept: application/json + Content-Type: application/json + Authorization: Bearer zAy0BxC1wDv2EuF3tGs4HrI6qJp6KoL7nM + + { + "email": "jane.doe+other@example.com" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "email": "jane.doe+other@example.com", + "verified": false, + "primary": false + } + +## Delete email address for a user + +`DELETE /api/v1.1/users/:username/emails/` + +Delete an email address from the specified user's account. 
You +cannot delete a user's primary email address. + +Json Parameters: + +- **email** (*string*) – email address to be deleted. + +Request Headers: + +- **Authorization** – required authentication credentials of + either type HTTP Basic or OAuth Bearer Token. +- **Content-Type** – MIME Type of post data. JSON, url-encoded + form data, etc. + +Status Codes: + +- **204** – success, email address removed. +- **400** – validation error. +- **401** – authentication error. +- **403** – permission error, authenticated user must be the user + whose data is being requested, OAuth access tokens must have + `email_write` scope. +- **404** – the specified username or email address does not + exist. + +**Example request**: + + DELETE /api/v1.1/users/janedoe/emails/ HTTP/1.1 + Host: www.docker.io + Accept: application/json + Content-Type: application/json + Authorization: Bearer zAy0BxC1wDv2EuF3tGs4HrI6qJp6KoL7nM + + { + "email": "jane.doe+other@example.com" + } + +**Example response**: + + HTTP/1.1 204 NO CONTENT + Content-Length: 0 diff --git a/docs/reference/api/docker_remote_api.md b/docs/reference/api/docker_remote_api.md new file mode 100644 index 00000000..ba0dc371 --- /dev/null +++ b/docs/reference/api/docker_remote_api.md @@ -0,0 +1,301 @@ + + +# Docker Remote API + + - By default the Docker daemon listens on `unix:///var/run/docker.sock` + and the client must have `root` access to interact with the daemon. + - If the Docker daemon is set to use an encrypted TCP socket (`--tls`, + or `--tlsverify`) as with Boot2Docker 1.3.0, then you need to add extra + parameters to `curl` or `wget` when making test API requests: + `curl --insecure --cert ~/.docker/cert.pem --key ~/.docker/key.pem https://boot2docker:2376/images/json` + or + `wget --no-check-certificate --certificate=$DOCKER_CERT_PATH/cert.pem --private-key=$DOCKER_CERT_PATH/key.pem https://boot2docker:2376/images/json -O - -q` + - If a group named `docker` exists on your system, docker will apply + ownership of the socket to the group. + - The API tends to be REST, but for some complex commands, like attach + or pull, the HTTP connection is hijacked to transport STDOUT, STDIN, + and STDERR. + - Since API version 1.2, the auth configuration is now handled client + side, so the client has to send the `authConfig` as a `POST` in `/images/(name)/push`. + - authConfig, set as the `X-Registry-Auth` header, is currently a Base64 + encoded (JSON) string with the following structure: + `{"username": "string", "password": "string", "email": "string", + "serveraddress" : "string", "auth": ""}`. Notice that `auth` is to be left + empty, `serveraddress` is a domain/ip without protocol, and that double + quotes (instead of single ones) are required. + - The Remote API uses an open schema model. In this model, unknown + properties in incoming messages will be ignored. + Client applications need to take this into account to ensure + they will not break when talking to newer Docker daemons. + +The current version of the API is v1.20 + +Calling `/info` is the same as calling +`/v1.20/info`. + +You can still call an old version of the API using +`/v1.19/info`. + +## Docker Events + +The following diagram depicts the container states accessible through the API. + +![States](../images/event_state.png) + +Some container-related events are not affected by container state, so they are not included in this diagram. 
These events are: + +* **export** emitted by `docker export` +* **exec_create** emitted by `docker exec` +* **exec_start** emitted by `docker exec` after **exec_create** + +Running `docker rmi` emits an **untag** event when removing an image name. The `rmi` command may also emit **delete** events when images are deleted by ID directly or by deleting the last tag referring to the image. + +> **Acknowledgement**: This diagram and the accompanying text were used with the permission of Matt Good and Gilder Labs. See Matt's original blog post [Docker Events Explained](http://gliderlabs.com/blog/2015/04/14/docker-events-explained/). + +## v1.20 + +### Full documentation + +[*Docker Remote API v1.20*](/reference/api/docker_remote_api_v1.20/) + +### What's new + +`GET /containers/(id)/archive` + +**New!** +Get an archive of filesystem content from a container. + +`PUT /containers/(id)/archive` + +**New!** +Upload an archive of content to be extracted to an +existing directory inside a container's filesystem. + +`POST /containers/(id)/copy` + +**Deprecated!** +This copy endpoint has been deprecated in favor of the above `archive` endpoint +which can be used to download files and directories from a container. + +**New!** +The `hostConfig` option now accepts the field `GroupAdd`, which specifies a list of additional +groups that the container process will run as. + +## v1.19 + +### Full documentation + +[*Docker Remote API v1.19*](/reference/api/docker_remote_api_v1.19/) + +### What's new + +**New!** +When the daemon detects a version mismatch with the client, usually when +the client is newer than the daemon, an HTTP 400 is now returned instead +of a 404. + +`GET /containers/(id)/stats` + +**New!** +You can now supply a `stream` bool to get only one set of stats and +disconnect + +`GET /containers/(id)/logs` + +**New!** + +This endpoint now accepts a `since` timestamp parameter. + +`GET /info` + +**New!** + +The fields `Debug`, `IPv4Forwarding`, `MemoryLimit`, and `SwapLimit` +are now returned as boolean instead of as an int. + +In addition, the end point now returns the new boolean fields +`CpuCfsPeriod`, `CpuCfsQuota`, and `OomKillDisable`. + +## v1.18 + +### Full documentation + +[*Docker Remote API v1.18*](/reference/api/docker_remote_api_v1.18/) + +### What's new + +`GET /version` + +**New!** +This endpoint now returns `Os`, `Arch` and `KernelVersion`. + +`POST /containers/create` + +`POST /containers/(id)/start` + +**New!** +You can set ulimit settings to be used within the container. + +`GET /info` + +**New!** +This endpoint now returns `SystemTime`, `HttpProxy`,`HttpsProxy` and `NoProxy`. + +`GET /images/json` + +**New!** +Added a `RepoDigests` field to include image digest information. + +`POST /build` + +**New!** +Builds can now set resource constraints for all containers created for the build. + +**New!** +(`CgroupParent`) can be passed in the host config to setup container cgroups under a specific cgroup. + +`POST /build` + +**New!** +Closing the HTTP request will now cause the build to be canceled. + +`POST /containers/(id)/exec` + +**New!** +Add `Warnings` field to response. + +## v1.17 + +### Full documentation + +[*Docker Remote API v1.17*](/reference/api/docker_remote_api_v1.17/) + +### What's new + +The build supports `LABEL` command. Use this to add metadata +to an image. For example you could add data describing the content of an image. 
+ +`LABEL "com.example.vendor"="ACME Incorporated"` + +**New!** +`POST /containers/(id)/attach` and `POST /exec/(id)/start` + +**New!** +Docker client now hints potential proxies about connection hijacking using HTTP Upgrade headers. + +`POST /containers/create` + +**New!** +You can set labels on container create describing the container. + +`GET /containers/json` + +**New!** +The endpoint returns the labels associated with the containers (`Labels`). + +`GET /containers/(id)/json` + +**New!** +This endpoint now returns the list current execs associated with the container (`ExecIDs`). +This endpoint now returns the container labels (`Config.Labels`). + +`POST /containers/(id)/rename` + +**New!** +New endpoint to rename a container `id` to a new name. + +`POST /containers/create` +`POST /containers/(id)/start` + +**New!** +(`ReadonlyRootfs`) can be passed in the host config to mount the container's +root filesystem as read only. + +`GET /containers/(id)/stats` + +**New!** +This endpoint returns a live stream of a container's resource usage statistics. + +`GET /images/json` + +**New!** +This endpoint now returns the labels associated with each image (`Labels`). + + +## v1.16 + +### Full documentation + +[*Docker Remote API v1.16*](/reference/api/docker_remote_api_v1.16/) + +### What's new + +`GET /info` + +**New!** +`info` now returns the number of CPUs available on the machine (`NCPU`), +total memory available (`MemTotal`), a user-friendly name describing the running Docker daemon (`Name`), a unique ID identifying the daemon (`ID`), and +a list of daemon labels (`Labels`). + +`POST /containers/create` + +**New!** +You can set the new container's MAC address explicitly. + +**New!** +Volumes are now initialized when the container is created. + +`POST /containers/(id)/copy` + +**New!** +You can now copy data which is contained in a volume. + +## v1.15 + +### Full documentation + +[*Docker Remote API v1.15*](/reference/api/docker_remote_api_v1.15/) + +### What's new + +`POST /containers/create` + +**New!** +It is now possible to set a container's HostConfig when creating a container. +Previously this was only available when starting a container. + +## v1.14 + +### Full documentation + +[*Docker Remote API v1.14*](/reference/api/docker_remote_api_v1.14/) + +### What's new + +`DELETE /containers/(id)` + +**New!** +When using `force`, the container will be immediately killed with SIGKILL. + +`POST /containers/(id)/start` + +**New!** +The `hostConfig` option now accepts the field `CapAdd`, which specifies a list of capabilities +to add, and the field `CapDrop`, which specifies a list of capabilities to drop. + +`POST /images/create` + +**New!** +The `fromImage` and `repo` parameters now supports the `repo:tag` format. +Consequently, the `tag` parameter is now obsolete. Using the new format and +the `tag` parameter at the same time will return an error. + + diff --git a/docs/reference/api/docker_remote_api_v1.0.md b/docs/reference/api/docker_remote_api_v1.0.md new file mode 100644 index 00000000..0ae774ed --- /dev/null +++ b/docs/reference/api/docker_remote_api_v1.0.md @@ -0,0 +1,993 @@ + + +# Docker Remote API v1.0 + +# 1. Brief introduction + +- The Remote API is replacing rcli +- Default port in the docker daemon is 2375 +- The API tends to be REST, but for some complex commands, like attach + or pull, the HTTP connection is hijacked to transport stdout stdin + and stderr + +# 2. 
Endpoints + +## 2.1 Containers + +### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /containers/json?all=1&before=8dfafdbc3a40 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Image": "ubuntu:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0" + }, + { + "Id": "9cd87474be90", + "Image": "ubuntu:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0" + }, + { + "Id": "3176a2479c92", + "Image": "centos:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0" + }, + { + "Id": "4cb07b47f9fb", + "Image": "fedora:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0" + } + ] + +Query Parameters: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "date" + ], + "Dns":null, + "Image":"ubuntu", + "Volumes":{}, + "VolumesFrom":"" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806" + "Warnings":[] + } + +Json Parameters: + +- **config** – the container's configuration + +Status Codes: + +- **201** – no error +- **404** – no such container +- **406** – impossible to attach (container not running) +- **500** – server error + +### Inspect a container + +`GET /containers/(id)/json` + +Return low-level information on the container `id` + + +**Example request**: + + GET /containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Created": "2013-05-07T14:51:42.041847+02:00", + "Path": "date", + "Args": [], + "Config": { + "Hostname": "4fa6e0f0c678", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Dns": null, + "Image": "ubuntu", + "Volumes": {}, + "VolumesFrom": "" + }, + "State": { + "Running": false, + "Pid": 0, + "ExitCode": 0, + "StartedAt": "2013-05-07T14:51:42.087658+02:01360", + "Ghost": false + }, + "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "NetworkSettings": { + "IpAddress": "", + "IpPrefixLen": 0, + "Gateway": "", + "Bridge": "", + "PortMapping": null + }, + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", + "ResolvConfPath": "/etc/resolv.conf", + "Volumes": {} + } + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Inspect changes on 
a container's filesystem + +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Export a container + +`GET /containers/(id)/export` + +Export the contents of container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Start a container + +`POST /containers/(id)/start` + +Start the container `id` + +**Example request**: + + POST /containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Stop a container + +`POST /containers/(id)/stop` + +Stop the container `id` + +**Example request**: + + POST /containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 OK + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Restart a container + +`POST /containers/(id)/restart` + +Restart the container `id` + +**Example request**: + + POST /containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Kill a container + +`POST /containers/(id)/kill` + +Kill the container `id` + +**Example request**: + + POST /containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Attach to a container + +`POST /containers/(id)/attach` + +Attach to the container `id` + +**Example request**: + + POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Defaul + false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. 
Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Wait a container + +`POST /containers/(id)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Remove a container + +`DELETE /containers/(id)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 OK + +Query Parameters: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default false + +Status Codes: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +## 2.2 Images + +### List Images + +`GET /images/(format)` + +List images `format` could be json or viz (json default) + +**Example request**: + + GET /images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Repository":"ubuntu", + "Tag":"precise", + "Id":"b750fe79269d", + "Created":1364102658 + }, + { + "Repository":"ubuntu", + "Tag":"12.04", + "Id":"b750fe79269d", + "Created":1364102658 + } + ] + +**Example request**: + + GET /images/viz HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + digraph docker { + "d82cbacda43a" -> "074be284591f" + "1496068ca813" -> "08306dc45919" + "08306dc45919" -> "0e7893146ac2" + "b750fe79269d" -> "1496068ca813" + base -> "27cf78414709" [style=invis] + "f71189fff3de" -> "9a33b36209ed" + "27cf78414709" -> "b750fe79269d" + "0e7893146ac2" -> "d6434d954665" + "d6434d954665" -> "d82cbacda43a" + base -> "e9aa60c60128" [style=invis] + "074be284591f" -> "f71189fff3de" + "b750fe79269d" [label="b750fe79269d\nubuntu",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; + "e9aa60c60128" [label="e9aa60c60128\ncentos",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; + "9a33b36209ed" [label="9a33b36209ed\nfedora",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; + base [style=invisible] + } + +Query Parameters: + +- **all** – 1/True/true or 0/False/false, Show all containers. 
+ Only running containers are shown by defaul + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### Create an image + +`POST /images/create` + +Create an image, either by pull it from the registry or by importing i + +**Example request**: + + POST /images/create?fromImage=ubuntu HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + +Query Parameters: + +- **fromImage** – name of the image to pull +- **fromSrc** – source to import, - means stdin +- **repo** – repository +- **tag** – tag +- **registry** – the registry to pull from + +Status Codes: + +- **200** – no error +- **500** – server error + +### Insert a file in an image + +`POST /images/(name)/insert` + +Insert a file from `url` in the image `name` at `path` + +**Example request**: + + POST /images/test/insert?path=/usr&url=myurl HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + + {{ TAR STREAM }} + +Query Parameters: + +- **url** – The url from where the file is taken +- **path** – The path where the file is stored + +Status Codes: + +- **200** – no error +- **500** – server error + +### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /images/centos/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "parent":"27cf784147099545", + "created":"2013-03-23T22:24:18.818426-07:00", + "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "container_config": + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":false, + "AttachStderr":false, + "PortSpecs":null, + "Tty":true, + "OpenStdin":true, + "StdinOnce":false, + "Env":null, + "Cmd": ["/bin/bash"], + "Dns":null, + "Image":"centos", + "Volumes":null, + "VolumesFrom":"" + } + } + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /images/fedora/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "b750fe79269d", + "Created": 1364102658, + "CreatedBy": "/bin/bash" + }, + { + "Id": "27cf78414709", + "Created": 1364068391, + "CreatedBy": "" + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + + > **Example request**: + > + > POST /images/test/push HTTP/1.1 + > + > **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 OK + +Query Parameters: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +Status Codes: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **500** – server error + +### Remove an image + 
+`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **404** – no such image +- **500** – server error + +### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com) + +**Example request**: + + GET /images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Name":"cespare/sshd", + "Description":"" + }, + { + "Name":"johnfuller/sshd", + "Description":"" + }, + { + "Name":"dhrp/mongodb-sshd", + "Description":"" + } + ] + + :query term: term to search + :statuscode 200: no error + :statuscode 500: server error + +## 2.3 Misc + +### Build an image from Dockerfile via stdin + +`POST /build` + +Build an image from Dockerfile via stdin + +**Example request**: + + POST /build HTTP/1.1 + + {{ TAR STREAM }} + +**Example response**: + + HTTP/1.1 200 OK + + {{ STREAM }} + +Query Parameters: + +- **t** – repository name to be applied to the resulting image in + case of success + +Status Codes: + +- **200** – no error +- **500** – server error + +### Get default username and email + +`GET /auth` + +Get the default username and email + +**Example request**: + + GET /auth HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "username":"hannibal", + "email":"hannibal@a-team.com" + } + +Status Codes: + +- **200** – no error +- **500** – server error + +### Check auth configuration and store i + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /auth HTTP/1.1 + Content-Type: application/json + + { + "username":"hannibal", + "password:"xxxx", + "email":"hannibal@a-team.com" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + +Status Codes: + +- **200** – no error +- **204** – no error +- **500** – server error + +### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers":11, + "Images":16, + "Debug":false, + "NFd": 11, + "NGoroutines":21, + "MemoryLimit":true, + "SwapLimit":false + } + +Status Codes: + +- **200** – no error +- **500** – server error + +### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version":"0.2.2", + "GitCommit":"5a2a5cc+CHANGES", + "GoVersion":"go1.0.3" + } + +Status Codes: + +- **200** – no error +- **500** – server error + +### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + > + > **Example request**: + + POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Cmd": ["cat", "/world"], + "PortSpecs":["22"] + } + +**Example response**: + + HTTP/1.1 201 OK + Content-Type: application/vnd.docker.raw-stream + + {"Id": "596069db4bf5"} + +Query Parameters: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **m** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") + +Status Codes: + +- **201** – no error +- **404** – no such container +- 
**500** – server error + +# 3. Going further + +## 3.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it + - Then retry to create the container + +- Start the container + +- If you are not in detached mode: + - Attach to the container, using logs=1 (to have stdout and + stderr from the container's start) and stream=1 + +- If in detached mode or only stdin is attached: + - Display the container's + +## 3.2 Hijacking + +In this first version of the API, some of the endpoints, like /attach, +/pull or /push uses hijacking to transport stdin, stdout and stderr on +the same socket. This might change in the future. diff --git a/docs/reference/api/docker_remote_api_v1.1.md b/docs/reference/api/docker_remote_api_v1.1.md new file mode 100644 index 00000000..6f416de5 --- /dev/null +++ b/docs/reference/api/docker_remote_api_v1.1.md @@ -0,0 +1,1005 @@ + + +# Docker Remote API v1.1 + +# 1. Brief introduction + +- The Remote API is replacing rcli +- Default port in the docker daemon is 2375 +- The API tends to be REST, but for some complex commands, like attach + or pull, the HTTP connection is hijacked to transport stdout stdin + and stderr + +# 2. Endpoints + +## 2.1 Containers + +### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /containers/json?all=1&before=8dfafdbc3a40 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Image": "ubuntu:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0" + }, + { + "Id": "9cd87474be90", + "Image": "ubuntu:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0" + }, + { + "Id": "3176a2479c92", + "Image": "centos:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0" + }, + { + "Id": "4cb07b47f9fb", + "Image": "fedora:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0" + } + ] + +Query Parameters: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. 
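These parameters can be combined. For instance, a hypothetical request for the
two most recently created containers, including stopped ones, could look like
this (a sketch assuming a daemon reachable over TCP on the default port 2375
mentioned in the introduction):

    $ curl 'http://localhost:2375/v1.1/containers/json?all=1&limit=2'
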
+ +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "date" + ], + "Dns":null, + "Image":"ubuntu", + "Volumes":{}, + "VolumesFrom":"" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806" + "Warnings":[] + } + +Json Parameters: + +- **config** – the container's configuration + +Status Codes: + +- **201** – no error +- **404** – no such container +- **406** – impossible to attach (container not running) +- **500** – server error + +### Inspect a container + +`GET /containers/(id)/json` + +Return low-level information on the container `id` + + +**Example request**: + + GET /containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Created": "2013-05-07T14:51:42.041847+02:00", + "Path": "date", + "Args": [], + "Config": { + "Hostname": "4fa6e0f0c678", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Dns": null, + "Image": "ubuntu", + "Volumes": {}, + "VolumesFrom": "" + }, + "State": { + "Running": false, + "Pid": 0, + "ExitCode": 0, + "StartedAt": "2013-05-07T14:51:42.087658+02:01360", + "Ghost": false + }, + "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "NetworkSettings": { + "IpAddress": "", + "IpPrefixLen": 0, + "Gateway": "", + "Bridge": "", + "PortMapping": null + }, + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", + "ResolvConfPath": "/etc/resolv.conf", + "Volumes": {} + } + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Inspect changes on a container's filesystem + +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Export a container + +`GET /containers/(id)/export` + +Export the contents of container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Start a container + +`POST /containers/(id)/start` + +Start the container `id` + +**Example request**: + + POST /containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Stop a container + +`POST /containers/(id)/stop` 
+ +Stop the container `id` + +**Example request**: + + POST /containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 OK + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Restart a container + +`POST /containers/(id)/restart` + +Restart the container `id` + +**Example request**: + + POST /containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Kill a container + +`POST /containers/(id)/kill` + +Kill the container `id` + +**Example request**: + + POST /containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Attach to a container + +`POST /containers/(id)/attach` + +Attach to the container `id` + +**Example request**: + + POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Defaul + false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. 
Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Wait a container + +`POST /containers/(id)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Remove a container + +`DELETE /containers/(id)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 OK + +Query Parameters: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default false + +Status Codes: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +## 2.2 Images + +### List Images + +`GET /images/(format)` + +List images `format` could be json or viz (json default) + +**Example request**: + + GET /images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Repository":"ubuntu", + "Tag":"precise", + "Id":"b750fe79269d", + "Created":1364102658 + }, + { + "Repository":"ubuntu", + "Tag":"12.04", + "Id":"b750fe79269d", + "Created":1364102658 + } + ] + +**Example request**: + + GET /images/viz HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + digraph docker { + "d82cbacda43a" -> "074be284591f" + "1496068ca813" -> "08306dc45919" + "08306dc45919" -> "0e7893146ac2" + "b750fe79269d" -> "1496068ca813" + base -> "27cf78414709" [style=invis] + "f71189fff3de" -> "9a33b36209ed" + "27cf78414709" -> "b750fe79269d" + "0e7893146ac2" -> "d6434d954665" + "d6434d954665" -> "d82cbacda43a" + base -> "e9aa60c60128" [style=invis] + "074be284591f" -> "f71189fff3de" + "b750fe79269d" [label="b750fe79269d\nubuntu",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; + "e9aa60c60128" [label="e9aa60c60128\ncentos",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; + "9a33b36209ed" [label="9a33b36209ed\nfedora",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; + base [style=invisible] + } + +Query Parameters: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by defaul + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### Create an image + +`POST /images/create` + +Create an image, either by pull it from the registry or by importing i + +**Example request**: + + POST /images/create?fromImage=ubuntu HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Pulling..."} + {"status":"Pulling", "progress":"1/? (n/a)"} + {"error":"Invalid..."} + ... 
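+
+The response body is a stream of JSON objects emitted while the pull or
+import is in progress. As a minimal Go sketch, assuming the daemon has
+been bound to a TCP address such as `127.0.0.1:2375`, a client can
+simply relay that stream:
+
+    package main
+
+    import (
+        "io"
+        "net/http"
+        "os"
+    )
+
+    func main() {
+        // "127.0.0.1:2375" is a placeholder; use whatever address the
+        // daemon is actually bound to.
+        resp, err := http.Post("http://127.0.0.1:2375/images/create?fromImage=ubuntu", "text/plain", nil)
+        if err != nil {
+            panic(err)
+        }
+        defer resp.Body.Close()
+        // Relay the {"status": ...} / {"error": ...} objects as they arrive.
+        io.Copy(os.Stdout, resp.Body)
+    }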
+ +Query Parameters: + +- **fromImage** – name of the image to pull +- **fromSrc** – source to import, - means stdin +- **repo** – repository +- **tag** – tag +- **registry** – the registry to pull from + +Status Codes: + +- **200** – no error +- **500** – server error + +### Insert a file in an image + +`POST /images/(name)/insert` + +Insert a file from `url` in the image `name` at `path` + +**Example request**: + + POST /images/test/insert?path=/usr&url=myurl HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Inserting..."} + {"status":"Inserting", "progress":"1/? (n/a)"} + {"error":"Invalid..."} + ... + +Query Parameters: + +- **url** – The url from where the file is taken +- **path** – The path where the file is stored + +Status Codes: + +- **200** – no error +- **500** – server error + +### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /images/centos/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "parent":"27cf784147099545", + "created":"2013-03-23T22:24:18.818426-07:00", + "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "container_config": + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":false, + "AttachStderr":false, + "PortSpecs":null, + "Tty":true, + "OpenStdin":true, + "StdinOnce":false, + "Env":null, + "Cmd": ["/bin/bash"], + "Dns":null, + "Image":"centos", + "Volumes":null, + "VolumesFrom":"" + } + } + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /images/fedora/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "b750fe79269d", + "Created": 1364102658, + "CreatedBy": "/bin/bash" + }, + { + "Id": "27cf78414709", + "Created": 1364068391, + "CreatedBy": "" + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + + > **Example request**: + > + > POST /images/test/push HTTP/1.1 + > + > **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Pushing..."} + {"status":"Pushing", "progress":"1/? (n/a)"} + {"error":"Invalid..."} + ... 
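+
+Each object in the stream is an independent JSON document, so a client
+can decode them one at a time and stop at the first `error` key. A
+minimal Go sketch, again assuming a daemon bound to `127.0.0.1:2375`:
+
+    package main
+
+    import (
+        "encoding/json"
+        "fmt"
+        "net/http"
+    )
+
+    func main() {
+        resp, err := http.Post("http://127.0.0.1:2375/images/test/push", "text/plain", nil)
+        if err != nil {
+            panic(err)
+        }
+        defer resp.Body.Close()
+
+        dec := json.NewDecoder(resp.Body)
+        for {
+            var msg map[string]interface{}
+            if err := dec.Decode(&msg); err != nil {
+                break // io.EOF once the daemon closes the stream
+            }
+            if e, ok := msg["error"]; ok {
+                fmt.Println("push failed:", e)
+                return
+            }
+            fmt.Println(msg["status"])
+        }
+    }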
+ +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 OK + +Query Parameters: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +Status Codes: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **404** – no such image +- **500** – server error + +### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com) + +**Example request**: + + GET /images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Name":"cespare/sshd", + "Description":"" + }, + { + "Name":"johnfuller/sshd", + "Description":"" + }, + { + "Name":"dhrp/mongodb-sshd", + "Description":"" + } + ] + + :query term: term to search + :statuscode 200: no error + :statuscode 500: server error + +## 2.3 Misc + +### Build an image from Dockerfile via stdin + +`POST /build` + +Build an image from Dockerfile via stdin + +**Example request**: + + POST /build HTTP/1.1 + + {{ STREAM }} + +**Example response**: + + HTTP/1.1 200 OK + + {{ STREAM }} + +Query Parameters: + +   + +- **t** – tag to be applied to the resulting image in case of + success + +Status Codes: + +- **200** – no error +- **500** – server error + +### Get default username and email + +`GET /auth` + +Get the default username and email + +**Example request**: + + GET /auth HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "username":"hannibal", + "email":"hannibal@a-team.com" + } + +Status Codes: + +- **200** – no error +- **500** – server error + +### Check auth configuration and store i + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /auth HTTP/1.1 + Content-Type: application/json + + { + "username":"hannibal", + "password:"xxxx", + "email":"hannibal@a-team.com" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + +Status Codes: + +- **200** – no error +- **204** – no error +- **500** – server error + +### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers":11, + "Images":16, + "Debug":false, + "NFd": 11, + "NGoroutines":21, + "MemoryLimit":true, + "SwapLimit":false + } + +Status Codes: + +- **200** – no error +- **500** – server error + +### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version":"0.2.2", + "GitCommit":"5a2a5cc+CHANGES", + "GoVersion":"go1.0.3" + } + +Status Codes: + +- **200** – no error +- **500** – server error + +### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a 
container's changes + +**Example request**: + + POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Cmd": ["cat", "/world"], + "PortSpecs":["22"] + } + +**Example response**: + + HTTP/1.1 201 OK + Content-Type: application/vnd.docker.raw-stream + + {"Id": "596069db4bf5"} + +Query Parameters: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **m** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") + +Status Codes: + +- **201** – no error +- **404** – no such container +- **500** – server error + +# 3. Going further + +## 3.1 Inside `docker run` + +Here are the steps of `docker run` : + + - Create the container + + - If the status code is 404, it means the image doesn't exist: + - Try to pull it + - Then retry to create the container + + - Start the container + + - If you are not in detached mode: + - Attach to the container, using logs=1 (to have stdout and + stderr from the container's start) and stream=1 + + - If in detached mode or only stdin is attached: + - Display the container's + +## 3.2 Hijacking + +In this version of the API, /attach uses hijacking to transport stdin, +stdout and stderr on the same socket. This might change in the future. diff --git a/docs/reference/api/docker_remote_api_v1.10.md b/docs/reference/api/docker_remote_api_v1.10.md new file mode 100644 index 00000000..bf617fe3 --- /dev/null +++ b/docs/reference/api/docker_remote_api_v1.10.md @@ -0,0 +1,1354 @@ + + +# Docker Remote API v1.10 + +## 1. Brief introduction + + - The Remote API has replaced rcli + - The daemon listens on `unix:///var/run/docker.sock` but you can bind + Docker to another host/port or a Unix socket. + - The API tends to be REST, but for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout, stdin` + and `stderr` + +# 2. Endpoints + +## 2.1 Containers + +### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Image": "ubuntu:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Image": "ubuntu:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Image": "ubuntu:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Image": "ubuntu:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +Query Parameters: + +   + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created containers, include non-running ones. +- **since** – Show only containers created since Id, include non-running ones. +- **before** – Show only containers created before Id, include non-running ones. 
+- **size** – 1/True/true or 0/False/false, Show the containers sizes + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "date" + ], + "Image":"ubuntu", + "Volumes":{ + "/tmp": {} + }, + "WorkingDir":"", + "NetworkDisabled": false, + "ExposedPorts":{ + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806" + "Warnings":[] + } + +Json Parameters: + +- **config** – the container's configuration + +Query Parameters: + +   + +- **name** – Assign the specified name to the container. Mus + match `/?[a-zA-Z0-9_-]+`. + +Status Codes: + +- **201** – no error +- **404** – no such container +- **406** – impossible to attach (container not running) +- **500** – server error + +### Inspect a container + +`GET /containers/(id)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Created": "2013-05-07T14:51:42.041847+02:00", + "Path": "date", + "Args": [], + "Config": { + "Hostname": "4fa6e0f0c678", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Image": "ubuntu", + "Volumes": {}, + "WorkingDir":"" + + }, + "State": { + "Running": false, + "Pid": 0, + "ExitCode": 0, + "StartedAt": "2013-05-07T14:51:42.087658+02:01360", + "Ghost": false + }, + "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "NetworkSettings": { + "IpAddress": "", + "IpPrefixLen": 0, + "Gateway": "", + "Bridge": "", + "PortMapping": null + }, + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", + "ResolvConfPath": "/etc/resolv.conf", + "Volumes": {}, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "LxcConf": [], + "Privileged": false, + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "49153" + } + ] + }, + "Links": null, + "PublishAllPorts": false + } + } + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### List processes running inside a container + +`GET /containers/(id)/top` + +List processes running inside the container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles": [ + "USER", + "PID", + "%CPU", + "%MEM", + "VSZ", + "RSS", + "TTY", + "STAT", + "START", + "TIME", + "COMMAND" + ], + "Processes": [ + ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], + ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] + ] + } + +Query Parameters: + +   + +- **ps\_args** – ps arguments to use (e.g., aux) + +Status Codes: + +- **200** – no error +- **404** – no such container +- 
**500** – server error + +### Inspect changes on a container's filesystem + +`GET /containers/(id)/changes` + +Inspect changes on container `id` 's filesystem + +**Example request**: + + GET /containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Export a container + +`GET /containers/(id)/export` + +Export the contents of container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Start a container + +`POST /containers/(id)/start` + +Start the container `id` + +**Example request**: + + POST /containers/(id)/start HTTP/1.1 + Content-Type: application/json + + { + "Binds":["/tmp:/tmp"], + "LxcConf":[{"Key":"lxc.utsname","Value":"docker"}], + "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts":false, + "Privileged":false, + "Dns": ["8.8.8.8"], + "VolumesFrom": ["parent", "other:ro"] + } + +**Example response**: + + HTTP/1.1 204 No Content + Content-Type: text/plain + +Json Parameters: + +   + +- **hostConfig** – the container's host configuration (optional) + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Stop a container + +`POST /containers/(id)/stop` + +Stop the container `id` + +**Example request**: + + POST /containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 OK + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Restart a container + +`POST /containers/(id)/restart` + +Restart the container `id` + +**Example request**: + + POST /containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Kill a container + +`POST /containers/(id)/kill` + +Kill the container `id` + +**Example request**: + + POST /containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters + +- **signal** - Signal to send to the container: integer or string like "SIGINT". + When not set, SIGKILL is assumed and the call will wait for the container to exit. + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Attach to a container + +`POST /containers/(id)/attach` + +Attach to the container `id` + +**Example request**: + + POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Defaul + false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. 
Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + + **Stream details**: + + When using the TTY setting is enabled in + [`POST /containers/create` +](/reference/api/docker_remote_api_v1.9/#create-a-container "POST /containers/create"), + the stream is the raw data from the process PTY and client's stdin. + When the TTY is disabled, then the stream is multiplexed to separate + stdout and stderr. + + The format is a **Header** and a **Payload** (frame). + + **HEADER** + + The header will contain the information on which stream write the + stream (stdout or stderr). It also contain the size of the + associated frame encoded on the last 4 bytes (uint32). + + It is encoded on the first 8 bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + + `STREAM_TYPE` can be: + +- 0: stdin (will be written on stdout) +- 1: stdout +- 2: stderr + + `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of + the uint32 size encoded as big endian. + + **PAYLOAD** + + The payload is the raw stream. + + **IMPLEMENTATION** + + The simplest way to implement the Attach protocol is the following: + + 1. Read 8 bytes + 2. chose stdout or stderr depending on the first byte + 3. Extract the frame size from the last 4 bytes + 4. Read the extracted size and output it on the correct output + 5. Goto 1) + +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Wait a container + +`POST /containers/(id)/wait` + +Block until container `id` stops, then returns + the exit code + +**Example request**: + + POST /containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Remove a container + + `DELETE /containers/(id*) +: Remove the container `id` from the filesystem + +**Example request**: + + DELETE /containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default false +- **force** – 1/True/true or 0/False/false, Removes the container + even if it was running. 
Default false + +Status Codes: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Copy files or folders from a container + +`POST /containers/(id)/copy` + +Copy files or folders of container `id` + +**Example request**: + + POST /containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### 2.2 Images + +### List Images + +`GET /images/json` + +**Example request**: + + GET /images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275 + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135 + } + ] + +### Create an image + +`POST /images/create` + +Create an image, either by pull it from the registry or by importing + i + +**Example request**: + + POST /images/create?fromImage=ubuntu HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + + When using this endpoint to pull an image from the registry, the + `X-Registry-Auth` header can be used to include + a base64-encoded AuthConfig object. + +Query Parameters: + +- **fromImage** – name of the image to pull +- **fromSrc** – source to import, - means stdin +- **repo** – repository +- **tag** – tag +- **registry** – the registry to pull from + +Request Headers: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +Status Codes: + +- **200** – no error +- **500** – server error + +### Insert a file in an image + +`POST /images/(name)/insert` + +Insert a file from `url` in the image + `name` at `path` + +**Example request**: + + POST /images/test/insert?path=/usr&url=myurl HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Inserting..."} + {"status":"Inserting", "progress":"1/? (n/a)", "progressDetail":{"current":1}} + {"error":"Invalid..."} + ... 
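+
+Both parameters are URL values themselves, so they should be
+query-escaped when the request is built. A short Go sketch of
+constructing the URL (the daemon address and source URL are
+placeholders):
+
+    package main
+
+    import (
+        "fmt"
+        "net/url"
+    )
+
+    func main() {
+        q := url.Values{}
+        q.Set("path", "/usr")                            // where the file lands in the image
+        q.Set("url", "http://example.com/some-file.txt") // placeholder source URL
+        fmt.Println("http://127.0.0.1:2375/images/test/insert?" + q.Encode())
+    }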
+ +Query Parameters: + +- **url** – The url from where the file is taken +- **path** – The path where the file is stored + +Status Codes: + +- **200** – no error +- **500** – server error + +### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /images/ubuntu/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "parent":"27cf784147099545", + "created":"2013-03-23T22:24:18.818426-07:00", + "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "container_config": + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":false, + "AttachStderr":false, + "PortSpecs":null, + "Tty":true, + "OpenStdin":true, + "StdinOnce":false, + "Env":null, + "Cmd": ["/bin/bash"] + "Image":"ubuntu", + "Volumes":null, + "WorkingDir":"" + }, + "Size": 6824592 + } + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "b750fe79269d", + "Created": 1364102658, + "CreatedBy": "/bin/bash" + }, + { + "Id": "27cf78414709", + "Created": 1364068391, + "CreatedBy": "" + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + + If you wish to push an image on to a private registry, that image must already have been tagged + into a repository which references that registry host name and port. This repository name should + then be used in the URL. This mirrors the flow of the CLI. + +**Example request**: + + POST /images/registry.acme.com:5000/test/push HTTP/1.1 + + +Query Parameters: + +- **tag** – the tag to associate with the image on the registry, optional + +Request Headers: + +- **X-Registry-Auth** – include a base64-encoded AuthConfig object. 
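+
+A Go sketch of building that header, assuming the AuthConfig fields
+mirror the `POST /auth` payload (`username`, `password`, `email`,
+`serveraddress`) and that the value is encoded with URL-safe base64 as
+the Docker CLI does:
+
+    package main
+
+    import (
+        "encoding/base64"
+        "encoding/json"
+        "net/http"
+    )
+
+    func main() {
+        // Hypothetical credentials.
+        auth := map[string]string{
+            "username":      "hannibal",
+            "password":      "xxxx",
+            "email":         "hannibal@a-team.com",
+            "serveraddress": "https://index.docker.io/v1/",
+        }
+        buf, err := json.Marshal(auth)
+        if err != nil {
+            panic(err)
+        }
+
+        req, err := http.NewRequest("POST", "http://127.0.0.1:2375/images/test/push", nil)
+        if err != nil {
+            panic(err)
+        }
+        req.Header.Set("X-Registry-Auth", base64.URLEncoding.EncodeToString(buf))
+
+        resp, err := http.DefaultClient.Do(req)
+        if err != nil {
+            panic(err)
+        }
+        resp.Body.Close()
+    }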
+ +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 OK + +Query Parameters: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +Status Codes: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Remove an image + + `DELETE /images/(name*) +: Remove the image `name` from the filesystem + +**Example request**: + + DELETE /images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +Query Parameters: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +Status Codes: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_trusted": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_trusted": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_trusted": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +Query Parameters: + +- **term** – term to search + +Status Codes: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +### Build an image from Dockerfile via stdin + +`POST /build` + +Build an image from Dockerfile via stdin + +**Example request**: + + POST /build HTTP/1.1 + + {{ TAR STREAM }} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + + The stream must be a tar archive compressed with one of the + following algorithms: identity (no compression), gzip, bzip2, xz. + + The archive must include a file called `Dockerfile` + at its root. It may include any number of other files, + which will be accessible in the build context (See the [*ADD build + command*](/reference/builder/#add)). + +Query Parameters: + +- **t** – repository name (and optionally a tag) to be applied to + the resulting image in case of success +- **remote** – git or HTTP/HTTPS URI build source +- **q** – suppress verbose build output +- **nocache** – do not use the cache when building the image +- **rm** - remove intermediate containers after a successful build + + Request Headers: + +- **Content-type** – should be set to `"application/tar"`. 
+- **X-Registry-Config** – base64-encoded ConfigFile object + +Status Codes: + +- **200** – no error +- **500** – server error + +### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /auth HTTP/1.1 + Content-Type: application/json + + { + "username":" hannibal", + "password: "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + +Status Codes: + +- **200** – no error +- **204** – no error +- **500** – server error + +### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers":11, + "Images":16, + "Debug":false, + "NFd": 11, + "NGoroutines":21, + "MemoryLimit":true, + "SwapLimit":false, + "IPv4Forwarding":true + } + +Status Codes: + +- **200** – no error +- **500** – server error + +### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version":"0.2.2", + "GitCommit":"5a2a5cc+CHANGES", + "GoVersion":"go1.0.3" + } + +Status Codes: + +- **200** – no error +- **500** – server error + +### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "date" + ], + "Volumes":{ + "/tmp": {} + }, + "WorkingDir":"", + "NetworkDisabled": false, + "ExposedPorts":{ + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 OK + Content-Type: application/vnd.docker.raw-stream + + {"Id": "596069db4bf5"} + + +Json Parameters: + + + +- **config** - the container's configuration + +Query Parameters: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **m** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") + +Status Codes: + +- **201** – no error +- **404** – no such container +- **500** – server error + +### Monitor Docker's events + +`GET /events` + +Get events from docker, either in real time via streaming, or via +polling (using since). 
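+
+A streaming client holds the connection open and decodes one JSON
+object per event as it arrives. A minimal Go sketch, assuming a daemon
+bound to `127.0.0.1:2375`:
+
+    package main
+
+    import (
+        "encoding/json"
+        "fmt"
+        "net/http"
+    )
+
+    func main() {
+        resp, err := http.Get("http://127.0.0.1:2375/events?since=1374067924")
+        if err != nil {
+            panic(err)
+        }
+        defer resp.Body.Close()
+
+        dec := json.NewDecoder(resp.Body)
+        for {
+            var ev struct {
+                Status string `json:"status"`
+                ID     string `json:"id"`
+                From   string `json:"from"`
+                Time   int64  `json:"time"`
+            }
+            if err := dec.Decode(&ev); err != nil {
+                break // connection closed
+            }
+            fmt.Printf("%d %s %s (%s)\n", ev.Time, ev.Status, ev.ID, ev.From)
+        }
+    }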
+ +Docker containers will report the following events: + + create, destroy, die, export, kill, pause, restart, start, stop, unpause + +and Docker images will report: + + untag, delete + +**Example request**: + + GET /events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} + +Query Parameters: + +- **since** – timestamp used for polling + +Status Codes: + +- **200** – no error +- **500** – server error + +### Get a tarball containing all images and tags in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository + specified by `name`. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +Status Codes: + +- **200** – no error +- **500** – server error + +### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into the docker repository. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /images/load + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +Status Codes: + +- **200** – no error +- **500** – server error + +### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing three files: + +1. `VERSION`: currently `1.0` - the file format version +2. `json`: detailed layer information, similar to `docker inspect layer_id` +3. `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file will contain `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, there will also be a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +# 3. Going further + +## 3.1 Inside `docker run` + +Here are the steps of `docker run` : + + - Create the container + + - If the status code is 404, it means the image doesn't exist: + - Try to pull it + - Then retry to create the container + + - Start the container + + - If you are not in detached mode: + - Attach to the container, using logs=1 (to have stdout and + stderr from the container's start) and stream=1 + + - If in detached mode or only stdin is attached: + - Display the container's id + +## 3.2 Hijacking + +In this version of the API, /attach, uses hijacking to transport stdin, +stdout and stderr on the same socket. This might change in the future. + +## 3.3 CORS Requests + +To enable cross origin requests to the remote api add the flag +"--api-enable-cors" when running docker in daemon mode. 
+ + $ docker -d -H="192.168.1.9:2375" --api-enable-cors diff --git a/docs/reference/api/docker_remote_api_v1.11.md b/docs/reference/api/docker_remote_api_v1.11.md new file mode 100644 index 00000000..3a1c9532 --- /dev/null +++ b/docs/reference/api/docker_remote_api_v1.11.md @@ -0,0 +1,1385 @@ + + +# Docker Remote API v1.11 + +## 1. Brief introduction + + - The Remote API has replaced `rcli`. + - The daemon listens on `unix:///var/run/docker.sock` but you can bind + Docker to another host/port or a Unix socket. + - The API tends to be REST, but for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `STDOUT`, `STDIN` + and `STDERR`. + +# 2. Endpoints + +## 2.1 Containers + +### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Image": "ubuntu:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Image": "ubuntu:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Image": "ubuntu:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Image": "ubuntu:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +Query Parameters: + +   + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created containers, include non-running ones. +- **since** – Show only containers created since Id, include non-running ones. +- **before** – Show only containers created before Id, include non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers sizes + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "date" + ], + "Image":"ubuntu", + "Volumes":{ + "/tmp": {} + }, + "VolumesFrom":"", + "WorkingDir":"", + "DisableNetwork": false, + "ExposedPorts":{ + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806" + "Warnings":[] + } + +Json Parameters: + +- **config** – the container's configuration + +Query Parameters: + +- **name** – Assign the specified name to the container. Mus + match `/?[a-zA-Z0-9_-]+`. 
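+
+A minimal Go sketch of this call, sending only the fields that differ
+from their zero values (the daemon address and container name are
+placeholders):
+
+    package main
+
+    import (
+        "bytes"
+        "encoding/json"
+        "fmt"
+        "net/http"
+    )
+
+    func main() {
+        body, err := json.Marshal(map[string]interface{}{
+            "Image": "ubuntu",
+            "Cmd":   []string{"date"},
+        })
+        if err != nil {
+            panic(err)
+        }
+
+        resp, err := http.Post("http://127.0.0.1:2375/containers/create?name=my_container",
+            "application/json", bytes.NewReader(body))
+        if err != nil {
+            panic(err)
+        }
+        defer resp.Body.Close()
+
+        var out struct {
+            Id       string
+            Warnings []string
+        }
+        if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
+            panic(err)
+        }
+        fmt.Println("created:", out.Id, out.Warnings)
+    }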
+ +Status Codes: + +- **201** – no error +- **404** – no such container +- **406** – impossible to attach (container not running) +- **500** – server error + +### Inspect a container + +`GET /containers/(id)/json` + +Return low-level information on the container `id` + + +**Example request**: + + GET /containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Created": "2013-05-07T14:51:42.041847+02:00", + "Path": "date", + "Args": [], + "Config": { + "Hostname": "4fa6e0f0c678", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Dns": null, + "Image": "ubuntu", + "Volumes": {}, + "VolumesFrom": "", + "WorkingDir": "" + }, + "State": { + "Running": false, + "Pid": 0, + "ExitCode": 0, + "StartedAt": "2013-05-07T14:51:42.087658+02:01360", + "Ghost": false + }, + "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "NetworkSettings": { + "IpAddress": "", + "IpPrefixLen": 0, + "Gateway": "", + "Bridge": "", + "PortMapping": null + }, + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", + "ResolvConfPath": "/etc/resolv.conf", + "Volumes": {}, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "LxcConf": [], + "Privileged": false, + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "49153" + } + ] + }, + "Links": null, + "PublishAllPorts": false + } + } + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### List processes running inside a container + +`GET /containers/(id)/top` + +List processes running inside the container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles": [ + "USER", + "PID", + "%CPU", + "%MEM", + "VSZ", + "RSS", + "TTY", + "STAT", + "START", + "TIME", + "COMMAND" + ], + "Processes": [ + ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], + ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] + ] + } + +Query Parameters: + +- **ps_args** – ps arguments to use (e.g., aux) + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Get container logs + +`GET /containers/(id)/logs` + +Get stdout and stderr logs from the container ``id`` + +**Example request**: + + GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + +Query Parameters: + +   + +- **follow** – 1/True/true or 0/False/false, return stream. + Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log. Default false +- **timestamps** – 1/True/true or 0/False/false, if logs=true, prin + timestamps for every log line. 
Default false + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Inspect changes on a container's filesystem + +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Export a container + +`GET /containers/(id)/export` + +Export the contents of container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Start a container + +`POST /containers/(id)/start` + +Start the container `id` + +**Example request**: + + POST /containers/(id)/start HTTP/1.1 + Content-Type: application/json + + { + "Binds":["/tmp:/tmp"], + "LxcConf":[{"Key":"lxc.utsname","Value":"docker"}], + "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts":false, + "Privileged":false, + "Dns": ["8.8.8.8"], + "VolumesFrom": ["parent", "other:ro"] + } + +**Example response**: + + HTTP/1.1 204 No Content + Content-Type: text/plain + +Json Parameters: + +   + +- **hostConfig** – the container's host configuration (optional) + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Stop a container + +`POST /containers/(id)/stop` + +Stop the container `id` + +**Example request**: + + POST /containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Restart a container + +`POST /containers/(id)/restart` + +Restart the container `id` + +**Example request**: + + POST /containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Kill a container + +`POST /containers/(id)/kill` + +Kill the container `id` + +**Example request**: + + POST /containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters + +- **signal** - Signal to send to the container: integer or string like "SIGINT". + When not set, SIGKILL is assumed and the call will wait for the container to exit. + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Attach to a container + +`POST /containers/(id)/attach` + +Attach to the container `id` + +**Example request**: + + POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Defaul + false +- **stream** – 1/True/true or 0/False/false, return stream. 
+ Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + + **Stream details**: + + When using the TTY setting is enabled in + [`POST /containers/create` + ](/reference/api/docker_remote_api_v1.9/#create-a-container "POST /containers/create"), + the stream is the raw data from the process PTY and client's stdin. + When the TTY is disabled, then the stream is multiplexed to separate + stdout and stderr. + + The format is a **Header** and a **Payload** (frame). + + **HEADER** + + The header will contain the information on which stream write the + stream (stdout or stderr). It also contain the size of the + associated frame encoded on the last 4 bytes (uint32). + + It is encoded on the first 8 bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + + `STREAM_TYPE` can be: + +- 0: stdin (will be written on stdout) +- 1: stdout +- 2: stderr + + `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of + the uint32 size encoded as big endian. + + **PAYLOAD** + + The payload is the raw stream. + + **IMPLEMENTATION** + + The simplest way to implement the Attach protocol is the following: + + 1. Read 8 bytes + 2. chose stdout or stderr depending on the first byte + 3. Extract the frame size from the last 4 bytes + 4. Read the extracted size and output it on the correct output + 5. Goto 1) + +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Wait a container + +`POST /containers/(id)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Remove a container + +`DELETE /containers/(id)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. 
Default false +- **force** – 1/True/true or 0/False/false, Removes the container + even if it was running. Default false + +Status Codes: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Copy files or folders from a container + +`POST /containers/(id)/copy` + +Copy files or folders of container `id` + +**Example request**: + + POST /containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +## 2.2 Images + +### List Images + +`GET /images/json` + +**Example request**: + + GET /images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275 + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135 + } + ] + +### Create an image + +`POST /images/create` + +Create an image, either by pull it from the registry or by importing i + +**Example request**: + + POST /images/create?fromImage=ubuntu HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + + When using this endpoint to pull an image from the registry, the + `X-Registry-Auth` header can be used to include + a base64-encoded AuthConfig object. 
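+
+A short Go sketch of a pull that supplies this header; the credentials
+are hypothetical, the JSON has the same shape as the `POST /auth`
+payload, and the value is encoded with URL-safe base64 as the Docker
+CLI does:
+
+    package main
+
+    import (
+        "encoding/base64"
+        "io"
+        "net/http"
+        "os"
+    )
+
+    func main() {
+        authJSON := []byte(`{"username":"hannibal","password":"xxxx","email":"hannibal@a-team.com","serveraddress":"https://index.docker.io/v1/"}`)
+
+        req, err := http.NewRequest("POST", "http://127.0.0.1:2375/images/create?fromImage=ubuntu", nil)
+        if err != nil {
+            panic(err)
+        }
+        req.Header.Set("X-Registry-Auth", base64.URLEncoding.EncodeToString(authJSON))
+
+        resp, err := http.DefaultClient.Do(req)
+        if err != nil {
+            panic(err)
+        }
+        defer resp.Body.Close()
+        io.Copy(os.Stdout, resp.Body) // progress objects stream until the pull completes
+    }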
+ +Query Parameters: + +- **fromImage** – name of the image to pull +- **fromSrc** – source to import, - means stdin +- **repo** – repository +- **tag** – tag +- **registry** – the registry to pull from + +Request Headers: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +Status Codes: + +- **200** – no error +- **500** – server error + +### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /images/ubuntu/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "parent":"27cf784147099545", + "created":"2013-03-23T22:24:18.818426-07:00", + "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "container_config": + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":false, + "AttachStderr":false, + "PortSpecs":null, + "Tty":true, + "OpenStdin":true, + "StdinOnce":false, + "Env":null, + "Cmd": ["/bin/bash"], + "Dns":null, + "Image":"ubuntu", + "Volumes":null, + "VolumesFrom":"", + "WorkingDir":"" + }, + "Size": 6824592 + } + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "b750fe79269d", + "Created": 1364102658, + "CreatedBy": "/bin/bash" + }, + { + "Id": "27cf78414709", + "Created": 1364068391, + "CreatedBy": "" + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + + If you wish to push an image on to a private registry, that image must already have been tagged + into a repository which references that registry host name and port. This repository name should + then be used in the URL. This mirrors the flow of the CLI. + +**Example request**: + + POST /images/registry.acme.com:5000/test/push HTTP/1.1 + + +Query Parameters: + +- **tag** – the tag to associate with the image on the registry, optional + +Request Headers: + +- **X-Registry-Auth** – include a base64-encoded AuthConfig object. 
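+
+A Go sketch of the resulting tag-then-push flow against a private
+registry, reusing the placeholder names above
+(`registry.acme.com:5000`, `test`); authentication is omitted:
+
+    package main
+
+    import (
+        "fmt"
+        "net/http"
+    )
+
+    func main() {
+        base := "http://127.0.0.1:2375"
+
+        // 1. Tag the image into a repository naming the registry host and port.
+        resp, err := http.Post(base+"/images/test/tag?repo=registry.acme.com:5000/test&tag=latest", "text/plain", nil)
+        if err != nil {
+            panic(err)
+        }
+        resp.Body.Close()
+        fmt.Println("tag:", resp.Status)
+
+        // 2. Push using that repository name in the URL; add X-Registry-Auth if required.
+        resp, err = http.Post(base+"/images/registry.acme.com:5000/test/push", "text/plain", nil)
+        if err != nil {
+            panic(err)
+        }
+        defer resp.Body.Close()
+        fmt.Println("push:", resp.Status)
+    }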
+ +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 OK + +Query Parameters: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +Status Codes: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +Query Parameters: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +Status Codes: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_trusted": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_trusted": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_trusted": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +Query Parameters: + +- **term** – term to search + +Status Codes: + +- **200** – no error +- **500** – server error + +## 2.3 Misc + +### Build an image from Dockerfile via stdin + +`POST /build` + +Build an image from Dockerfile via stdin + +**Example request**: + + POST /build HTTP/1.1 + + {{ TAR STREAM }} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + + The stream must be a tar archive compressed with one of the + following algorithms: identity (no compression), gzip, bzip2, xz. + + The archive must include a file called `Dockerfile` + at its root. It may include any number of other files, + which will be accessible in the build context (See the [*ADD build + command*](/reference/builder/#dockerbuilder)). + +Query Parameters: + +- **t** – repository name (and optionally a tag) to be applied to + the resulting image in case of success +- **remote** – git or HTTP/HTTPS URI build source +- **q** – suppress verbose build output +- **nocache** – do not use the cache when building the image +- **rm** - remove intermediate containers after a successful build + + Request Headers: + +- **Content-type** – should be set to `"application/tar"`. 
+
- **X-Registry-Config** – base64-encoded ConfigFile object

Status Codes:

- **200** – no error
- **500** – server error

### Check auth configuration

`POST /auth`

Get the default username and email

**Example request**:

    POST /auth HTTP/1.1
    Content-Type: application/json

    {
        "username": "hannibal",
        "password": "xxxx",
        "email": "hannibal@a-team.com",
        "serveraddress": "https://index.docker.io/v1/"
    }

**Example response**:

    HTTP/1.1 200 OK

Status Codes:

- **200** – no error
- **204** – no error
- **500** – server error

### Display system-wide information

`GET /info`

Display system-wide information

**Example request**:

    GET /info HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
        "Containers": 11,
        "Images": 16,
        "Driver": "btrfs",
        "ExecutionDriver": "native-0.1",
        "KernelVersion": "3.12.0-1-amd64",
        "Debug": false,
        "NFd": 11,
        "NGoroutines": 21,
        "NEventsListener": 0,
        "InitPath": "/usr/bin/docker",
        "IndexServerAddress": ["https://index.docker.io/v1/"],
        "MemoryLimit": true,
        "SwapLimit": false,
        "IPv4Forwarding": true
    }

Status Codes:

- **200** – no error
- **500** – server error

### Show the docker version information

`GET /version`

Show the docker version information

**Example request**:

    GET /version HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
        "Version":"0.2.2",
        "GitCommit":"5a2a5cc+CHANGES",
        "GoVersion":"go1.0.3"
    }

Status Codes:

- **200** – no error
- **500** – server error

### Ping the docker server

`GET /_ping`

Ping the docker server

**Example request**:

    GET /_ping HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: text/plain

    OK

Status Codes:

- **200** - no error
- **500** - server error

### Create a new image from a container's changes

`POST /commit`

Create a new image from a container's changes

**Example request**:

    POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1
    Content-Type: application/json

    {
        "Hostname":"",
        "User":"",
        "Memory":0,
        "MemorySwap":0,
        "AttachStdin":false,
        "AttachStdout":true,
        "AttachStderr":true,
        "PortSpecs":null,
        "Tty":false,
        "OpenStdin":false,
        "StdinOnce":false,
        "Env":null,
        "Cmd":[
                "date"
        ],
        "Volumes":{
                "/tmp": {}
        },
        "WorkingDir":"",
        "DisableNetwork": false,
        "ExposedPorts":{
                "22/tcp": {}
        }
    }

**Example response**:

    HTTP/1.1 201 Created
    Content-Type: application/vnd.docker.raw-stream

    {"Id": "596069db4bf5"}

Json Parameters:

- **config** - the container's configuration

Query Parameters:

- **container** – source container
- **repo** – repository
- **tag** – tag
- **m** – commit message
- **author** – author (e.g., "John Hannibal Smith
  <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")

Status Codes:

- **201** – no error
- **404** – no such container
- **500** – server error

### Monitor Docker's events

`GET /events`

Get container events from docker, either in real time via streaming, or via
polling (using since).
+ +Docker containers will report the following events: + + create, destroy, die, export, kill, pause, restart, start, stop, unpause + +and Docker images will report: + + untag, delete + +**Example request**: + + GET /events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} + +Query Parameters: + +- **since** – timestamp used for polling +- **until** – timestamp used for polling + +Status Codes: + +- **200** – no error +- **500** – server error + +### Get a tarball containing all images and tags in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository +specified by `name`. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +Status Codes: + +- **200** – no error +- **500** – server error + +### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into the docker repository. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /images/load + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +Status Codes: + +- **200** – no error +- **500** – server error + +### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing three files: + +1. `VERSION`: currently `1.0` - the file format version +2. `json`: detailed layer information, similar to `docker inspect layer_id` +3. `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file will contain `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, there will also be a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +# 3. Going further + +## 3.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it + - Then retry to create the container + +- Start the container + +- If you are not in detached mode: + - Attach to the container, using logs=1 (to have stdout and + stderr from the container's start) and stream=1 + +- If in detached mode or only stdin is attached: + - Display the container's id + +## 3.2 Hijacking + +In this version of the API, /attach, uses hijacking to transport stdin, +stdout and stderr on the same socket. This might change in the future. + +## 3.3 CORS Requests + +To enable cross origin requests to the remote api add the flag +"--api-enable-cors" when running docker in daemon mode. 
+ + $ docker -d -H="192.168.1.9:2375" --api-enable-cors diff --git a/docs/reference/api/docker_remote_api_v1.12.md b/docs/reference/api/docker_remote_api_v1.12.md new file mode 100644 index 00000000..2016e184 --- /dev/null +++ b/docs/reference/api/docker_remote_api_v1.12.md @@ -0,0 +1,1451 @@ + + +# Docker Remote API v1.12 + +## 1. Brief introduction + + - The Remote API has replaced `rcli`. + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket]( + /articles/basics/#bind-docker-to-another-hostport-or-a-unix-socket). + - The API tends to be REST, but for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `STDOUT`, + `STDIN` and `STDERR`. + +# 2. Endpoints + +## 2.1 Containers + +### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Image": "ubuntu:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Image": "ubuntu:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Image": "ubuntu:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Image": "ubuntu:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +Query Parameters: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by defaul +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** – a JSON encoded value of the filters (a map[string][]string) + to process on the images list. + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname":"", + "Domainname": "", + "User":"", + "Memory":0, + "MemorySwap":0, + "CpuShares": 512, + "Cpuset": "0,1", + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "date" + ], + "Image":"ubuntu", + "Volumes":{ + "/tmp": {} + }, + "WorkingDir":"", + "NetworkDisabled": false, + "ExposedPorts":{ + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806" + "Warnings":[] + } + +Json Parameters: + +- **config** – the container's configuration + +Query Parameters: + +   + +- **name** – Assign the specified name to the container. Mus + match `/?[a-zA-Z0-9_-]+`. 
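As a minimal sketch of calling this endpoint: only `Image`, `Cmd` and `AttachStdout` are set (omitted fields take their zero values), and the daemon address and the container name `my_container` are placeholders:

```
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Minimal configuration drawn from the example request above.
	body := []byte(`{"Image": "ubuntu", "Cmd": ["date"], "AttachStdout": true}`)
	resp, err := http.Post(
		"http://192.168.1.9:2375/containers/create?name=my_container",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The response carries the new container's ID plus any warnings.
	var out struct {
		Id       string
		Warnings []string
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println("created container", out.Id, "warnings:", out.Warnings)
}
```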
+

Status Codes:

- **201** – no error
- **404** – no such container
- **406** – impossible to attach (container not running)
- **500** – server error

### Inspect a container

`GET /containers/(id)/json`

Return low-level information on the container `id`


**Example request**:

    GET /containers/4fa6e0f0c678/json HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
        "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2",
        "Created": "2013-05-07T14:51:42.041847+02:00",
        "Path": "date",
        "Args": [],
        "Config": {
            "Hostname": "4fa6e0f0c678",
            "User": "",
            "Memory": 0,
            "MemorySwap": 0,
            "AttachStdin": false,
            "AttachStdout": true,
            "AttachStderr": true,
            "PortSpecs": null,
            "Tty": false,
            "OpenStdin": false,
            "StdinOnce": false,
            "Env": null,
            "Cmd": [
                "date"
            ],
            "Dns": null,
            "Image": "ubuntu",
            "Volumes": {},
            "VolumesFrom": "",
            "WorkingDir": ""
        },
        "State": {
            "Running": false,
            "Pid": 0,
            "ExitCode": 0,
            "StartedAt": "2013-05-07T14:51:42.087658+02:00",
            "Ghost": false
        },
        "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
        "NetworkSettings": {
            "IpAddress": "",
            "IpPrefixLen": 0,
            "Gateway": "",
            "Bridge": "",
            "PortMapping": null
        },
        "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker",
        "ResolvConfPath": "/etc/resolv.conf",
        "Volumes": {},
        "HostConfig": {
            "Binds": null,
            "ContainerIDFile": "",
            "LxcConf": [],
            "Privileged": false,
            "PortBindings": {
                "80/tcp": [
                    {
                        "HostIp": "0.0.0.0",
                        "HostPort": "49153"
                    }
                ]
            },
            "Links": null,
            "PublishAllPorts": false
        }
    }

Status Codes:

- **200** – no error
- **404** – no such container
- **500** – server error

### List processes running inside a container

`GET /containers/(id)/top`

List processes running inside the container `id`

**Example request**:

    GET /containers/4fa6e0f0c678/top HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
        "Titles": [
            "USER",
            "PID",
            "%CPU",
            "%MEM",
            "VSZ",
            "RSS",
            "TTY",
            "STAT",
            "START",
            "TIME",
            "COMMAND"
        ],
        "Processes": [
            ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"],
            ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"]
        ]
    }

Query Parameters:

- **ps_args** – ps arguments to use (e.g., aux)

Status Codes:

- **200** – no error
- **404** – no such container
- **500** – server error

### Get container logs

`GET /containers/(id)/logs`

Get stdout and stderr logs from the container ``id``

**Example request**:

    GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1&timestamps=1&follow=1 HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/vnd.docker.raw-stream

    {{ STREAM }}

Query Parameters:

- **follow** – 1/True/true or 0/False/false, return stream.
  Default false
- **stdout** – 1/True/true or 0/False/false, if logs=true, return
  stdout log. Default false
- **stderr** – 1/True/true or 0/False/false, if logs=true, return
  stderr log. Default false
- **timestamps** – 1/True/true or 0/False/false, if logs=true, print
  timestamps for every log line.
Default false + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Inspect changes on a container's filesystem + +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Export a container + +`GET /containers/(id)/export` + +Export the contents of container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Start a container + +`POST /containers/(id)/start` + +Start the container `id` + +**Example request**: + + POST /containers/(id)/start HTTP/1.1 + Content-Type: application/json + + { + "Binds":["/tmp:/tmp"], + "Links":["redis3:redis"], + "LxcConf":[{"Key":"lxc.utsname","Value":"docker"}], + "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts":false, + "Privileged":false, + "Dns": ["8.8.8.8"], + "VolumesFrom": ["parent", "other:ro"] + } + +**Example response**: + + HTTP/1.1 204 No Content + Content-Type: text/plain + +Json Parameters: + +   + +- **hostConfig** – the container's host configuration (optional) + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Stop a container + +`POST /containers/(id)/stop` + +Stop the container `id` + +**Example request**: + + POST /containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Restart a container + +`POST /containers/(id)/restart` + +Restart the container `id` + +**Example request**: + + POST /containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Kill a container + +`POST /containers/(id)/kill` + +Kill the container `id` + +**Example request**: + + POST /containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters + +- **signal** - Signal to send to the container: integer or string like "SIGINT". + When not set, SIGKILL is assumed and the call will wait for the container to exit. 
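A small helper sketching a graceful-stop-then-kill flow built from the stop and kill endpoints above; the daemon address and container ID reuse values from the examples and are placeholders:

```
package main

import (
	"fmt"
	"net/http"
)

// stopOrKill first asks the daemon to stop the container, waiting up
// to t seconds before the daemon kills it itself; if that does not
// succeed it falls back to an explicit SIGKILL via /kill.
func stopOrKill(host, id string, t int) error {
	stop := fmt.Sprintf("http://%s/containers/%s/stop?t=%d", host, id, t)
	if resp, err := http.Post(stop, "", nil); err == nil {
		resp.Body.Close()
		if resp.StatusCode == http.StatusNoContent {
			return nil
		}
	}
	kill := fmt.Sprintf("http://%s/containers/%s/kill?signal=SIGKILL", host, id)
	resp, err := http.Post(kill, "", nil)
	if err != nil {
		return err
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusNoContent {
		return fmt.Errorf("kill %s: unexpected status %s", id, resp.Status)
	}
	return nil
}

func main() {
	if err := stopOrKill("192.168.1.9:2375", "e90e34656806", 5); err != nil {
		fmt.Println(err)
	}
}
```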
+ +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Pause a container + +`POST /containers/(id)/pause` + +Pause the container `id` + +**Example request**: + + POST /containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Unpause a container + +`POST /containers/(id)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Attach to a container + +`POST /containers/(id)/attach` + +Attach to the container `id` + +**Example request**: + + POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach to stdin. + Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + + **Stream details**: + + When using the TTY setting is enabled in + [`POST /containers/create` + ](/reference/api/docker_remote_api_v1.9/#create-a-container "POST /containers/create"), + the stream is the raw data from the process PTY and client's stdin. + When the TTY is disabled, then the stream is multiplexed to separate + stdout and stderr. + + The format is a **Header** and a **Payload** (frame). + + **HEADER** + + The header will contain the information on which stream write the + stream (stdout or stderr). It also contain the size of the + associated frame encoded on the last 4 bytes (uint32). + + It is encoded on the first 8 bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + + `STREAM_TYPE` can be: + +- 0: stdin (will be written on stdout) +- 1: stdout +- 2: stderr + + `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of + the uint32 size encoded as big endian. + + **PAYLOAD** + + The payload is the raw stream. + + **IMPLEMENTATION** + + The simplest way to implement the Attach protocol is the following: + + 1. Read 8 bytes + 2. chose stdout or stderr depending on the first byte + 3. Extract the frame size from the last 4 bytes + 4. Read the extracted size and output it on the correct output + 5. Goto 1 + +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. 
+ Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Wait a container + +`POST /containers/(id)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Remove a container + +`DELETE /containers/(id)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default false +- **force** – 1/True/true or 0/False/false, Removes the container + even if it was running. Default false + +Status Codes: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Copy files or folders from a container + +`POST /containers/(id)/copy` + +Copy files or folders of container `id` + +**Example request**: + + POST /containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +## 2.2 Images + +### List Images + +`GET /images/json` + +**Example request**: + + GET /images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275 + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135 + } + ] + + +Query Parameters: + +   + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a json encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - dangling=true +- **filter** - only return images with the specified name + + + +### Create an image + +`POST /images/create` + +Create an image, either by pull it from the registry or by importing i + +**Example request**: + + POST /images/create?fromImage=ubuntu HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + + When using this endpoint to pull an image from the registry, the + `X-Registry-Auth` header can be used to include + a base64-encoded AuthConfig object. 
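The attach endpoints above multiplex stdout and stderr onto a single connection when the container was created without a TTY, using the 8-byte frame header described in the stream details. A minimal demultiplexer for that framing, as a sketch that works on any `io.Reader` carrying the raw stream:

```
package main

import (
	"encoding/binary"
	"errors"
	"io"
	"os"
)

// demux splits the attach stream following the framing above: an
// 8-byte header whose first byte is the stream type and whose last
// four bytes are the big-endian uint32 payload size, then the payload.
func demux(r io.Reader, stdout, stderr io.Writer) error {
	var header [8]byte
	for {
		if _, err := io.ReadFull(r, header[:]); err != nil {
			if err == io.EOF {
				return nil // clean end of stream
			}
			return err
		}
		var dst io.Writer
		switch header[0] {
		case 0, 1: // stdin frames are written on stdout
			dst = stdout
		case 2:
			dst = stderr
		default:
			return errors.New("attach: unknown stream type")
		}
		size := binary.BigEndian.Uint32(header[4:8])
		if _, err := io.CopyN(dst, r, int64(size)); err != nil {
			return err
		}
	}
}

func main() {
	// For demonstration, demultiplex a captured stream fed on stdin.
	if err := demux(os.Stdin, os.Stdout, os.Stderr); err != nil {
		os.Exit(1)
	}
}
```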
+ +Query Parameters: + +- **fromImage** – name of the image to pull +- **fromSrc** – source to import, - means stdin +- **repo** – repository +- **tag** – tag +- **registry** – the registry to pull from + +Request Headers: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +Status Codes: + +- **200** – no error +- **500** – server error + + + +### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /images/ubuntu/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Created": "2013-03-23T22:24:18.818426-07:00", + "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "ContainerConfig": + { + "Hostname": "", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "PortSpecs": null, + "Tty": true, + "OpenStdin": true, + "StdinOnce": false, + "Env": null, + "Cmd": ["/bin/bash"], + "Dns": null, + "Image": "ubuntu", + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "" + }, + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Parent": "27cf784147099545", + "Size": 6824592 + } + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "b750fe79269d", + "Created": 1364102658, + "CreatedBy": "/bin/bash" + }, + { + "Id": "27cf78414709", + "Created": 1364068391, + "CreatedBy": "" + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + + If you wish to push an image on to a private registry, that image must already have been tagged + into a repository which references that registry host name and port. This repository name should + then be used in the URL. This mirrors the flow of the CLI. + +**Example request**: + + POST /images/registry.acme.com:5000/test/push HTTP/1.1 + + +Query Parameters: + +- **tag** – the tag to associate with the image on the registry, optional + +Request Headers: + +- **X-Registry-Auth** – include a base64-encoded AuthConfig object. 
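Both the image-create endpoint and this push endpoint answer with a stream of JSON progress messages like the ones shown in the examples. A sketch of consuming such a stream with a streaming JSON decoder; the daemon address is a placeholder and only a few message fields are mapped:

```
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
)

// message maps a subset of the status/progress/error fields shown in
// the example responses above.
type message struct {
	Status   string `json:"status"`
	Progress string `json:"progress"`
	Error    string `json:"error"`
}

func main() {
	resp, err := http.Post(
		"http://192.168.1.9:2375/images/create?fromImage=ubuntu", "", nil)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer resp.Body.Close()

	// The body is a sequence of JSON objects; a json.Decoder consumes
	// them one by one until the connection closes.
	dec := json.NewDecoder(resp.Body)
	for {
		var m message
		if err := dec.Decode(&m); err == io.EOF {
			break
		} else if err != nil {
			fmt.Fprintln(os.Stderr, "decode:", err)
			return
		}
		if m.Error != "" {
			fmt.Fprintln(os.Stderr, "daemon error:", m.Error)
			return
		}
		fmt.Println(m.Status, m.Progress)
	}
}
```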
+ +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 OK + +Query Parameters: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +Status Codes: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +Query Parameters: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +Status Codes: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +Query Parameters: + +- **term** – term to search + +Status Codes: + +- **200** – no error +- **500** – server error + +## 2.3 Misc + +### Build an image from Dockerfile via stdin + +`POST /build` + +Build an image from Dockerfile via stdin + +**Example request**: + + POST /build HTTP/1.1 + + {{ TAR STREAM }} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + + The stream must be a tar archive compressed with one of the + following algorithms: identity (no compression), gzip, bzip2, xz. + + The archive must include a file called `Dockerfile` + at its root. It may include any number of other files, + which will be accessible in the build context (See the [*ADD build + command*](/reference/builder/#dockerbuilder)). + +Query Parameters: + +- **t** – repository name (and optionally a tag) to be applied to + the resulting image in case of success +- **remote** – git or HTTP/HTTPS URI build source +- **q** – suppress verbose build output +- **nocache** – do not use the cache when building the image +- **rm** - remove intermediate containers after a successful build (default behavior) +- **forcerm** - always remove intermediate containers (includes rm) + + Request Headers: + +- **Content-type** – should be set to `"application/tar"`. 
+- **X-Registry-Config** – base64-encoded ConfigFile object + +Status Codes: + +- **200** – no error +- **500** – server error + +### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /auth HTTP/1.1 + Content-Type: application/json + + { + "username":" hannibal", + "password: "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +Status Codes: + +- **200** – no error +- **204** – no error +- **500** – server error + +### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers": 11, + "Images": 16, + "Driver": "btrfs", + "ExecutionDriver": "native-0.1", + "KernelVersion": "3.12.0-1-amd64" + "Debug": false, + "NFd": 11, + "NGoroutines": 21, + "NEventsListener": 0, + "InitPath": "/usr/bin/docker", + "IndexServerAddress": ["https://index.docker.io/v1/"], + "MemoryLimit": true, + "SwapLimit": false, + "IPv4Forwarding": true + } + +Status Codes: + +- **200** – no error +- **500** – server error + +### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "ApiVersion": "1.12", + "Version": "0.2.2", + "GitCommit": "5a2a5cc+CHANGES", + "GoVersion": "go1.0.3" + } + +Status Codes: + +- **200** – no error +- **500** – server error + +### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +Status Codes: + +- **200** - no error +- **500** - server error + +### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 512, + "Cpuset": "0,1", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/vnd.docker.raw-stream + + {"Id": "596069db4bf5"} + +Json Parameters: + +- **config** - the container's configuration + +Query Parameters: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") + +Status Codes: + +- **201** – no error +- **404** – no such container +- **500** – server error + +### Monitor Docker's events + +`GET /events` + +Get container events from docker, either in real time via streaming, or via +polling (using since). 
+ +Docker containers will report the following events: + + create, destroy, die, export, kill, pause, restart, start, stop, unpause + +and Docker images will report: + + untag, delete + +**Example request**: + + GET /events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} + +Query Parameters: + +- **since** – timestamp used for polling +- **until** – timestamp used for polling + +Status Codes: + +- **200** – no error +- **500** – server error + +### Get a tarball containing all images and tags in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository +specified by `name`. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +Status Codes: + +- **200** – no error +- **500** – server error + +### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into the docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /images/load + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +Status Codes: + +- **200** – no error +- **500** – server error + +### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing three files: + +1. `VERSION`: currently `1.0` - the file format version +2. `json`: detailed layer information, similar to `docker inspect layer_id` +3. `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file will contain `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, there will also be a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +# 3. Going further + +## 3.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it + - Then retry to create the container + +- Start the container + +- If you are not in detached mode: + - Attach to the container, using logs=1 (to have stdout and + stderr from the container's start) and stream=1 + +- If in detached mode or only stdin is attached: + - Display the container's id + +## 3.2 Hijacking + +In this version of the API, /attach, uses hijacking to transport stdin, +stdout and stderr on the same socket. This might change in the future. + +## 3.3 CORS Requests + +To enable cross origin requests to the remote api add the flag +"--api-enable-cors" when running docker in daemon mode. 
+ + $ docker -d -H="192.168.1.9:2375" --api-enable-cors diff --git a/docs/reference/api/docker_remote_api_v1.13.md b/docs/reference/api/docker_remote_api_v1.13.md new file mode 100644 index 00000000..95905731 --- /dev/null +++ b/docs/reference/api/docker_remote_api_v1.13.md @@ -0,0 +1,1441 @@ + + +# Docker Remote API v1.13 + +## 1. Brief introduction + + - The Remote API has replaced `rcli`. + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket]( + /articles/basics/#bind-docker-to-another-hostport-or-a-unix-socket). + - The API tends to be REST, but for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `STDOUT`, + `STDIN` and `STDERR`. + +# 2. Endpoints + +## 2.1 Containers + +### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Image": "ubuntu:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Image": "ubuntu:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Image": "ubuntu:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Image": "ubuntu:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +Query Parameters: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created containers, include non-running ones. +- **since** – Show only containers created since Id, include non-running ones. +- **before** – Show only containers created before Id, include non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers sizes + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname":"", + "Domainname": "", + "User":"", + "Memory":0, + "MemorySwap":0, + "CpuShares": 512, + "Cpuset": "0,1", + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "date" + ], + "Image":"ubuntu", + "Volumes":{ + "/tmp": {} + }, + "WorkingDir":"", + "NetworkDisabled": false, + "ExposedPorts":{ + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806" + "Warnings":[] + } + +Json Parameters: + +- **config** – the container's configuration + +Query Parameters: + +   + +- **name** – Assign the specified name to the container. Mus + match `/?[a-zA-Z0-9_-]+`. 
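To illustrate the list endpoint at the top of this section, a sketch that fetches all containers, running or not; the struct mirrors a few fields of the example response and the daemon address is a placeholder:

```
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// container maps a subset of the fields in the GET /containers/json
// example response above.
type container struct {
	Id      string
	Image   string
	Command string
	Status  string
}

func main() {
	resp, err := http.Get("http://192.168.1.9:2375/containers/json?all=1")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var list []container
	if err := json.NewDecoder(resp.Body).Decode(&list); err != nil {
		panic(err)
	}
	for _, c := range list {
		fmt.Printf("%s  %-20s %s (%s)\n", c.Id, c.Image, c.Command, c.Status)
	}
}
```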
+

Status Codes:

- **201** – no error
- **404** – no such container
- **406** – impossible to attach (container not running)
- **500** – server error

### Inspect a container

`GET /containers/(id)/json`

Return low-level information on the container `id`


**Example request**:

    GET /containers/4fa6e0f0c678/json HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
        "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2",
        "Created": "2013-05-07T14:51:42.041847+02:00",
        "Path": "date",
        "Args": [],
        "Config": {
            "Hostname": "4fa6e0f0c678",
            "User": "",
            "Memory": 0,
            "MemorySwap": 0,
            "AttachStdin": false,
            "AttachStdout": true,
            "AttachStderr": true,
            "PortSpecs": null,
            "Tty": false,
            "OpenStdin": false,
            "StdinOnce": false,
            "Env": null,
            "Cmd": [
                "date"
            ],
            "Dns": null,
            "Image": "ubuntu",
            "Volumes": {},
            "VolumesFrom": "",
            "WorkingDir": ""
        },
        "State": {
            "Running": false,
            "Pid": 0,
            "ExitCode": 0,
            "StartedAt": "2013-05-07T14:51:42.087658+02:00",
            "Ghost": false
        },
        "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
        "NetworkSettings": {
            "IpAddress": "",
            "IpPrefixLen": 0,
            "Gateway": "",
            "Bridge": "",
            "PortMapping": null
        },
        "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker",
        "ResolvConfPath": "/etc/resolv.conf",
        "Volumes": {},
        "HostConfig": {
            "Binds": null,
            "ContainerIDFile": "",
            "LxcConf": [],
            "Privileged": false,
            "PortBindings": {
                "80/tcp": [
                    {
                        "HostIp": "0.0.0.0",
                        "HostPort": "49153"
                    }
                ]
            },
            "Links": ["/name:alias"],
            "PublishAllPorts": false
        }
    }

Status Codes:

- **200** – no error
- **404** – no such container
- **500** – server error

### List processes running inside a container

`GET /containers/(id)/top`

List processes running inside the container `id`

**Example request**:

    GET /containers/4fa6e0f0c678/top HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
        "Titles": [
            "USER",
            "PID",
            "%CPU",
            "%MEM",
            "VSZ",
            "RSS",
            "TTY",
            "STAT",
            "START",
            "TIME",
            "COMMAND"
        ],
        "Processes": [
            ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"],
            ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"]
        ]
    }

Query Parameters:

- **ps_args** – ps arguments to use (e.g., aux)

Status Codes:

- **200** – no error
- **404** – no such container
- **500** – server error

### Get container logs

`GET /containers/(id)/logs`

Get stdout and stderr logs from the container ``id``

**Example request**:

    GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1&timestamps=1&follow=1&tail=10 HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/vnd.docker.raw-stream

    {{ STREAM }}

Query Parameters:

- **follow** – 1/True/true or 0/False/false, return stream. Default false
- **stdout** – 1/True/true or 0/False/false, show stdout log. Default false
- **stderr** – 1/True/true or 0/False/false, show stderr log. Default false
- **timestamps** – 1/True/true or 0/False/false, print timestamps for every
  log line. Default false
- **tail** – Output specified number of lines at the end of logs: `all` or
  `<number>`.
Default all + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Inspect changes on a container's filesystem + +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Export a container + +`GET /containers/(id)/export` + +Export the contents of container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Start a container + +`POST /containers/(id)/start` + +Start the container `id` + +**Example request**: + + POST /containers/(id)/start HTTP/1.1 + Content-Type: application/json + + { + "Binds":["/tmp:/tmp"], + "Links":["redis3:redis"], + "LxcConf":[{"Key":"lxc.utsname","Value":"docker"}], + "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts":false, + "Privileged":false, + "Dns": ["8.8.8.8"], + "VolumesFrom": ["parent", "other:ro"] + } + +**Example response**: + + HTTP/1.1 204 No Content + Content-Type: text/plain + +Json Parameters: + +   + +- **hostConfig** – the container's host configuration (optional) + +Status Codes: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +### Stop a container + +`POST /containers/(id)/stop` + +Stop the container `id` + +**Example request**: + + POST /containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +### Restart a container + +`POST /containers/(id)/restart` + +Restart the container `id` + +**Example request**: + + POST /containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Kill a container + +`POST /containers/(id)/kill` + +Kill the container `id` + +**Example request**: + + POST /containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters + +- **signal** - Signal to send to the container: integer or string like "SIGINT". + When not set, SIGKILL is assumed and the call will wait for the container to exit. 
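The start endpoint above takes the host configuration as its JSON body, and this API version adds the 304 status for a container that is already running. A sketch, reusing the binds and port bindings from the example request (the daemon address and container ID are placeholders):

```
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Host configuration drawn from the example request above.
	hostConfig := []byte(`{
		"Binds": ["/tmp:/tmp"],
		"PortBindings": {"22/tcp": [{"HostPort": "11022"}]}
	}`)
	resp, err := http.Post(
		"http://192.168.1.9:2375/containers/e90e34656806/start",
		"application/json", bytes.NewReader(hostConfig))
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	switch resp.StatusCode {
	case http.StatusNoContent: // 204
		fmt.Println("started")
	case http.StatusNotModified: // 304, new in this version
		fmt.Println("container already started")
	default:
		fmt.Println("unexpected status:", resp.Status)
	}
}
```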
+ +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Pause a container + +`POST /containers/(id)/pause` + +Pause the container `id` + +**Example request**: + + POST /containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Unpause a container + +`POST /containers/(id)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Attach to a container + +`POST /containers/(id)/attach` + +Attach to the container `id` + +**Example request**: + + POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach to stdin. + Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + + **Stream details**: + + When using the TTY setting is enabled in + [`POST /containers/create` + ](/reference/api/docker_remote_api_v1.9/#create-a-container "POST /containers/create"), + the stream is the raw data from the process PTY and client's stdin. + When the TTY is disabled, then the stream is multiplexed to separate + stdout and stderr. + + The format is a **Header** and a **Payload** (frame). + + **HEADER** + + The header will contain the information on which stream write the + stream (stdout or stderr). It also contain the size of the + associated frame encoded on the last 4 bytes (uint32). + + It is encoded on the first 8 bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + + `STREAM_TYPE` can be: + +- 0: stdin (will be written on stdout) +- 1: stdout +- 2: stderr + + `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of + the uint32 size encoded as big endian. + + **PAYLOAD** + + The payload is the raw stream. + + **IMPLEMENTATION** + + The simplest way to implement the Attach protocol is the following: + + 1. Read 8 bytes + 2. chose stdout or stderr depending on the first byte + 3. Extract the frame size from the last 4 bytes + 4. Read the extracted size and output it on the correct output + 5. Goto 1 + +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. 
+ Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Wait a container + +`POST /containers/(id)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Remove a container + +`DELETE /containers/(id)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default false +- **force** – 1/True/true or 0/False/false, Removes the container + even if it was running. Default false + +Status Codes: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Copy files or folders from a container + +`POST /containers/(id)/copy` + +Copy files or folders of container `id` + +**Example request**: + + POST /containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +## 2.2 Images + +### List Images + +`GET /images/json` + +**Example request**: + + GET /images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275 + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135 + } + ] + + +Query Parameters: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a json encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - dangling=true +- **filter** - only return images with the specified name + +### Create an image + +`POST /images/create` + +Create an image, either by pulling it from the registry or by importing it + +**Example request**: + + POST /images/create?fromImage=ubuntu HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + + When using this endpoint to pull an image from the registry, the + `X-Registry-Auth` header can be used to include + a base64-encoded AuthConfig object. 
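For the import path of this endpoint, a sketch under the assumption that when `fromSrc` is `-` the daemon reads the filesystem tarball from the request body, mirroring what the CLI's `docker import` sends; the file name and daemon address are placeholders:

```
package main

import (
	"fmt"
	"net/http"
	"os"
)

func main() {
	// rootfs.tar is a placeholder filesystem tarball to import.
	f, err := os.Open("rootfs.tar")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer f.Close()

	// fromSrc=- means stdin on the CLI; over HTTP the tarball is
	// assumed to travel as the request body.
	resp, err := http.Post(
		"http://192.168.1.9:2375/images/create?fromSrc=-&repo=myrepo&tag=latest",
		"application/x-tar", f)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```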
+ +Query Parameters: + +- **fromImage** – name of the image to pull +- **fromSrc** – source to import, - means stdin +- **repo** – repository +- **tag** – tag +- **registry** – the registry to pull from + +Request Headers: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +Status Codes: + +- **200** – no error +- **500** – server error + + + +### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /images/ubuntu/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Created": "2013-03-23T22:24:18.818426-07:00", + "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "ContainerConfig": + { + "Hostname": "", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "PortSpecs": null, + "Tty": true, + "OpenStdin": true, + "StdinOnce": false, + "Env": null, + "Cmd": ["/bin/bash"], + "Dns": null, + "Image": "ubuntu", + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "" + }, + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Parent": "27cf784147099545", + "Size": 6824592 + } + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "b750fe79269d", + "Created": 1364102658, + "CreatedBy": "/bin/bash" + }, + { + "Id": "27cf78414709", + "Created": 1364068391, + "CreatedBy": "" + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + + If you wish to push an image on to a private registry, that image must already have been tagged + into a repository which references that registry host name and port. This repository name should + then be used in the URL. This mirrors the flow of the CLI. + +**Example request**: + + POST /images/registry.acme.com:5000/test/push HTTP/1.1 + + +Query Parameters: + +- **tag** – the tag to associate with the image on the registry, optional + +Request Headers: + +- **X-Registry-Auth** – include a base64-encoded AuthConfig object. 
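As a sketch of decoding the history endpoint above into typed values; the field names mirror the example response, `Created` is treated as a Unix timestamp, and the daemon address is a placeholder:

```
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

// historyEntry maps the fields of the GET /images/(name)/history
// example response above.
type historyEntry struct {
	Id        string
	Created   int64
	CreatedBy string
}

func main() {
	resp, err := http.Get("http://192.168.1.9:2375/images/ubuntu/history")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var history []historyEntry
	if err := json.NewDecoder(resp.Body).Decode(&history); err != nil {
		panic(err)
	}
	for _, h := range history {
		fmt.Printf("%s  %s  %q\n",
			h.Id, time.Unix(h.Created, 0).Format(time.RFC3339), h.CreatedBy)
	}
}
```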
+ +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 OK + +Query Parameters: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +Status Codes: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +Query Parameters: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +Status Codes: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +Query Parameters: + +- **term** – term to search + +Status Codes: + +- **200** – no error +- **500** – server error + +## 2.3 Misc + +### Build an image from Dockerfile via stdin + +`POST /build` + +Build an image from Dockerfile via stdin + +**Example request**: + + POST /build HTTP/1.1 + + {{ TAR STREAM }} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + + The stream must be a tar archive compressed with one of the + following algorithms: identity (no compression), gzip, bzip2, xz. + + The archive must include a file called `Dockerfile` + at its root. It may include any number of other files, + which will be accessible in the build context (See the [*ADD build + command*](/reference/builder/#dockerbuilder)). + +Query Parameters: + +- **t** – repository name (and optionally a tag) to be applied to + the resulting image in case of success +- **remote** – git or HTTP/HTTPS URI build source +- **q** – suppress verbose build output +- **nocache** – do not use the cache when building the image +- **rm** - remove intermediate containers after a successful build (default behavior) +- **forcerm** - always remove intermediate containers (includes rm) + + Request Headers: + +- **Content-type** – should be set to `"application/tar"`. 
+
- **X-Registry-Config** – base64-encoded ConfigFile object

Status Codes:

- **200** – no error
- **500** – server error

### Check auth configuration

`POST /auth`

Get the default username and email

**Example request**:

    POST /auth HTTP/1.1
    Content-Type: application/json

    {
        "username": "hannibal",
        "password": "xxxx",
        "email": "hannibal@a-team.com",
        "serveraddress": "https://index.docker.io/v1/"
    }

**Example response**:

    HTTP/1.1 200 OK

Status Codes:

- **200** – no error
- **204** – no error
- **500** – server error

### Display system-wide information

`GET /info`

Display system-wide information

**Example request**:

    GET /info HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
        "Containers": 11,
        "Images": 16,
        "Driver": "btrfs",
        "ExecutionDriver": "native-0.1",
        "KernelVersion": "3.12.0-1-amd64",
        "Debug": false,
        "NFd": 11,
        "NGoroutines": 21,
        "NEventsListener": 0,
        "InitPath": "/usr/bin/docker",
        "IndexServerAddress": ["https://index.docker.io/v1/"],
        "MemoryLimit": true,
        "SwapLimit": false,
        "IPv4Forwarding": true
    }

Status Codes:

- **200** – no error
- **500** – server error

### Show the docker version information

`GET /version`

Show the docker version information

**Example request**:

    GET /version HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
        "ApiVersion": "1.12",
        "Version": "0.2.2",
        "GitCommit": "5a2a5cc+CHANGES",
        "GoVersion": "go1.0.3"
    }

Status Codes:

- **200** – no error
- **500** – server error

### Ping the docker server

`GET /_ping`

Ping the docker server

**Example request**:

    GET /_ping HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: text/plain

    OK

Status Codes:

- **200** - no error
- **500** - server error

### Create a new image from a container's changes

`POST /commit`

Create a new image from a container's changes

**Example request**:

    POST /commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1
    Content-Type: application/json

    {
        "Hostname": "",
        "Domainname": "",
        "User": "",
        "Memory": 0,
        "MemorySwap": 0,
        "CpuShares": 512,
        "Cpuset": "0,1",
        "AttachStdin": false,
        "AttachStdout": true,
        "AttachStderr": true,
        "PortSpecs": null,
        "Tty": false,
        "OpenStdin": false,
        "StdinOnce": false,
        "Env": null,
        "Cmd": [
            "date"
        ],
        "Volumes": {
            "/tmp": {}
        },
        "WorkingDir": "",
        "NetworkDisabled": false,
        "ExposedPorts": {
            "22/tcp": {}
        }
    }

**Example response**:

    HTTP/1.1 201 Created
    Content-Type: application/json

    {"Id": "596069db4bf5"}

Json Parameters:

- **config** - the container's configuration

Query Parameters:

- **container** – source container
- **repo** – repository
- **tag** – tag
- **comment** – commit message
- **author** – author (e.g., "John Hannibal Smith
    <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")

Status Codes:

- **201** – no error
- **404** – no such container
- **500** – server error

### Monitor Docker's events

`GET /events`

Get container events from docker, either in real time via streaming, or via
polling (using `since`).
+ +Docker containers will report the following events: + + create, destroy, die, export, kill, pause, restart, start, stop, unpause + +and Docker images will report: + + untag, delete + +**Example request**: + + GET /events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} + +Query Parameters: + +- **since** – timestamp used for polling +- **until** – timestamp used for polling + +Status Codes: + +- **200** – no error +- **500** – server error + +### Get a tarball containing all images and tags in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository +specified by `name`. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +Status Codes: + +- **200** – no error +- **500** – server error + +### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into the docker repository. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /images/load + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +Status Codes: + +- **200** – no error +- **500** – server error + +### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing three files: + +1. `VERSION`: currently `1.0` - the file format version +2. `json`: detailed layer information, similar to `docker inspect layer_id` +3. `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file will contain `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, there will also be a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +# 3. Going further + +## 3.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it + - Then retry to create the container + +- Start the container + +- If you are not in detached mode: + - Attach to the container, using logs=1 (to have stdout and + stderr from the container's start) and stream=1 + +- If in detached mode or only stdin is attached: + - Display the container's id + +## 3.2 Hijacking + +In this version of the API, /attach, uses hijacking to transport stdin, +stdout and stderr on the same socket. This might change in the future. + +## 3.3 CORS Requests + +To enable cross origin requests to the remote api add the flag +"--api-enable-cors" when running docker in daemon mode. 
+ + $ docker -d -H="192.168.1.9:2375" --api-enable-cors diff --git a/docs/reference/api/docker_remote_api_v1.14.md b/docs/reference/api/docker_remote_api_v1.14.md new file mode 100644 index 00000000..b91431e6 --- /dev/null +++ b/docs/reference/api/docker_remote_api_v1.14.md @@ -0,0 +1,1450 @@ + + +# Docker Remote API v1.14 + +## 1. Brief introduction + + - The Remote API has replaced `rcli`. + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket]( + /articles/basics/#bind-docker-to-another-hostport-or-a-unix-socket). + - The API tends to be REST, but for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `STDOUT`, + `STDIN` and `STDERR`. + +# 2. Endpoints + +## 2.1 Containers + +### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Image": "ubuntu:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Image": "ubuntu:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Image": "ubuntu:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Image": "ubuntu:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +Query Parameters: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created containers, include non-running ones. +- **since** – Show only containers created since Id, include non-running ones. +- **before** – Show only containers created before Id, include non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers sizes +- **filters** - a json encoded value of the filters (a map[string][]string) to process on the containers list. 
Available filters: + - exited=<int> -- containers with exit code of <int> + - status=(restarting|running|paused|exited) + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname":"", + "Domainname": "", + "User":"", + "Memory":0, + "MemorySwap":0, + "CpuShares": 512, + "Cpuset": "0,1", + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "date" + ], + "Image":"ubuntu", + "Volumes":{ + "/tmp": {} + }, + "WorkingDir":"", + "NetworkDisabled": false, + "ExposedPorts":{ + "22/tcp": {} + }, + "RestartPolicy": { "Name": "always" } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806" + "Warnings":[] + } + +Json Parameters: + +- **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) + An ever increasing delay (double the previous delay, starting at 100mS) + is added before each restart to prevent flooding the server. +- **config** – the container's configuration + +Query Parameters: + +- **name** – Assign the specified name to the container. Must match `/?[a-zA-Z0-9_-]+`. + +Status Codes: + +- **201** – no error +- **404** – no such container +- **406** – impossible to attach (container not running) +- **500** – server error + +### Inspect a container + +`GET /containers/(id)/json` + +Return low-level information on the container `id` + + +**Example request**: + + GET /containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Created": "2013-05-07T14:51:42.041847+02:00", + "Path": "date", + "Args": [], + "Config": { + "Hostname": "4fa6e0f0c678", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Dns": null, + "Image": "ubuntu", + "Volumes": {}, + "VolumesFrom": "", + "WorkingDir": "" + }, + "State": { + "Running": false, + "Pid": 0, + "ExitCode": 0, + "StartedAt": "2013-05-07T14:51:42.087658+02:01360", + "Ghost": false + }, + "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "NetworkSettings": { + "IpAddress": "", + "IpPrefixLen": 0, + "Gateway": "", + "Bridge": "", + "PortMapping": null + }, + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", + "ResolvConfPath": "/etc/resolv.conf", + "Volumes": {}, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "LxcConf": [], + "Privileged": false, + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "49153" + } + ] + }, + "Links": ["/name:alias"], + "PublishAllPorts": false, + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"] + } + } + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error 
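As an illustration of consuming this endpoint, here is a minimal Python sketch (assuming a
daemon bound to TCP at `localhost:2375` and an existing container id) that reads the
`State` and `PortBindings` fields out of the response:

    import http.client
    import json

    conn = http.client.HTTPConnection("localhost", 2375)  # assumed TCP-bound daemon
    conn.request("GET", "/containers/4fa6e0f0c678/json")
    resp = conn.getresponse()
    if resp.status == 404:
        raise SystemExit("no such container")
    info = json.loads(resp.read())

    # Runtime facts live under "State"; port mappings under "HostConfig".
    print(info["State"]["Running"], info["State"]["ExitCode"])
    for port, bindings in (info["HostConfig"]["PortBindings"] or {}).items():
        for b in bindings:
            print(port, "->", b["HostIp"] + ":" + b["HostPort"])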
+ +### List processes running inside a container + +`GET /containers/(id)/top` + +List processes running inside the container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles": [ + "USER", + "PID", + "%CPU", + "%MEM", + "VSZ", + "RSS", + "TTY", + "STAT", + "START", + "TIME", + "COMMAND" + ], + "Processes": [ + ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], + ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] + ] + } + +Query Parameters: + +- **ps_args** – ps arguments to use (e.g., aux) + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Get container logs + +`GET /containers/(id)/logs` + +Get stdout and stderr logs from the container ``id`` + +**Example request**: + + GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + +Query Parameters: + +- **follow** – 1/True/true or 0/False/false, return stream. Default false +- **stdout** – 1/True/true or 0/False/false, show stdout log. Default false +- **stderr** – 1/True/true or 0/False/false, show stderr log. Default false +- **timestamps** – 1/True/true or 0/False/false, print timestamps for every + log line. Default false +- **tail** – Output specified number of lines at the end of logs: `all` or + ``. Default all + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Inspect changes on a container's filesystem + +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Export a container + +`GET /containers/(id)/export` + +Export the contents of container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Start a container + +`POST /containers/(id)/start` + +Start the container `id` + +**Example request**: + + POST /containers/(id)/start HTTP/1.1 + Content-Type: application/json + + { + "Binds":["/tmp:/tmp"], + "Links":["redis3:redis"], + "LxcConf":[{"Key":"lxc.utsname","Value":"docker"}], + "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts":false, + "Privileged":false, + "Dns": ["8.8.8.8"], + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"] + } + +**Example response**: + + HTTP/1.1 204 No Content + +Json Parameters: + +- **hostConfig** – the container's host configuration (optional) + +Status Codes: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +### Stop a container + +`POST /containers/(id)/stop` + +Stop the container `id` + +**Example request**: + + POST /containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 
No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +### Restart a container + +`POST /containers/(id)/restart` + +Restart the container `id` + +**Example request**: + + POST /containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Kill a container + +`POST /containers/(id)/kill` + +Kill the container `id` + +**Example request**: + + POST /containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters + +- **signal** - Signal to send to the container: integer or string like "SIGINT". + When not set, SIGKILL is assumed and the call will wait for the container to exit. + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Pause a container + +`POST /containers/(id)/pause` + +Pause the container `id` + +**Example request**: + + POST /containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Unpause a container + +`POST /containers/(id)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Attach to a container + +`POST /containers/(id)/attach` + +Attach to the container `id` + +**Example request**: + + POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach to stdin. + Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + + **Stream details**: + + When using the TTY setting is enabled in + [`POST /containers/create` + ](/reference/api/docker_remote_api_v1.9/#create-a-container "POST /containers/create"), + the stream is the raw data from the process PTY and client's stdin. + When the TTY is disabled, then the stream is multiplexed to separate + stdout and stderr. + + The format is a **Header** and a **Payload** (frame). + + **HEADER** + + The header will contain the information on which stream write the + stream (stdout or stderr). It also contain the size of the + associated frame encoded on the last 4 bytes (uint32). 
+ + It is encoded on the first 8 bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + + `STREAM_TYPE` can be: + +- 0: stdin (will be written on stdout) +- 1: stdout +- 2: stderr + + `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of + the uint32 size encoded as big endian. + + **PAYLOAD** + + The payload is the raw stream. + + **IMPLEMENTATION** + + The simplest way to implement the Attach protocol is the following: + + 1. Read 8 bytes + 2. chose stdout or stderr depending on the first byte + 3. Extract the frame size from the last 4 bytes + 4. Read the extracted size and output it on the correct output + 5. Goto 1 + +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Wait a container + +`POST /containers/(id)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Remove a container + +`DELETE /containers/(id)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default false +- **force** - 1/True/true or 0/False/false, Kill then remove the container. 
+ Default false + +Status Codes: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Copy files or folders from a container + +`POST /containers/(id)/copy` + +Copy files or folders of container `id` + +**Example request**: + + POST /containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +## 2.2 Images + +### List Images + +`GET /images/json` + +**Example request**: + + GET /images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275 + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135 + } + ] + + +Query Parameters: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a json encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - dangling=true +- **filter** - only return images with the specified name + +### Create an image + +`POST /images/create` + +Create an image, either by pulling it from the registry or by importing it + +**Example request**: + + POST /images/create?fromImage=ubuntu HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + + When using this endpoint to pull an image from the registry, the + `X-Registry-Auth` header can be used to include + a base64-encoded AuthConfig object. 
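For illustration, a minimal Python sketch of a pull that follows the progress stream
(assuming a daemon bound to TCP at `localhost:2375`); the body is a sequence of JSON
objects, and an `error` key may appear at any point:

    import http.client
    import json

    conn = http.client.HTTPConnection("localhost", 2375)  # assumed TCP-bound daemon
    conn.request("POST", "/images/create?fromImage=ubuntu")
    resp = conn.getresponse()

    # One JSON object per line for as long as the pull is in flight.
    for line in resp:
        if not line.strip():
            continue
        msg = json.loads(line)
        if "error" in msg:
            raise SystemExit("pull failed: " + msg["error"])
        print(msg.get("status", ""), msg.get("progress", ""))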
+ 

Query Parameters:

- **fromImage** – name of the image to pull
- **fromSrc** – source to import, - means stdin
- **repo** – repository
- **tag** – tag
- **registry** – the registry to pull from

Request Headers:

- **X-Registry-Auth** – base64-encoded AuthConfig object

Status Codes:

- **200** – no error
- **500** – server error



### Inspect an image

`GET /images/(name)/json`

Return low-level information on the image `name`

**Example request**:

    GET /images/ubuntu/json HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
        "Created": "2013-03-23T22:24:18.818426-07:00",
        "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0",
        "ContainerConfig":
        {
            "Hostname": "",
            "User": "",
            "Memory": 0,
            "MemorySwap": 0,
            "AttachStdin": false,
            "AttachStdout": false,
            "AttachStderr": false,
            "PortSpecs": null,
            "Tty": true,
            "OpenStdin": true,
            "StdinOnce": false,
            "Env": null,
            "Cmd": ["/bin/bash"],
            "Dns": null,
            "Image": "ubuntu",
            "Volumes": null,
            "VolumesFrom": "",
            "WorkingDir": ""
        },
        "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
        "Parent": "27cf784147099545",
        "Size": 6824592
    }

Status Codes:

- **200** – no error
- **404** – no such image
- **500** – server error

### Get the history of an image

`GET /images/(name)/history`

Return the history of the image `name`

**Example request**:

    GET /images/ubuntu/history HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    [
        {
            "Id": "b750fe79269d",
            "Created": 1364102658,
            "CreatedBy": "/bin/bash"
        },
        {
            "Id": "27cf78414709",
            "Created": 1364068391,
            "CreatedBy": ""
        }
    ]

Status Codes:

- **200** – no error
- **404** – no such image
- **500** – server error

### Push an image on the registry

`POST /images/(name)/push`

Push the image `name` on the registry

**Example request**:

    POST /images/test/push HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {"status": "Pushing..."}
    {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}
    {"error": "Invalid..."}
    ...

    If you wish to push an image onto a private registry, that image must already have been tagged
    into a repository that references that registry's host name and port. This repository name should
    then be used in the URL. This mirrors the flow of the CLI.

**Example request**:

    POST /images/registry.acme.com:5000/test/push HTTP/1.1


Query Parameters:

- **tag** – the tag to associate with the image on the registry, optional

Request Headers:

- **X-Registry-Auth** – include a base64-encoded AuthConfig object.
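Note that a `200` status only means the push has started: failures are reported as
`error` objects inside the progress stream itself. A minimal Python sketch (TCP-bound
daemon and hypothetical credentials assumed) that watches for them:

    import base64
    import http.client
    import json

    # Hypothetical credentials, encoded as shown for v1.13 above.
    auth = base64.b64encode(json.dumps({
        "username": "hannibal",
        "password": "xxxx",
        "email": "hannibal@a-team.com",
        "serveraddress": "https://index.docker.io/v1/",
    }).encode()).decode()

    conn = http.client.HTTPConnection("localhost", 2375)  # assumed TCP-bound daemon
    conn.request("POST", "/images/test/push", headers={"X-Registry-Auth": auth})
    resp = conn.getresponse()

    for line in resp:
        if not line.strip():
            continue
        msg = json.loads(line)
        if "error" in msg:
            raise SystemExit("push failed: " + msg["error"])
        print(msg.get("status", ""), msg.get("progress", ""))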
+ +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 OK + +Query Parameters: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +Status Codes: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +Query Parameters: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +Status Codes: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +Query Parameters: + +- **term** – term to search + +Status Codes: + +- **200** – no error +- **500** – server error + +## 2.3 Misc + +### Build an image from Dockerfile via stdin + +`POST /build` + +Build an image from Dockerfile via stdin + +**Example request**: + + POST /build HTTP/1.1 + + {{ TAR STREAM }} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + + The stream must be a tar archive compressed with one of the + following algorithms: identity (no compression), gzip, bzip2, xz. + + The archive must include a file called `Dockerfile` + at its root. It may include any number of other files, + which will be accessible in the build context (See the [*ADD build + command*](/reference/builder/#dockerbuilder)). + +Query Parameters: + +- **t** – repository name (and optionally a tag) to be applied to + the resulting image in case of success +- **remote** – git or HTTP/HTTPS URI build source +- **q** – suppress verbose build output +- **nocache** – do not use the cache when building the image +- **rm** - remove intermediate containers after a successful build (default behavior) +- **forcerm** - always remove intermediate containers (includes rm) + + Request Headers: + +- **Content-type** – should be set to `"application/tar"`. 
+
- **X-Registry-Config** – base64-encoded ConfigFile object

Status Codes:

- **200** – no error
- **500** – server error

### Check auth configuration

`POST /auth`

Get the default username and email

**Example request**:

    POST /auth HTTP/1.1
    Content-Type: application/json

    {
        "username": "hannibal",
        "password": "xxxx",
        "email": "hannibal@a-team.com",
        "serveraddress": "https://index.docker.io/v1/"
    }

**Example response**:

    HTTP/1.1 200 OK

Status Codes:

- **200** – no error
- **204** – no error
- **500** – server error

### Display system-wide information

`GET /info`

Display system-wide information

**Example request**:

    GET /info HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
        "Containers": 11,
        "Images": 16,
        "Driver": "btrfs",
        "ExecutionDriver": "native-0.1",
        "KernelVersion": "3.12.0-1-amd64",
        "Debug": false,
        "NFd": 11,
        "NGoroutines": 21,
        "NEventsListener": 0,
        "InitPath": "/usr/bin/docker",
        "IndexServerAddress": ["https://index.docker.io/v1/"],
        "MemoryLimit": true,
        "SwapLimit": false,
        "IPv4Forwarding": true
    }

Status Codes:

- **200** – no error
- **500** – server error

### Show the docker version information

`GET /version`

Show the docker version information

**Example request**:

    GET /version HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
        "ApiVersion": "1.12",
        "Version": "0.2.2",
        "GitCommit": "5a2a5cc+CHANGES",
        "GoVersion": "go1.0.3"
    }

Status Codes:

- **200** – no error
- **500** – server error

### Ping the docker server

`GET /_ping`

Ping the docker server

**Example request**:

    GET /_ping HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: text/plain

    OK

Status Codes:

- **200** - no error
- **500** - server error

### Create a new image from a container's changes

`POST /commit`

Create a new image from a container's changes

**Example request**:

    POST /commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1
    Content-Type: application/json

    {
        "Hostname": "",
        "Domainname": "",
        "User": "",
        "Memory": 0,
        "MemorySwap": 0,
        "CpuShares": 512,
        "Cpuset": "0,1",
        "AttachStdin": false,
        "AttachStdout": true,
        "AttachStderr": true,
        "PortSpecs": null,
        "Tty": false,
        "OpenStdin": false,
        "StdinOnce": false,
        "Env": null,
        "Cmd": [
            "date"
        ],
        "Volumes": {
            "/tmp": {}
        },
        "WorkingDir": "",
        "NetworkDisabled": false,
        "ExposedPorts": {
            "22/tcp": {}
        }
    }

**Example response**:

    HTTP/1.1 201 Created
    Content-Type: application/json

    {"Id": "596069db4bf5"}

Json Parameters:

- **config** - the container's configuration

Query Parameters:

- **container** – source container
- **repo** – repository
- **tag** – tag
- **comment** – commit message
- **author** – author (e.g., "John Hannibal Smith
    <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")

Status Codes:

- **201** – no error
- **404** – no such container
- **500** – server error

### Monitor Docker's events

`GET /events`

Get container events from docker, either in real time via streaming, or via
polling (using `since`).
+ +Docker containers will report the following events: + + create, destroy, die, export, kill, pause, restart, start, stop, unpause + +and Docker images will report: + + untag, delete + +**Example request**: + + GET /events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} + +Query Parameters: + +- **since** – timestamp used for polling +- **until** – timestamp used for polling + +Status Codes: + +- **200** – no error +- **500** – server error + +### Get a tarball containing all images and tags in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository +specified by `name`. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +Status Codes: + +- **200** – no error +- **500** – server error + +### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into the docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /images/load + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +Status Codes: + +- **200** – no error +- **500** – server error + +### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing three files: + +1. `VERSION`: currently `1.0` - the file format version +2. `json`: detailed layer information, similar to `docker inspect layer_id` +3. `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file will contain `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, there will also be a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +# 3. Going further + +## 3.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it + - Then retry to create the container + +- Start the container + +- If you are not in detached mode: + - Attach to the container, using logs=1 (to have stdout and + stderr from the container's start) and stream=1 + +- If in detached mode or only stdin is attached: + - Display the container's id + +## 3.2 Hijacking + +In this version of the API, /attach, uses hijacking to transport stdin, +stdout and stderr on the same socket. This might change in the future. + +## 3.3 CORS Requests + +To enable cross origin requests to the remote api add the flag +"--api-enable-cors" when running docker in daemon mode. 
+ + $ docker -d -H="192.168.1.9:2375" --api-enable-cors diff --git a/docs/reference/api/docker_remote_api_v1.15.md b/docs/reference/api/docker_remote_api_v1.15.md new file mode 100644 index 00000000..03c03cfc --- /dev/null +++ b/docs/reference/api/docker_remote_api_v1.15.md @@ -0,0 +1,1744 @@ + + +# Docker Remote API v1.15 + +## 1. Brief introduction + + - The Remote API has replaced `rcli`. + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket]( + /articles/basics/#bind-docker-to-another-hostport-or-a-unix-socket). + - The API tends to be REST, but for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `STDOUT`, + `STDIN` and `STDERR`. + +# 2. Endpoints + +## 2.1 Containers + +### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +Query Parameters: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a json encoded value of the filters (a map[string][]string) to process on the containers list. 
Available filters: + - exited=<int> -- containers with exit code of <int> + - status=(restarting|running|paused|exited) + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 512, + "Cpuset": "0,1", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Entrypoint": "", + "Image": "ubuntu", + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "SecurityOpts": [""], + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [] + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f91ddc4b01e079c4481a8340bbbeca4dbd33d6e4a10662e499f8eacbb5bf252b" + "Warnings": [] + } + +Json Parameters: + +- **Hostname** - A string value containing the desired hostname to use for the + container. +- **Domainname** - A string value containing the desired domain name to use + for the container. +- **User** - A string value containing the user to use inside the container. +- **Memory** - Memory limit in bytes. +- **MemorySwap**- Total memory usage (memory + swap); set `-1` to disable swap. +- **CpuShares** - An integer value containing the CPU Shares for container + (ie. the relative weight vs other containers). + **CpuSet** - String value containing the cgroups Cpuset to use. +- **AttachStdin** - Boolean value, attaches to stdin. +- **AttachStdout** - Boolean value, attaches to stdout. +- **AttachStderr** - Boolean value, attaches to stderr. +- **Tty** - Boolean value, Attach standard streams to a tty, including stdin if it is not closed. +- **OpenStdin** - Boolean value, opens stdin, +- **StdinOnce** - Boolean value, close stdin after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `VAR=value` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entrypoint for the container a string or an array + of strings +- **Image** - String value containing the image name to use for the container +- **Volumes** – An object mapping mountpoint paths (strings) inside the + container to empty objects. +- **WorkingDir** - A string value containing the working dir for commands to + run in. +- **NetworkDisabled** - Boolean value, when true disables networking for the + container +- **ExposedPorts** - An object mapping ports to an empty object in the form of: + `"ExposedPorts": { "/: {}" }` +- **SecurityOpts**: A list of string values to customize labels for MLS + systems, such as SELinux. +- **HostConfig** + - **Binds** – A list of volume bindings for this container. 
Each volume + binding is a string of the form `container_path` (to create a new + volume for the container), `host_path:container_path` (to bind-mount + a host path into the container), or `host_path:container_path:ro` + (to make the bind-mount read-only inside the container). + - **Links** - A list of links for the container. Each link entry should be + in the form of "container_name:alias". + - **LxcConf** - LXC specific configurations. These configurations will only + work when using the `lxc` execution driver. + - **PortBindings** - A map of exposed container ports and the host port they + should map to. It should be specified in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. + - **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. + - **Privileged** - Gives the container full access to the host. Specified as + a boolean value. + - **Dns** - A list of dns servers for the container to use. + - **DnsSearch** - A list of DNS search domains + - **ExtraHosts** - A list of hostnames/IP mappings to be added to the + container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + - **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` + - **CapAdd** - A list of kernel capabilities to add to the container. + - **Capdrop** - A list of kernel capabilities to drop from the container. + - **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) + An ever increasing delay (double the previous delay, starting at 100mS) + is added before each restart to prevent flooding the server. + - **NetworkMode** - Sets the networking mode for the container. Supported + values are: `bridge`, `host`, and `container:` + - **Devices** - A list of devices to add to the container specified in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` + +Query Parameters: + +- **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. 
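To make the three `Binds` forms and the `name` parameter concrete, here is a minimal
Python sketch of a create request (the daemon address, image, and host paths are
illustrative assumptions):

    import http.client
    import json

    body = json.dumps({
        "Image": "ubuntu",
        "Cmd": ["date"],
        "HostConfig": {
            "Binds": [
                "/data",                  # new volume at /data in the container
                "/srv/logs:/var/log",     # bind-mount a host path
                "/srv/conf:/etc/app:ro",  # read-only bind-mount
            ],
        },
    })

    conn = http.client.HTTPConnection("localhost", 2375)  # assumed TCP-bound daemon
    # The name must match /?[a-zA-Z0-9_-]+
    conn.request("POST", "/containers/create?name=web_1", body=body,
                 headers={"Content-Type": "application/json"})
    resp = conn.getresponse()
    if resp.status != 201:
        raise SystemExit(resp.read().decode())
    created = json.loads(resp.read())
    print(created["Id"], created["Warnings"])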
+ +Status Codes: + +- **201** – no error +- **404** – no such container +- **406** – impossible to attach (container not running) +- **500** – server error + +### Inspect a container + +`GET /containers/(id)/json` + +Return low-level information on the container `id` + + +**Example request**: + + GET /containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Created": "2013-05-07T14:51:42.041847+02:00", + "Path": "date", + "Args": [], + "Config": { + "Hostname": "4fa6e0f0c678", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Dns": null, + "Image": "ubuntu", + "Volumes": {}, + "VolumesFrom": "", + "WorkingDir": "" + }, + "State": { + "Running": false, + "Pid": 0, + "ExitCode": 0, + "StartedAt": "2013-05-07T14:51:42.087658+02:01360", + "Ghost": false + }, + "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "NetworkSettings": { + "IpAddress": "", + "IpPrefixLen": 0, + "Gateway": "", + "Bridge": "", + "PortMapping": null + }, + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", + "ResolvConfPath": "/etc/resolv.conf", + "Volumes": {}, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "LxcConf": [], + "Privileged": false, + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "49153" + } + ] + }, + "Links": ["/name:alias"], + "PublishAllPorts": false, + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"] + } + } + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### List processes running inside a container + +`GET /containers/(id)/top` + +List processes running inside the container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles": [ + "USER", + "PID", + "%CPU", + "%MEM", + "VSZ", + "RSS", + "TTY", + "STAT", + "START", + "TIME", + "COMMAND" + ], + "Processes": [ + ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], + ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] + ] + } + +Query Parameters: + +- **ps_args** – ps arguments to use (e.g., aux) + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Get container logs + +`GET /containers/(id)/logs` + +Get stdout and stderr logs from the container ``id`` + +**Example request**: + + GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + +Query Parameters: + +- **follow** – 1/True/true or 0/False/false, return stream. Default false +- **stdout** – 1/True/true or 0/False/false, show stdout log. Default false +- **stderr** – 1/True/true or 0/False/false, show stderr log. Default false +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default false +- **tail** – Output specified number of lines at the end of logs: `all` or ``. 
Default all + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Inspect changes on a container's filesystem + +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Export a container + +`GET /containers/(id)/export` + +Export the contents of container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Resize a container TTY + +`GET /containers/(id)/resize?h=&w=` + +Resize the TTY of container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +Status Codes: + +- **200** – no error +- **404** – No such container +- **500** – bad file descriptor + +### Start a container + +`POST /containers/(id)/start` + +Start the container `id` + +**Example request**: + + POST /containers/(id)/start HTTP/1.1 + Content-Type: application/json + + { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [] + } + +**Example response**: + + HTTP/1.1 204 No Content + +Json Parameters: + +- **Binds** – A list of volume bindings for this container. Each volume + binding is a string of the form `container_path` (to create a new + volume for the container), `host_path:container_path` (to bind-mount + a host path into the container), or `host_path:container_path:ro` + (to make the bind-mount read-only inside the container). +- **Links** - A list of links for the container. Each link entry should be of + of the form "container_name:alias". +- **LxcConf** - LXC specific configurations. These configurations will only + work when using the `lxc` execution driver. +- **PortBindings** - A map of exposed container ports and the host port they + should map to. It should be specified in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. +- **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. +- **Privileged** - Gives the container full access to the host. Specified as + a boolean value. +- **Dns** - A list of dns servers for the container to use. +- **DnsSearch** - A list of DNS search domains +- **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` +- **CapAdd** - A list of kernel capabilities to add to the container. +- **Capdrop** - A list of kernel capabilities to drop from the container. 
+- **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) + An ever increasing delay (double the previous delay, starting at 100mS) + is added before each restart to prevent flooding the server. +- **NetworkMode** - Sets the networking mode for the container. Supported + values are: `bridge`, `host`, and `container:` +- **Devices** - A list of devices to add to the container specified in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` + +Status Codes: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +### Stop a container + +`POST /containers/(id)/stop` + +Stop the container `id` + +**Example request**: + + POST /containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +### Restart a container + +`POST /containers/(id)/restart` + +Restart the container `id` + +**Example request**: + + POST /containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Kill a container + +`POST /containers/(id)/kill` + +Kill the container `id` + +**Example request**: + + POST /containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters + +- **signal** - Signal to send to the container: integer or string like "SIGINT". + When not set, SIGKILL is assumed and the call will waits for the container to exit. + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Pause a container + +`POST /containers/(id)/pause` + +Pause the container `id` + +**Example request**: + + POST /containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Unpause a container + +`POST /containers/(id)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Attach to a container + +`POST /containers/(id)/attach` + +Attach to the container `id` + +**Example request**: + + POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. 
Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + + **Stream details**: + + When using the TTY setting is enabled in + [`POST /containers/create` + ](/reference/api/docker_remote_api_v1.9/#create-a-container "POST /containers/create"), + the stream is the raw data from the process PTY and client's stdin. + When the TTY is disabled, then the stream is multiplexed to separate + stdout and stderr. + + The format is a **Header** and a **Payload** (frame). + + **HEADER** + + The header will contain the information on which stream write the + stream (stdout or stderr). It also contain the size of the + associated frame encoded on the last 4 bytes (uint32). + + It is encoded on the first 8 bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + + `STREAM_TYPE` can be: + +- 0: stdin (will be written on stdout) +- 1: stdout +- 2: stderr + + `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of + the uint32 size encoded as big endian. + + **PAYLOAD** + + The payload is the raw stream. + + **IMPLEMENTATION** + + The simplest way to implement the Attach protocol is the following: + + 1. Read 8 bytes + 2. chose stdout or stderr depending on the first byte + 3. Extract the frame size from the last 4 bytes + 4. Read the extracted size and output it on the correct output + 5. Goto 1 + +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Wait a container + +`POST /containers/(id)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Remove a container + +`DELETE /containers/(id)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default false +- **force** - 1/True/true or 0/False/false, Kill then remove the container. 
+ Default false + +Status Codes: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Copy files or folders from a container + +`POST /containers/(id)/copy` + +Copy files or folders of container `id` + +**Example request**: + + POST /containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +## 2.2 Images + +### List Images + +`GET /images/json` + +**Example request**: + + GET /images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275 + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135 + } + ] + + +Query Parameters: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a json encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - dangling=true +- **filter** - only return images with the specified name + +### Create an image + +`POST /images/create` + +Create an image, either by pulling it from the registry or by importing it + +**Example request**: + + POST /images/create?fromImage=ubuntu HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + + When using this endpoint to pull an image from the registry, the + `X-Registry-Auth` header can be used to include + a base64-encoded AuthConfig object. + +Query Parameters: + +- **fromImage** – name of the image to pull +- **fromSrc** – source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. 
+- **repo** – repository +- **tag** – tag +- **registry** – the registry to pull from + + Request Headers: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +Status Codes: + +- **200** – no error +- **500** – server error + + + +### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /images/ubuntu/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Created": "2013-03-23T22:24:18.818426-07:00", + "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "ContainerConfig": + { + "Hostname": "", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "PortSpecs": null, + "Tty": true, + "OpenStdin": true, + "StdinOnce": false, + "Env": null, + "Cmd": ["/bin/bash"], + "Dns": null, + "Image": "ubuntu", + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "" + }, + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Parent": "27cf784147099545", + "Size": 6824592 + } + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "b750fe79269d", + "Created": 1364102658, + "CreatedBy": "/bin/bash" + }, + { + "Id": "27cf78414709", + "Created": 1364068391, + "CreatedBy": "" + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + + If you wish to push an image on to a private registry, that image must already have been tagged + into a repository which references that registry host name and port. This repository name should + then be used in the URL. This mirrors the flow of the CLI. + +**Example request**: + + POST /images/registry.acme.com:5000/test/push HTTP/1.1 + + +Query Parameters: + +- **tag** – the tag to associate with the image on the registry, optional + +Request Headers: + +- **X-Registry-Auth** – include a base64-encoded AuthConfig + object. 
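+
+For illustration, here is one way a client might build that header value.
+This is a non-normative sketch using Python 3's standard library; the field
+names are those of the AuthConfig object shown under `POST /auth`:
+
+    import base64
+    import json
+
+    # AuthConfig object, serialized and base64-encoded as required
+    auth_config = {
+        "username": "hannibal",
+        "password": "xxxx",
+        "email": "hannibal@a-team.com",
+        "serveraddress": "https://index.docker.io/v1/",
+    }
+    x_registry_auth = base64.b64encode(
+        json.dumps(auth_config).encode("utf-8")).decode("ascii")
+    # Send this value in the X-Registry-Auth header of the push request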
+ +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 OK + +Query Parameters: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +Status Codes: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +Query Parameters: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +Status Codes: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +Query Parameters: + +- **term** – term to search + +Status Codes: + +- **200** – no error +- **500** – server error + +## 2.3 Misc + +### Build an image from Dockerfile via stdin + +`POST /build` + +Build an image from Dockerfile via stdin + +**Example request**: + + POST /build HTTP/1.1 + + {{ TAR STREAM }} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + + The stream must be a tar archive compressed with one of the + following algorithms: identity (no compression), gzip, bzip2, xz. + + The archive must include a file called `Dockerfile` + at its root. It may include any number of other files, + which will be accessible in the build context (See the [*ADD build + command*](/reference/builder/#dockerbuilder)). + +Query Parameters: + +- **t** – repository name (and optionally a tag) to be applied to + the resulting image in case of success +- **remote** – git or HTTP/HTTPS URI build source +- **q** – suppress verbose build output +- **nocache** – do not use the cache when building the image +- **rm** - remove intermediate containers after a successful build (default behavior) +- **forcerm** - always remove intermediate containers (includes rm) + + Request Headers: + +- **Content-type** – should be set to `"application/tar"`. 
+- **X-Registry-Config** – base64-encoded ConfigFile object + +Status Codes: + +- **200** – no error +- **500** – server error + +### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /auth HTTP/1.1 + Content-Type: application/json + + { + "username":" hannibal", + "password: "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +Status Codes: + +- **200** – no error +- **204** – no error +- **500** – server error + +### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers": 11, + "Images": 16, + "Driver": "btrfs", + "ExecutionDriver": "native-0.1", + "KernelVersion": "3.12.0-1-amd64" + "Debug": false, + "NFd": 11, + "NGoroutines": 21, + "NEventsListener": 0, + "InitPath": "/usr/bin/docker", + "IndexServerAddress": ["https://index.docker.io/v1/"], + "MemoryLimit": true, + "SwapLimit": false, + "IPv4Forwarding": true + } + +Status Codes: + +- **200** – no error +- **500** – server error + +### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "ApiVersion": "1.12", + "Version": "0.2.2", + "GitCommit": "5a2a5cc+CHANGES", + "GoVersion": "go1.0.3" + } + +Status Codes: + +- **200** – no error +- **500** – server error + +### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +Status Codes: + +- **200** - no error +- **500** - server error + +### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 512, + "Cpuset": "0,1", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/vnd.docker.raw-stream + + {"Id": "596069db4bf5"} + +Json Parameters: + +- **config** - the container's configuration + +Query Parameters: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") + +Status Codes: + +- **201** – no error +- **404** – no such container +- **500** – server error + +### Monitor Docker's events + +`GET /events` + +Get container events from docker, either in real time via streaming, or via +polling (using since). 
+ +Docker containers will report the following events: + + create, destroy, die, export, kill, pause, restart, start, stop, unpause + +and Docker images will report: + + untag, delete + +**Example request**: + + GET /events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} + +Query Parameters: + +- **since** – timestamp used for polling +- **until** – timestamp used for polling + +Status Codes: + +- **200** – no error +- **500** – server error + +### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +Status Codes: + +- **200** – no error +- **500** – server error + +### Get a tarball containing all images. + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +ubuntu:latest), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +Status Codes: + +- **200** – no error +- **500** – server error + +### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into the docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /images/load + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +Status Codes: + +- **200** – no error +- **500** – server error + +### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing three files: + +1. `VERSION`: currently `1.0` - the file format version +2. `json`: detailed layer information, similar to `docker inspect layer_id` +3. `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file will contain `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, there will also be a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. 
+
+```
+{"hello-world":
+    {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"}
+}
+```
+
+### Exec Create
+
+`POST /containers/(id)/exec`
+
+Sets up an exec instance in a running container `id`
+
+**Example request**:
+
+    POST /containers/e90e34656806/exec HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "AttachStdin": false,
+      "AttachStdout": true,
+      "AttachStderr": true,
+      "Tty": false,
+      "Cmd": [
+        "date"
+      ]
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {
+      "Id": "f90e34656806"
+    }
+
+Json Parameters:
+
+- **AttachStdin** - Boolean value, attaches to stdin of the exec command.
+- **AttachStdout** - Boolean value, attaches to stdout of the exec command.
+- **AttachStderr** - Boolean value, attaches to stderr of the exec command.
+- **Tty** - Boolean value to allocate a pseudo-TTY
+- **Cmd** - Command to run, specified as a string or an array of strings.
+
+
+Status Codes:
+
+- **201** – no error
+- **404** – no such container
+
+### Exec Start
+
+`POST /exec/(id)/start`
+
+Starts a previously set up exec instance `id`. If `detach` is true, this API
+returns after starting the `exec` command. Otherwise, this API sets up an
+interactive session with the `exec` command.
+
+**Example request**:
+
+    POST /exec/e90e34656806/start HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "Detach": false,
+      "Tty": false
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {{ STREAM }}
+
+Json Parameters:
+
+- **Detach** - Detach from the exec command
+- **Tty** - Boolean value to allocate a pseudo-TTY
+
+Status Codes:
+
+- **201** – no error
+- **404** – no such exec instance
+
+   **Stream details**:
+   Similar to the stream behavior of the `POST /containers/(id)/attach` API
+
+### Exec Resize
+
+`POST /exec/(id)/resize`
+
+Resizes the tty session used by the exec command `id`.
+This API is valid only if `tty` was specified as part of creating and starting the exec command.
+
+**Example request**:
+
+    POST /exec/e90e34656806/resize HTTP/1.1
+    Content-Type: text/plain
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: text/plain
+
+Query Parameters:
+
+- **h** – height of tty session
+- **w** – width of tty session
+
+Status Codes:
+
+- **201** – no error
+- **404** – no such exec instance
+
+# 3. Going further
+
+## 3.1 Inside `docker run`
+
+As an example, the `docker run` command line makes the following API calls:
+
+- Create the container
+
+- If the status code is 404, it means the image doesn't exist:
+    - Try to pull it
+    - Then retry to create the container
+
+- Start the container
+
+- If you are not in detached mode:
+    - Attach to the container, using logs=1 (to have stdout and
+      stderr from the container's start) and stream=1
+
+- If in detached mode or only stdin is attached:
+    - Display the container's id
+
+## 3.2 Hijacking
+
+In this version of the API, `/attach` uses hijacking to transport stdin,
+stdout and stderr on the same socket. This might change in the future.
+
+## 3.3 CORS Requests
+
+To enable cross-origin requests to the remote API, add the flag
+`--api-enable-cors` when running Docker in daemon mode.
+
+    $ docker -d -H="192.168.1.9:2375" --api-enable-cors
diff --git a/docs/reference/api/docker_remote_api_v1.16.md b/docs/reference/api/docker_remote_api_v1.16.md
new file mode 100644
index 00000000..042a1556
--- /dev/null
+++ b/docs/reference/api/docker_remote_api_v1.16.md
@@ -0,0 +1,1813 @@
+
+
+# Docker Remote API v1.16
+
+## 1. Brief introduction
+
+ - The Remote API has replaced `rcli`.
+ - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket]( + /articles/basics/#bind-docker-to-another-hostport-or-a-unix-socket). + - The API tends to be REST, but for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `STDOUT`, + `STDIN` and `STDERR`. + +# 2. Endpoints + +## 2.1 Containers + +### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Names":["/sleep_dog"], + "Image": "ubuntu:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +Query Parameters: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a json encoded value of the filters (a map[string][]string) to process on the containers list. 
Available filters: + - exited=<int> -- containers with exit code of <int> + - status=(restarting|running|paused|exited) + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 512, + "Cpuset": "0,1", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Entrypoint": "", + "Image": "ubuntu", + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "SecurityOpts": [""], + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [] + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806" + "Warnings":[] + } + +Json Parameters: + +- **Hostname** - A string value containing the desired hostname to use for the + container. +- **Domainname** - A string value containing the desired domain name to use + for the container. +- **User** - A string value containing the user to use inside the container. +- **Memory** - Memory limit in bytes. +- **MemorySwap**- Total memory usage (memory + swap); set `-1` to disable swap. +- **CpuShares** - An integer value containing the CPU Shares for container + (ie. the relative weight vs other containers). + **CpuSet** - String value containing the cgroups Cpuset to use. +- **AttachStdin** - Boolean value, attaches to stdin. +- **AttachStdout** - Boolean value, attaches to stdout. +- **AttachStderr** - Boolean value, attaches to stderr. +- **Tty** - Boolean value, Attach standard streams to a tty, including stdin if it is not closed. +- **OpenStdin** - Boolean value, opens stdin, +- **StdinOnce** - Boolean value, close stdin after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `VAR=value` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entrypoint for the container a string or an array + of strings +- **Image** - String value containing the image name to use for the container +- **Volumes** – An object mapping mountpoint paths (strings) inside the + container to empty objects. +- **WorkingDir** - A string value containing the working dir for commands to + run in. +- **NetworkDisabled** - Boolean value, when true disables networking for the + container +- **ExposedPorts** - An object mapping ports to an empty object in the form of: + `"ExposedPorts": { "/: {}" }` +- **SecurityOpts**: A list of string values to customize labels for MLS + systems, such as SELinux. +- **HostConfig** + - **Binds** – A list of volume bindings for this container. 
Each volume + binding is a string of the form `container_path` (to create a new + volume for the container), `host_path:container_path` (to bind-mount + a host path into the container), or `host_path:container_path:ro` + (to make the bind-mount read-only inside the container). + - **Links** - A list of links for the container. Each link entry should be + in the form of "container_name:alias". + - **LxcConf** - LXC specific configurations. These configurations will only + work when using the `lxc` execution driver. + - **PortBindings** - A map of exposed container ports and the host port they + should map to. It should be specified in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. + - **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. + - **Privileged** - Gives the container full access to the host. Specified as + a boolean value. + - **Dns** - A list of dns servers for the container to use. + - **DnsSearch** - A list of DNS search domains + - **ExtraHosts** - A list of hostnames/IP mappings to be added to the + container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + - **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` + - **CapAdd** - A list of kernel capabilities to add to the container. + - **Capdrop** - A list of kernel capabilities to drop from the container. + - **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) + An ever increasing delay (double the previous delay, starting at 100mS) + is added before each restart to prevent flooding the server. + - **NetworkMode** - Sets the networking mode for the container. Supported + values are: `bridge`, `host`, and `container:` + - **Devices** - A list of devices to add to the container specified in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` + +Query Parameters: + +- **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. 
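+
+Putting the pieces together, a client might create and then start a container
+as follows. This is a minimal, non-normative sketch: it assumes Python 3's
+standard library, a daemon bound to `tcp://127.0.0.1:2375` (as in the CORS
+example at the end of this document), and a local `ubuntu` image; every field
+not sent falls back to the defaults described above:
+
+    import http.client
+    import json
+
+    conn = http.client.HTTPConnection("127.0.0.1", 2375)  # assumed TCP binding
+
+    # Minimal create body; the name must match /?[a-zA-Z0-9_-]+
+    body = json.dumps({"Image": "ubuntu", "Cmd": ["date"]})
+    conn.request("POST", "/containers/create?name=my_container", body=body,
+                 headers={"Content-Type": "application/json"})
+    created = json.loads(conn.getresponse().read())  # {"Id": ..., "Warnings": ...}
+
+    conn.request("POST", "/containers/%s/start" % created["Id"])
+    print(conn.getresponse().status)  # 204 when the container starts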
+ +Status Codes: + +- **201** – no error +- **404** – no such container +- **406** – impossible to attach (container not running) +- **500** – server error + +### Inspect a container + +`GET /containers/(id)/json` + +Return low-level information on the container `id` + + +**Example request**: + + GET /containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Created": "2013-05-07T14:51:42.041847+02:00", + "Path": "date", + "Args": [], + "Config": { + "Hostname": "4fa6e0f0c678", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Dns": null, + "Image": "ubuntu", + "Volumes": {}, + "VolumesFrom": "", + "WorkingDir": "" + }, + "State": { + "Running": false, + "Pid": 0, + "ExitCode": 0, + "StartedAt": "2013-05-07T14:51:42.087658+02:01360", + "Ghost": false + }, + "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "NetworkSettings": { + "IpAddress": "", + "IpPrefixLen": 0, + "Gateway": "", + "Bridge": "", + "PortMapping": null + }, + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", + "ResolvConfPath": "/etc/resolv.conf", + "Volumes": {}, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "LxcConf": [], + "Privileged": false, + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "49153" + } + ] + }, + "Links": ["/name:alias"], + "PublishAllPorts": false, + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"] + } + } + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### List processes running inside a container + +`GET /containers/(id)/top` + +List processes running inside the container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles": [ + "USER", + "PID", + "%CPU", + "%MEM", + "VSZ", + "RSS", + "TTY", + "STAT", + "START", + "TIME", + "COMMAND" + ], + "Processes": [ + ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], + ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] + ] + } + +Query Parameters: + +- **ps_args** – ps arguments to use (e.g., aux) + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Get container logs + +`GET /containers/(id)/logs` + +Get stdout and stderr logs from the container ``id`` + +**Example request**: + + GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + +Query Parameters: + +- **follow** – 1/True/true or 0/False/false, return stream. Default false +- **stdout** – 1/True/true or 0/False/false, show stdout log. Default false +- **stderr** – 1/True/true or 0/False/false, show stderr log. Default false +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default false +- **tail** – Output specified number of lines at the end of logs: `all` or ``. 
Default all + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Inspect changes on a container's filesystem + +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Export a container + +`GET /containers/(id)/export` + +Export the contents of container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Resize a container TTY + +`POST /containers/(id)/resize?h=&w=` + +Resize the TTY for container with `id`. The container must be restarted for the resize to take effect. + +**Example request**: + + POST /containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +Status Codes: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +### Start a container + +`POST /containers/(id)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. + +**Example request**: + + POST /containers/(id)/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +### Stop a container + +`POST /containers/(id)/stop` + +Stop the container `id` + +**Example request**: + + POST /containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +### Restart a container + +`POST /containers/(id)/restart` + +Restart the container `id` + +**Example request**: + + POST /containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Kill a container + +`POST /containers/(id)/kill` + +Kill the container `id` + +**Example request**: + + POST /containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters + +- **signal** - Signal to send to the container: integer or string like "SIGINT". + When not set, SIGKILL is assumed and the call will waits for the container to exit. 
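+
+For example, a client could send `SIGTERM` instead of the default `SIGKILL`.
+This is a non-normative sketch, assuming Python 3's standard library and a
+daemon bound to `tcp://127.0.0.1:2375`; the container ID is the one from the
+example above:
+
+    import http.client
+
+    conn = http.client.HTTPConnection("127.0.0.1", 2375)  # assumed TCP binding
+    # Ask the container's main process to terminate gracefully
+    conn.request("POST", "/containers/e90e34656806/kill?signal=SIGTERM")
+    print(conn.getresponse().status)  # 204 on success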
+ +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Pause a container + +`POST /containers/(id)/pause` + +Pause the container `id` + +**Example request**: + + POST /containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Unpause a container + +`POST /containers/(id)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Attach to a container + +`POST /containers/(id)/attach` + +Attach to the container `id` + +**Example request**: + + POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + + **Stream details**: + + When using the TTY setting is enabled in + [`POST /containers/create` + ](/reference/api/docker_remote_api_v1.9/#create-a-container "POST /containers/create"), + the stream is the raw data from the process PTY and client's stdin. + When the TTY is disabled, then the stream is multiplexed to separate + stdout and stderr. + + The format is a **Header** and a **Payload** (frame). + + **HEADER** + + The header will contain the information on which stream write the + stream (stdout or stderr). It also contain the size of the + associated frame encoded on the last 4 bytes (uint32). + + It is encoded on the first 8 bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + + `STREAM_TYPE` can be: + +- 0: stdin (will be written on stdout) +- 1: stdout +- 2: stderr + + `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of + the uint32 size encoded as big endian. + + **PAYLOAD** + + The payload is the raw stream. + + **IMPLEMENTATION** + + The simplest way to implement the Attach protocol is the following: + + 1. Read 8 bytes + 2. chose stdout or stderr depending on the first byte + 3. Extract the frame size from the last 4 bytes + 4. Read the extracted size and output it on the correct output + 5. Goto 1 + +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. 
+ Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Wait a container + +`POST /containers/(id)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Remove a container + +`DELETE /containers/(id)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default false +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default false + +Status Codes: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Copy files or folders from a container + +`POST /containers/(id)/copy` + +Copy files or folders of container `id` + +**Example request**: + + POST /containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +## 2.2 Images + +### List Images + +`GET /images/json` + +**Example request**: + + GET /images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275 + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135 + } + ] + + +Query Parameters: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a json encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - dangling=true +- **filter** - only return images with the specified name + +### Create an image + +`POST /images/create` + +Create an image, either by pulling it from the registry or by importing it + +**Example request**: + + POST /images/create?fromImage=ubuntu HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + + When using this endpoint to pull an image from the registry, the + `X-Registry-Auth` header can be used to include + a base64-encoded AuthConfig object. 
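+
+Because the response body is a stream of JSON messages like those shown above
+rather than a single document, clients should read it incrementally. A
+non-normative sketch, assuming Python 3's standard library, a daemon bound to
+`tcp://127.0.0.1:2375`, and one JSON object per line as displayed in the
+example response:
+
+    import http.client
+    import json
+
+    conn = http.client.HTTPConnection("127.0.0.1", 2375)  # assumed TCP binding
+    conn.request("POST", "/images/create?fromImage=ubuntu&tag=latest")
+    resp = conn.getresponse()
+
+    for line in resp:          # progress messages arrive as the pull proceeds
+        if not line.strip():
+            continue
+        msg = json.loads(line)
+        if "error" in msg:
+            raise RuntimeError(msg["error"])
+        print(msg.get("status", ""), msg.get("progress", ""))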
+ +Query Parameters: + +- **fromImage** – name of the image to pull +- **fromSrc** – source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. +- **repo** – repository +- **tag** – tag +- **registry** – the registry to pull from + + Request Headers: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +Status Codes: + +- **200** – no error +- **500** – server error + + + +### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /images/ubuntu/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Created": "2013-03-23T22:24:18.818426-07:00", + "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "ContainerConfig": + { + "Hostname": "", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "PortSpecs": null, + "Tty": true, + "OpenStdin": true, + "StdinOnce": false, + "Env": null, + "Cmd": ["/bin/bash"], + "Dns": null, + "Image": "ubuntu", + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "" + }, + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Parent": "27cf784147099545", + "Size": 6824592 + } + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "b750fe79269d", + "Created": 1364102658, + "CreatedBy": "/bin/bash" + }, + { + "Id": "27cf78414709", + "Created": 1364068391, + "CreatedBy": "" + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + + If you wish to push an image on to a private registry, that image must already have been tagged + into a repository which references that registry host name and port. This repository name should + then be used in the URL. This mirrors the flow of the CLI. + +**Example request**: + + POST /images/registry.acme.com:5000/test/push HTTP/1.1 + + +Query Parameters: + +- **tag** – the tag to associate with the image on the registry, optional + +Request Headers: + +- **X-Registry-Auth** – include a base64-encoded AuthConfig + object. 
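+
+A push replies with the same kind of JSON status stream as a pull, so the
+whole exchange can be sketched as follows. Non-normative; same Python 3 and
+`tcp://127.0.0.1:2375` assumptions as the pull example earlier, with the
+private-registry repository name from the example above:
+
+    import base64
+    import http.client
+    import json
+
+    auth = {"username": "hannibal", "password": "xxxx",
+            "email": "hannibal@a-team.com",
+            "serveraddress": "https://index.docker.io/v1/"}
+    header = base64.b64encode(json.dumps(auth).encode()).decode()
+
+    conn = http.client.HTTPConnection("127.0.0.1", 2375)  # assumed TCP binding
+    conn.request("POST", "/images/registry.acme.com:5000/test/push",
+                 headers={"X-Registry-Auth": header})
+    for line in conn.getresponse():
+        if not line.strip():
+            continue
+        if "error" in json.loads(line):
+            raise RuntimeError("push failed")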
+ +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 OK + +Query Parameters: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +Status Codes: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +Query Parameters: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +Status Codes: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +Query Parameters: + +- **term** – term to search + +Status Codes: + +- **200** – no error +- **500** – server error + +## 2.3 Misc + +### Build an image from Dockerfile via stdin + +`POST /build` + +Build an image from Dockerfile via stdin + +**Example request**: + + POST /build HTTP/1.1 + + {{ TAR STREAM }} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + + The stream must be a tar archive compressed with one of the + following algorithms: identity (no compression), gzip, bzip2, xz. + + The archive must include a file called `Dockerfile` + at its root. It may include any number of other files, + which will be accessible in the build context (See the [*ADD build + command*](/reference/builder/#dockerbuilder)). + +Query Parameters: + +- **t** – repository name (and optionally a tag) to be applied to + the resulting image in case of success +- **remote** – git or HTTP/HTTPS URI build source +- **q** – suppress verbose build output +- **nocache** – do not use the cache when building the image +- **pull** - attempt to pull the image even if an older image exists locally +- **rm** - remove intermediate containers after a successful build (default behavior) +- **forcerm** - always remove intermediate containers (includes rm) + + Request Headers: + +- **Content-type** – should be set to `"application/tar"`. 
+- **X-Registry-Config** – base64-encoded ConfigFile object + +Status Codes: + +- **200** – no error +- **500** – server error + +### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /auth HTTP/1.1 + Content-Type: application/json + + { + "username":" hannibal", + "password: "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +Status Codes: + +- **200** – no error +- **204** – no error +- **500** – server error + +### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers":11, + "Images":16, + "Driver":"btrfs", + "DriverStatus": [[""]], + "ExecutionDriver":"native-0.1", + "KernelVersion":"3.12.0-1-amd64" + "NCPU":1, + "MemTotal":2099236864, + "Name":"prod-server-42", + "ID":"7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "Debug":false, + "NFd": 11, + "NGoroutines":21, + "NEventsListener":0, + "InitPath":"/usr/bin/docker", + "InitSha1":"", + "IndexServerAddress":["https://index.docker.io/v1/"], + "MemoryLimit":true, + "SwapLimit":false, + "IPv4Forwarding":true, + "Labels":["storage=ssd"], + "DockerRootDir": "/var/lib/docker", + "OperatingSystem": "Boot2Docker", + } + +Status Codes: + +- **200** – no error +- **500** – server error + +### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "ApiVersion": "1.12", + "Version": "0.2.2", + "GitCommit": "5a2a5cc+CHANGES", + "GoVersion": "go1.0.3" + } + +Status Codes: + +- **200** – no error +- **500** – server error + +### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +Status Codes: + +- **200** - no error +- **500** - server error + +### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 512, + "Cpuset": "0,1", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/vnd.docker.raw-stream + + {"Id": "596069db4bf5"} + +Json Parameters: + +- **config** - the container's configuration + +Query Parameters: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") + +Status Codes: + +- **201** – no error +- **404** – no such container +- **500** – server error + +### Monitor Docker's events + +`GET /events` + +Get container events from docker, either in real time via streaming, or via +polling (using since). 
+ +Docker containers will report the following events: + + create, destroy, die, export, kill, pause, restart, start, stop, unpause + +and Docker images will report: + + untag, delete + +**Example request**: + + GET /events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} + +Query Parameters: + +- **since** – timestamp used for polling +- **until** – timestamp used for polling +- **filters** – a json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - event=<string> -- event to filter + - image=<string> -- image to filter + - container=<string> -- container to filter + +Status Codes: + +- **200** – no error +- **500** – server error + +### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +Status Codes: + +- **200** – no error +- **500** – server error + +### Get a tarball containing all images. + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +ubuntu:latest), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +Status Codes: + +- **200** – no error +- **500** – server error + +### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into the docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /images/load + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +Status Codes: + +- **200** – no error +- **500** – server error + +### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing three files: + +1. `VERSION`: currently `1.0` - the file format version +2. `json`: detailed layer information, similar to `docker inspect layer_id` +3. 
`layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file will contain `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, there will also be a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +### Exec Create + +`POST /containers/(id)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "Cmd": [ + "date" + ], + } + +**Example response**: + + HTTP/1.1 201 OK + Content-Type: application/json + + { + "Id": "f90e34656806" + } + +Json Parameters: + +- **AttachStdin** - Boolean value, attaches to stdin of the exec command. +- **AttachStdout** - Boolean value, attaches to stdout of the exec command. +- **AttachStderr** - Boolean value, attaches to stderr of the exec command. +- **Tty** - Boolean value to allocate a pseudo-TTY +- **Cmd** - Command to run specified as a string or an array of strings. + + +Status Codes: + +- **201** – no error +- **404** – no such container + +### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up exec instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false, + } + +**Example response**: + + HTTP/1.1 201 OK + Content-Type: application/json + + {{ STREAM }} + +Json Parameters: + +- **Detach** - Detach from the exec command +- **Tty** - Boolean value to allocate a pseudo-TTY + +Status Codes: + +- **201** – no error +- **404** – no such exec instance + + **Stream details**: + Similar to the stream behavior of `POST /container/(id)/attach` API + +### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the tty session used by the exec command `id`. +This API is valid only if `tty` was specified as part of creating and starting the exec command. + +**Example request**: + + POST /exec/e90e34656806/resize HTTP/1.1 + Content-Type: plain/text + +**Example response**: + + HTTP/1.1 201 OK + Content-Type: plain/text + +Query Parameters: + +- **h** – height of tty session +- **w** – width + +Status Codes: + +- **201** – no error +- **404** – no such exec instance + +### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the exec command `id`. 
+ +**Example request**: + + GET /exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: plain/text + + { + "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39", + "Running" : false, + "ExitCode" : 2, + "ProcessConfig" : { + "privileged" : false, + "user" : "", + "tty" : false, + "entrypoint" : "sh", + "arguments" : [ + "-c", + "exit 2" + ] + }, + "OpenStdin" : false, + "OpenStderr" : false, + "OpenStdout" : false, + "Container" : { + "State" : { + "Running" : true, + "Paused" : false, + "Restarting" : false, + "OOMKilled" : false, + "Pid" : 3650, + "ExitCode" : 0, + "Error" : "", + "StartedAt" : "2014-11-17T22:26:03.717657531Z", + "FinishedAt" : "0001-01-01T00:00:00Z" + }, + "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c", + "Created" : "2014-11-17T22:26:03.626304998Z", + "Path" : "date", + "Args" : [], + "Config" : { + "Hostname" : "8f177a186b97", + "Domainname" : "", + "User" : "", + "Memory" : 0, + "MemorySwap" : 0, + "CpuShares" : 0, + "Cpuset" : "", + "AttachStdin" : false, + "AttachStdout" : false, + "AttachStderr" : false, + "PortSpecs" : null, + "ExposedPorts" : null, + "Tty" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], + "Cmd" : [ + "date" + ], + "Image" : "ubuntu", + "Volumes" : null, + "WorkingDir" : "", + "Entrypoint" : null, + "NetworkDisabled" : false, + "MacAddress" : "", + "OnBuild" : null, + "SecurityOpt" : null + }, + "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5", + "NetworkSettings" : { + "IPAddress" : "172.17.0.2", + "IPPrefixLen" : 16, + "MacAddress" : "02:42:ac:11:00:02", + "Gateway" : "172.17.42.1", + "Bridge" : "docker0", + "PortMapping" : null, + "Ports" : {} + }, + "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf", + "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname", + "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts", + "Name" : "/test", + "Driver" : "aufs", + "ExecDriver" : "native-0.2", + "MountLabel" : "", + "ProcessLabel" : "", + "AppArmorProfile" : "", + "RestartCount" : 0, + "Volumes" : {}, + "VolumesRW" : {} + } + } + +Status Codes: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +# 3. Going further + +## 3.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it + - Then retry to create the container + +- Start the container + +- If you are not in detached mode: +- Attach to the container, using logs=1 (to have stdout and + stderr from the container's start) and stream=1 + +- If in detached mode or only stdin is attached: +- Display the container's id + +## 3.2 Hijacking + +In this version of the API, /attach, uses hijacking to transport stdin, +stdout and stderr on the same socket. This might change in the future. + +## 3.3 CORS Requests + +To enable cross origin requests to the remote api add the flag +"--api-enable-cors" when running docker in daemon mode. 
+ + $ docker -d -H="192.168.1.9:2375" --api-enable-cors diff --git a/docs/reference/api/docker_remote_api_v1.17.md b/docs/reference/api/docker_remote_api_v1.17.md new file mode 100644 index 00000000..1977eb1f --- /dev/null +++ b/docs/reference/api/docker_remote_api_v1.17.md @@ -0,0 +1,1987 @@ + + +# Docker Remote API v1.17 + +## 1. Brief introduction + + - The Remote API has replaced `rcli`. + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket]( + /articles/basics/#bind-docker-to-another-hostport-or-a-unix-socket). + - The API tends to be REST, but for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `STDOUT`, + `STDIN` and `STDERR`. + +# 2. Endpoints + +## 2.1 Containers + +### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +Query Parameters: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a json encoded value of the filters (a map[string][]string) to process on the containers list. 
Available filters:
  - exited=<int> -- containers with exit code of <int>
  - status=(restarting|running|paused|exited)

Status Codes:

- **200** – no error
- **400** – bad parameter
- **500** – server error

### Create a container

`POST /containers/create`

Create a container

**Example request**:

    POST /containers/create HTTP/1.1
    Content-Type: application/json

    {
      "Hostname": "",
      "Domainname": "",
      "User": "",
      "Memory": 0,
      "MemorySwap": 0,
      "CpuShares": 512,
      "Cpuset": "0,1",
      "AttachStdin": false,
      "AttachStdout": true,
      "AttachStderr": true,
      "Tty": false,
      "OpenStdin": false,
      "StdinOnce": false,
      "Env": null,
      "Cmd": [
        "date"
      ],
      "Entrypoint": "",
      "Image": "ubuntu",
      "Volumes": {
        "/tmp": {}
      },
      "WorkingDir": "",
      "NetworkDisabled": false,
      "MacAddress": "12:34:56:78:9a:bc",
      "ExposedPorts": {
        "22/tcp": {}
      },
      "HostConfig": {
        "Binds": ["/tmp:/tmp"],
        "Links": ["redis3:redis"],
        "LxcConf": {"lxc.utsname":"docker"},
        "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
        "PublishAllPorts": false,
        "Privileged": false,
        "ReadonlyRootfs": false,
        "Dns": ["8.8.8.8"],
        "DnsSearch": [""],
        "ExtraHosts": null,
        "VolumesFrom": ["parent", "other:ro"],
        "CapAdd": ["NET_ADMIN"],
        "CapDrop": ["MKNOD"],
        "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 },
        "NetworkMode": "bridge",
        "Devices": [],
        "SecurityOpt": [""]
      }
    }

**Example response**:

    HTTP/1.1 201 Created
    Content-Type: application/json

    {
      "Id": "e90e34656806",
      "Warnings": []
    }

Json Parameters:

- **Hostname** - A string value containing the desired hostname to use for the
  container.
- **Domainname** - A string value containing the desired domain name to use
  for the container.
- **User** - A string value containing the user to use inside the container.
- **Memory** - Memory limit in bytes.
- **MemorySwap** - Total memory limit (memory + swap); set `-1` to disable swap,
  always use this with `memory`, and make the value larger than `memory`.
- **CpuShares** - An integer value containing the CPU Shares for the container
  (i.e., the relative weight vs. other containers).
- **Cpuset** - String value containing the cgroups Cpuset to use.
- **AttachStdin** - Boolean value, attaches to stdin.
- **AttachStdout** - Boolean value, attaches to stdout.
- **AttachStderr** - Boolean value, attaches to stderr.
- **Tty** - Boolean value, attach standard streams to a tty, including stdin if it is not closed.
- **OpenStdin** - Boolean value, opens stdin.
- **StdinOnce** - Boolean value, close stdin after the first attached client disconnects.
- **Env** - A list of environment variables in the form of `VAR=value`
- **Cmd** - Command to run specified as a string or an array of strings.
- **Entrypoint** - Set the entrypoint for the container as a string or an array
  of strings.
- **Image** - String value containing the image name to use for the container
- **Volumes** – An object mapping mountpoint paths (strings) inside the
  container to empty objects.
- **WorkingDir** - A string value containing the working dir for commands to
  run in.
- **NetworkDisabled** - Boolean value, when true disables networking for the
  container
- **ExposedPorts** - An object mapping ports to an empty object in the form of:
  `"ExposedPorts": { "<port>/<tcp|udp>: {}" }`
- **HostConfig**
  - **Binds** – A list of volume bindings for this container.
    Each volume binding is a string of the form `container_path` (to create a
    new volume for the container), `host_path:container_path` (to bind-mount
    a host path into the container), or `host_path:container_path:ro`
    (to make the bind-mount read-only inside the container).
  - **Links** - A list of links for the container. Each link entry should be
    in the form of `container_name:alias`.
  - **LxcConf** - LXC specific configurations. These configurations will only
    work when using the `lxc` execution driver.
  - **PortBindings** - A map of exposed container ports and the host port they
    should map to. It should be specified in the form
    `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`.
    Take note that `port` is specified as a string and not an integer value.
  - **PublishAllPorts** - Allocates a random host port for all of a container's
    exposed ports. Specified as a boolean value.
  - **Privileged** - Gives the container full access to the host. Specified as
    a boolean value.
  - **ReadonlyRootfs** - Mount the container's root filesystem as read only.
    Specified as a boolean value.
  - **Dns** - A list of DNS servers for the container to use.
  - **DnsSearch** - A list of DNS search domains
  - **ExtraHosts** - A list of hostnames/IP mappings to be added to the
    container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
  - **VolumesFrom** - A list of volumes to inherit from another container.
    Specified in the form `<container name>[:<ro|rw>]`
  - **CapAdd** - A list of kernel capabilities to add to the container.
  - **CapDrop** - A list of kernel capabilities to drop from the container.
  - **RestartPolicy** – The behavior to apply when the container exits. The
    value is an object with a `Name` property of either `"always"` to
    always restart or `"on-failure"` to restart only when the container
    exit code is non-zero. If `on-failure` is used, `MaximumRetryCount`
    controls the number of times to retry before giving up.
    The default is not to restart. (optional)
    An ever increasing delay (double the previous delay, starting at 100 ms)
    is added before each restart to prevent flooding the server.
  - **NetworkMode** - Sets the networking mode for the container. Supported
    values are: `bridge`, `host`, and `container:<name|id>`
  - **Devices** - A list of devices to add to the container specified in the
    form
    `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
  - **SecurityOpt**: A list of string values to customize labels for MLS
    systems, such as SELinux.

Query Parameters:

- **name** – Assign the specified name to the container. Must
  match `/?[a-zA-Z0-9_-]+`.
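As an illustration of the request shape described above (not part of the
original specification), here is a minimal Python sketch that creates a named
container over the default Unix socket; the socket path, image name, and
container name are assumptions for the example.

```python
import json
import socket

# Minimal sketch (assumes the daemon listens on /var/run/docker.sock
# and that the "ubuntu" image is already present locally).
body = json.dumps({"Image": "ubuntu", "Cmd": ["date"]})
request = (
    "POST /containers/create?name=my_container HTTP/1.1\r\n"
    "Host: localhost\r\n"
    "Content-Type: application/json\r\n"
    "Content-Length: %d\r\n"
    "Connection: close\r\n"
    "\r\n"
    "%s" % (len(body), body)
)

sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect("/var/run/docker.sock")
sock.sendall(request.encode("utf-8"))
# On success the daemon answers "HTTP/1.1 201 Created" with a JSON
# body containing the new container's Id and any Warnings.
print(sock.recv(4096).decode("utf-8"))
sock.close()
```

Any HTTP client works here; the raw-socket form is shown only to mirror the
wire format documented in this reference.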
+ +Status Codes: + +- **201** – no error +- **404** – no such container +- **406** – impossible to attach (container not running) +- **500** – server error + +### Inspect a container + +`GET /containers/(id)/json` + +Return low-level information on the container `id` + + +**Example request**: + + GET /containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "CpuShares": 0, + "Cpuset": "", + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "MacAddress": "", + "Memory": 0, + "MemorySwap": 0, + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "PortSpecs": null, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecDriver": "native-0.2", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "Devices": [], + "Dns": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "NetworkMode": "bridge", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "SecurityOpt": null, + "VolumesFrom": null + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "Gateway": "", + "IPAddress": "", + "IPPrefixLen": 0, + "MacAddress": "", + "PortMapping": null, + "Ports": null + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": false, + "StartedAt": "2015-01-06T15:47:32.072697474Z" + }, + "Volumes": {}, + "VolumesRW": {} + } + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### List processes running inside a container + +`GET /containers/(id)/top` + +List processes running inside the container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles": [ + "USER", + "PID", + "%CPU", + "%MEM", + "VSZ", + "RSS", + "TTY", + "STAT", + "START", + "TIME", + "COMMAND" + ], + "Processes": [ + ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], + ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] + ] + } + +Query Parameters: + +- **ps_args** – ps arguments to use (e.g., aux) + +Status 
Codes:

- **200** – no error
- **404** – no such container
- **500** – server error

### Get container logs

`GET /containers/(id)/logs`

Get stdout and stderr logs from the container ``id``

**Example request**:

    GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1&timestamps=1&follow=1&tail=10 HTTP/1.1

**Example response**:

    HTTP/1.1 101 UPGRADED
    Content-Type: application/vnd.docker.raw-stream
    Connection: Upgrade
    Upgrade: tcp

    {{ STREAM }}

Query Parameters:

- **follow** – 1/True/true or 0/False/false, return stream. Default false
- **stdout** – 1/True/true or 0/False/false, show stdout log. Default false
- **stderr** – 1/True/true or 0/False/false, show stderr log. Default false
- **timestamps** – 1/True/true or 0/False/false, print timestamps for
  every log line. Default false
- **tail** – Output specified number of lines at the end of logs: `all` or `<number>`. Default all

Status Codes:

- **101** – no error, hints proxy about hijacking
- **200** – no error, no upgrade header found
- **404** – no such container
- **500** – server error

### Inspect changes on a container's filesystem

`GET /containers/(id)/changes`

Inspect changes on container `id`'s filesystem

**Example request**:

    GET /containers/4fa6e0f0c678/changes HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    [
      {
        "Path": "/dev",
        "Kind": 0
      },
      {
        "Path": "/dev/kmsg",
        "Kind": 1
      },
      {
        "Path": "/test",
        "Kind": 1
      }
    ]

Status Codes:

- **200** – no error
- **404** – no such container
- **500** – server error

### Export a container

`GET /containers/(id)/export`

Export the contents of container `id`

**Example request**:

    GET /containers/4fa6e0f0c678/export HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/octet-stream

    {{ TAR STREAM }}

Status Codes:

- **200** – no error
- **404** – no such container
- **500** – server error

### Get container stats based on resource usage

`GET /containers/(id)/stats`

This endpoint returns a live stream of a container's resource usage statistics.
+ +**Example request**: + + GET /containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "network" : { + "rx_dropped" : 0, + "rx_bytes" : 648, + "rx_errors" : 0, + "tx_packets" : 8, + "tx_dropped" : 0, + "rx_packets" : 8, + "tx_errors" : 0, + "tx_bytes" : 648 + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 16970827, + 1839451, + 7107380, + 10571290 + ], + "usage_in_usermode" : 10000000, + "total_usage" : 36488948, + "usage_in_kernelmode" : 20000000 + }, + "system_cpu_usage" : 20091722000000000, + "throttling_data" : {} + } + } + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Resize a container TTY + +`POST /containers/(id)/resize?h=&w=` + +Resize the TTY for container with `id`. The container must be restarted for the resize to take effect. + +**Example request**: + + POST /containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +Status Codes: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +### Start a container + +`POST /containers/(id)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. 
+ +**Example request**: + + POST /containers/(id)/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +### Stop a container + +`POST /containers/(id)/stop` + +Stop the container `id` + +**Example request**: + + POST /containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +### Restart a container + +`POST /containers/(id)/restart` + +Restart the container `id` + +**Example request**: + + POST /containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Kill a container + +`POST /containers/(id)/kill` + +Kill the container `id` + +**Example request**: + + POST /containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters + +- **signal** - Signal to send to the container: integer or string like "SIGINT". + When not set, SIGKILL is assumed and the call will waits for the container to exit. + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Rename a container + +`POST /containers/(id)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **name** – new name for the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +### Pause a container + +`POST /containers/(id)/pause` + +Pause the container `id` + +**Example request**: + + POST /containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Unpause a container + +`POST /containers/(id)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Attach to a container + +`POST /containers/(id)/attach` + +Attach to the container `id` + +**Example request**: + + POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. 
Default false

Status Codes:

- **101** – no error, hints proxy about hijacking
- **200** – no error, no upgrade header found
- **400** – bad parameter
- **404** – no such container
- **500** – server error

**Stream details**:

When the TTY setting is enabled in
[`POST /containers/create`](/reference/api/docker_remote_api_v1.9/#create-a-container "POST /containers/create"),
the stream is the raw data from the process PTY and client's stdin.
When the TTY is disabled, the stream is multiplexed to separate
stdout and stderr.

The format is a **Header** and a **Payload** (frame).

**HEADER**

The header contains the information about which stream the frame
belongs to (stdout or stderr). It also contains the size of the
associated frame, encoded in the last 4 bytes (uint32).

It is encoded on the first 8 bytes like this:

    header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}

`STREAM_TYPE` can be:

- 0: stdin (will be written on stdout)
- 1: stdout
- 2: stderr

`SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of
the uint32 size encoded as big endian.

**PAYLOAD**

The payload is the raw stream.

**IMPLEMENTATION**

The simplest way to implement the Attach protocol is the following:

1. Read 8 bytes.
2. Choose stdout or stderr depending on the first byte.
3. Extract the frame size from the last 4 bytes.
4. Read the extracted size and output it on the correct output.
5. Goto 1.

### Attach to a container (websocket)

`GET /containers/(id)/attach/ws`

Attach to the container `id` via websocket

Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455)

**Example request**

    GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1

**Example response**

    {{ STREAM }}

Query Parameters:

- **logs** – 1/True/true or 0/False/false, return logs. Default false
- **stream** – 1/True/true or 0/False/false, return stream.
  Default false
- **stdin** – 1/True/true or 0/False/false, if stream=true, attach
  to stdin. Default false
- **stdout** – 1/True/true or 0/False/false, if logs=true, return
  stdout log, if stream=true, attach to stdout. Default false
- **stderr** – 1/True/true or 0/False/false, if logs=true, return
  stderr log, if stream=true, attach to stderr. Default false

Status Codes:

- **200** – no error
- **400** – bad parameter
- **404** – no such container
- **500** – server error

### Wait a container

`POST /containers/(id)/wait`

Block until container `id` stops, then return the exit code

**Example request**:

    POST /containers/16253994b7c4/wait HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {"StatusCode": 0}

Status Codes:

- **200** – no error
- **404** – no such container
- **500** – server error

### Remove a container

`DELETE /containers/(id)`

Remove the container `id` from the filesystem

**Example request**:

    DELETE /containers/16253994b7c4?v=1 HTTP/1.1

**Example response**:

    HTTP/1.1 204 No Content

Query Parameters:

- **v** – 1/True/true or 0/False/false, Remove the volumes
  associated to the container. Default false
- **force** - 1/True/true or 0/False/false, Kill then remove the container.
+ Default false + +Status Codes: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Copy files or folders from a container + +`POST /containers/(id)/copy` + +Copy files or folders of container `id` + +**Example request**: + + POST /containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +## 2.2 Images + +### List Images + +`GET /images/json` + +**Example request**: + + GET /images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275 + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135 + } + ] + + +Query Parameters: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a json encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - dangling=true +- **filter** - only return images with the specified name + +### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /build HTTP/1.1 + + {{ TAR STREAM }} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a tar archive compressed with one of the +following algorithms: identity (no compression), gzip, bzip2, xz. + +The archive must include a build instructions file, typically called +`Dockerfile` at the root of the archive. The `dockerfile` parameter may be +used to specify a different build instructions file by having its value be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which will be accessible in the build context (See the [*ADD build +command*](/reference/builder/#dockerbuilder)). + +Query Parameters: + +- **dockerfile** - path within the build context to the Dockerfile +- **t** – repository name (and optionally a tag) to be applied to + the resulting image in case of success +- **remote** – git or HTTP/HTTPS URI build source +- **q** – suppress verbose build output +- **nocache** – do not use the cache when building the image +- **pull** - attempt to pull the image even if an older image exists locally +- **rm** - remove intermediate containers after a successful build (default behavior) +- **forcerm** - always remove intermediate containers (includes rm) + + Request Headers: + +- **Content-type** – should be set to `"application/tar"`. 
+- **X-Registry-Config** – base64-encoded ConfigFile object + +Status Codes: + +- **200** – no error +- **500** – server error + +### Create an image + +`POST /images/create` + +Create an image, either by pulling it from the registry or by importing it + +**Example request**: + + POST /images/create?fromImage=ubuntu HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + + When using this endpoint to pull an image from the registry, the + `X-Registry-Auth` header can be used to include + a base64-encoded AuthConfig object. + +Query Parameters: + +- **fromImage** – name of the image to pull +- **fromSrc** – source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. +- **repo** – repository +- **tag** – tag +- **registry** – the registry to pull from + + Request Headers: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +Status Codes: + +- **200** – no error +- **500** – server error + + + +### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /images/ubuntu/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Created": "2013-03-23T22:24:18.818426-07:00", + "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "ContainerConfig": + { + "Hostname": "", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "PortSpecs": null, + "Tty": true, + "OpenStdin": true, + "StdinOnce": false, + "Env": null, + "Cmd": ["/bin/bash"], + "Dns": null, + "Image": "ubuntu", + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "" + }, + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Parent": "27cf784147099545", + "Size": 6824592 + } + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "b750fe79269d", + "Created": 1364102658, + "CreatedBy": "/bin/bash" + }, + { + "Id": "27cf78414709", + "Created": 1364068391, + "CreatedBy": "" + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + + If you wish to push an image on to a private registry, that image must already have been tagged + into a repository which references that registry host name and port. This repository name should + then be used in the URL. This mirrors the flow of the CLI. 
+ +**Example request**: + + POST /images/registry.acme.com:5000/test/push HTTP/1.1 + + +Query Parameters: + +- **tag** – the tag to associate with the image on the registry, optional + +Request Headers: + +- **X-Registry-Auth** – include a base64-encoded AuthConfig + object. + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 OK + +Query Parameters: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +Status Codes: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +Query Parameters: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +Status Codes: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... 
    ]

Query Parameters:

- **term** – term to search

Status Codes:

- **200** – no error
- **500** – server error

## 2.3 Misc

### Check auth configuration

`POST /auth`

Get the default username and email

**Example request**:

    POST /auth HTTP/1.1
    Content-Type: application/json

    {
      "username": "hannibal",
      "password": "xxxx",
      "email": "hannibal@a-team.com",
      "serveraddress": "https://index.docker.io/v1/"
    }

**Example response**:

    HTTP/1.1 200 OK

Status Codes:

- **200** – no error
- **204** – no error
- **500** – server error

### Display system-wide information

`GET /info`

Display system-wide information

**Example request**:

    GET /info HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
      "Containers": 11,
      "Images": 16,
      "Driver": "btrfs",
      "DriverStatus": [[""]],
      "ExecutionDriver": "native-0.1",
      "KernelVersion": "3.12.0-1-amd64",
      "NCPU": 1,
      "MemTotal": 2099236864,
      "Name": "prod-server-42",
      "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS",
      "Debug": false,
      "NFd": 11,
      "NGoroutines": 21,
      "NEventsListener": 0,
      "InitPath": "/usr/bin/docker",
      "InitSha1": "",
      "IndexServerAddress": ["https://index.docker.io/v1/"],
      "MemoryLimit": true,
      "SwapLimit": false,
      "IPv4Forwarding": true,
      "Labels": ["storage=ssd"],
      "DockerRootDir": "/var/lib/docker",
      "OperatingSystem": "Boot2Docker"
    }

Status Codes:

- **200** – no error
- **500** – server error

### Show the docker version information

`GET /version`

Show the docker version information

**Example request**:

    GET /version HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
      "ApiVersion": "1.12",
      "Version": "0.2.2",
      "GitCommit": "5a2a5cc+CHANGES",
      "GoVersion": "go1.0.3"
    }

Status Codes:

- **200** – no error
- **500** – server error

### Ping the docker server

`GET /_ping`

Ping the docker server

**Example request**:

    GET /_ping HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: text/plain

    OK

Status Codes:

- **200** - no error
- **500** - server error

### Create a new image from a container's changes

`POST /commit`

Create a new image from a container's changes

**Example request**:

    POST /commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1
    Content-Type: application/json

    {
      "Hostname": "",
      "Domainname": "",
      "User": "",
      "Memory": 0,
      "MemorySwap": 0,
      "CpuShares": 512,
      "Cpuset": "0,1",
      "AttachStdin": false,
      "AttachStdout": true,
      "AttachStderr": true,
      "PortSpecs": null,
      "Tty": false,
      "OpenStdin": false,
      "StdinOnce": false,
      "Env": null,
      "Cmd": [
        "date"
      ],
      "Volumes": {
        "/tmp": {}
      },
      "WorkingDir": "",
      "NetworkDisabled": false,
      "ExposedPorts": {
        "22/tcp": {}
      }
    }

**Example response**:

    HTTP/1.1 201 Created
    Content-Type: application/json

    {"Id": "596069db4bf5"}

Json Parameters:

- **config** - the container's configuration

Query Parameters:

- **container** – source container
- **repo** – repository
- **tag** – tag
- **comment** – commit message
- **author** – author (e.g., "John Hannibal Smith
  <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")

Status Codes:

- **201** – no error
- **404** – no such container
- **500** – server error

### Monitor Docker's events

`GET /events`

Get container events from docker, either in real time via streaming, or via
polling
(using since). + +Docker containers will report the following events: + + create, destroy, die, exec_create, exec_start, export, kill, oom, pause, restart, start, stop, unpause + +and Docker images will report: + + untag, delete + +**Example request**: + + GET /events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} + +Query Parameters: + +- **since** – timestamp used for polling +- **until** – timestamp used for polling +- **filters** – a json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - event=<string> -- event to filter + - image=<string> -- image to filter + - container=<string> -- container to filter + +Status Codes: + +- **200** – no error +- **500** – server error + +### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +Status Codes: + +- **200** – no error +- **500** – server error + +### Get a tarball containing all images. + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +ubuntu:latest), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +Status Codes: + +- **200** – no error +- **500** – server error + +### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into the docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /images/load + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +Status Codes: + +- **200** – no error +- **500** – server error + +### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing three files: + +1. `VERSION`: currently `1.0` - the file format version +2. `json`: detailed layer information, similar to `docker inspect layer_id` +3. 
`layer.tar`: A tarfile containing the filesystem changes in this layer

The `layer.tar` file will contain `aufs` style `.wh..wh.aufs` files and directories
for storing attribute changes and deletions.

If the tarball defines a repository, there will also be a `repositories` file at
the root that contains a list of repository and tag names mapped to layer IDs.

```
{"hello-world":
    {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"}
}
```

### Exec Create

`POST /containers/(id)/exec`

Sets up an exec instance in a running container `id`

**Example request**:

    POST /containers/e90e34656806/exec HTTP/1.1
    Content-Type: application/json

    {
      "AttachStdin": false,
      "AttachStdout": true,
      "AttachStderr": true,
      "Tty": false,
      "Cmd": [
        "date"
      ]
    }

**Example response**:

    HTTP/1.1 201 Created
    Content-Type: application/json

    {
      "Id": "f90e34656806"
    }

Json Parameters:

- **AttachStdin** - Boolean value, attaches to stdin of the exec command.
- **AttachStdout** - Boolean value, attaches to stdout of the exec command.
- **AttachStderr** - Boolean value, attaches to stderr of the exec command.
- **Tty** - Boolean value to allocate a pseudo-TTY.
- **Cmd** - Command to run specified as a string or an array of strings.

Status Codes:

- **201** – no error
- **404** – no such container

### Exec Start

`POST /exec/(id)/start`

Starts a previously set up exec instance `id`. If `detach` is true, this API
returns after starting the `exec` command. Otherwise, this API sets up an
interactive session with the `exec` command.

**Example request**:

    POST /exec/e90e34656806/start HTTP/1.1
    Content-Type: application/json

    {
      "Detach": false,
      "Tty": false
    }

**Example response**:

    HTTP/1.1 201 Created
    Content-Type: application/json

    {{ STREAM }}

Json Parameters:

- **Detach** - Detach from the exec command.
- **Tty** - Boolean value to allocate a pseudo-TTY.

Status Codes:

- **201** – no error
- **404** – no such exec instance

**Stream details**:
Similar to the stream behavior of the `POST /containers/(id)/attach` API

### Exec Resize

`POST /exec/(id)/resize`

Resizes the tty session used by the exec command `id`.
This API is valid only if `tty` was specified as part of creating and starting the exec command.

**Example request**:

    POST /exec/e90e34656806/resize HTTP/1.1
    Content-Type: text/plain

**Example response**:

    HTTP/1.1 201 Created
    Content-Type: text/plain

Query Parameters:

- **h** – height of tty session
- **w** – width

Status Codes:

- **201** – no error
- **404** – no such exec instance

### Exec Inspect

`GET /exec/(id)/json`

Return low-level information about the exec command `id`.
**Example request**:

    GET /exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
      "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39",
      "Running" : false,
      "ExitCode" : 2,
      "ProcessConfig" : {
        "privileged" : false,
        "user" : "",
        "tty" : false,
        "entrypoint" : "sh",
        "arguments" : [
          "-c",
          "exit 2"
        ]
      },
      "OpenStdin" : false,
      "OpenStderr" : false,
      "OpenStdout" : false,
      "Container" : {
        "State" : {
          "Running" : true,
          "Paused" : false,
          "Restarting" : false,
          "OOMKilled" : false,
          "Pid" : 3650,
          "ExitCode" : 0,
          "Error" : "",
          "StartedAt" : "2014-11-17T22:26:03.717657531Z",
          "FinishedAt" : "0001-01-01T00:00:00Z"
        },
        "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c",
        "Created" : "2014-11-17T22:26:03.626304998Z",
        "Path" : "date",
        "Args" : [],
        "Config" : {
          "Hostname" : "8f177a186b97",
          "Domainname" : "",
          "User" : "",
          "Memory" : 0,
          "MemorySwap" : 0,
          "CpuShares" : 0,
          "Cpuset" : "",
          "AttachStdin" : false,
          "AttachStdout" : false,
          "AttachStderr" : false,
          "PortSpecs" : null,
          "ExposedPorts" : null,
          "Tty" : false,
          "OpenStdin" : false,
          "StdinOnce" : false,
          "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ],
          "Cmd" : [
            "date"
          ],
          "Image" : "ubuntu",
          "Volumes" : null,
          "WorkingDir" : "",
          "Entrypoint" : null,
          "NetworkDisabled" : false,
          "MacAddress" : "",
          "OnBuild" : null,
          "SecurityOpt" : null
        },
        "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5",
        "NetworkSettings" : {
          "IPAddress" : "172.17.0.2",
          "IPPrefixLen" : 16,
          "MacAddress" : "02:42:ac:11:00:02",
          "Gateway" : "172.17.42.1",
          "Bridge" : "docker0",
          "PortMapping" : null,
          "Ports" : {}
        },
        "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf",
        "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname",
        "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts",
        "Name" : "/test",
        "Driver" : "aufs",
        "ExecDriver" : "native-0.2",
        "MountLabel" : "",
        "ProcessLabel" : "",
        "AppArmorProfile" : "",
        "RestartCount" : 0,
        "Volumes" : {},
        "VolumesRW" : {}
      }
    }

Status Codes:

- **200** – no error
- **404** – no such exec instance
- **500** - server error

# 3. Going further

## 3.1 Inside `docker run`

As an example, the `docker run` command line makes the following API calls:

- Create the container

- If the status code is 404, it means the image doesn't exist:
  - Try to pull it
  - Then retry to create the container

- Start the container

- If you are not in detached mode:
  - Attach to the container, using logs=1 (to have stdout and
    stderr from the container's start) and stream=1

- If in detached mode or only stdin is attached:
  - Display the container's id

## 3.2 Hijacking

In this version of the API, `/attach` uses hijacking to transport stdin,
stdout and stderr on the same socket.

To hint potential proxies about connection hijacking, the Docker client sends
connection upgrade headers, similar to websocket:

    Upgrade: tcp
    Connection: Upgrade

When the Docker daemon detects the `Upgrade` header, it switches its status code
from **200 OK** to **101 UPGRADED** and resends the same headers.
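For concreteness, the following is a hedged sketch of the client side of this
handshake; it is not taken from the original document, and the container ID
and socket path are placeholders.

```python
import socket

# Sketch of the hijack handshake described above; the container ID and
# socket path are placeholders, and demultiplexing of the stream
# (see the attach endpoint's stream details) is omitted.
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect("/var/run/docker.sock")
sock.sendall(
    b"POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1\r\n"
    b"Host: localhost\r\n"
    b"Upgrade: tcp\r\n"
    b"Connection: Upgrade\r\n"
    b"\r\n"
)
# After the daemon replies "HTTP/1.1 101 UPGRADED", the same socket
# carries the raw (or multiplexed) container streams in both directions.
print(sock.recv(4096).decode("utf-8", "replace"))
```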
+ +This might change in the future. + +## 3.3 CORS Requests + +To set cross origin requests to the remote api, please add flag "--api-enable-cors" +when running docker in daemon mode. + + $ docker -d -H="192.168.1.9:2375" --api-enable-cors diff --git a/docs/reference/api/docker_remote_api_v1.18.md b/docs/reference/api/docker_remote_api_v1.18.md new file mode 100644 index 00000000..8bdb98ff --- /dev/null +++ b/docs/reference/api/docker_remote_api_v1.18.md @@ -0,0 +1,2106 @@ + + +# Docker Remote API v1.18 + +## 1. Brief introduction + + - The Remote API has replaced `rcli`. + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket]( + /articles/basics/#bind-docker-to-another-hostport-or-a-unix-socket). + - The API tends to be REST, but for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `STDOUT`, + `STDIN` and `STDERR`. + +# 2. Endpoints + +## 2.1 Containers + +### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +Query Parameters: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a json encoded value of the filters (a map[string][]string) to process on the containers list. 
Available filters: + - exited=<int> -- containers with exit code of <int> + - status=(restarting|running|paused|exited) + - label=`key` or `label="key=value"` of a container label + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Entrypoint": "", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 512, + "CpusetCpus": "0,1", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", Config: {} }, + "SecurityOpt": [""], + "CgroupParent": "" + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806" + "Warnings":[] + } + +Json Parameters: + +- **Hostname** - A string value containing the desired hostname to use for the + container. +- **Domainname** - A string value containing the desired domain name to use + for the container. +- **User** - A string value containing the user to use inside the container. +- **Memory** - Memory limit in bytes. +- **MemorySwap**- Total memory limit (memory + swap); set `-1` to disable swap, + always use this with `memory`, and make the value larger than `memory`. +- **CpuShares** - An integer value containing the CPU Shares for container + (ie. the relative weight vs other containers). +- **Cpuset** - The same as CpusetCpus, but deprecated, please don't use. +- **CpusetCpus** - String value containing the cgroups CpusetCpus to use. +- **AttachStdin** - Boolean value, attaches to stdin. +- **AttachStdout** - Boolean value, attaches to stdout. +- **AttachStderr** - Boolean value, attaches to stderr. +- **Tty** - Boolean value, Attach standard streams to a tty, including stdin if it is not closed. +- **OpenStdin** - Boolean value, opens stdin, +- **StdinOnce** - Boolean value, close stdin after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `VAR=value` +- **Labels** - Adds a map of labels that to a container. To specify a map: `{"key":"value"[,"key2":"value2"]}` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entrypoint for the container a string or an array + of strings +- **Image** - String value containing the image name to use for the container +- **Volumes** – An object mapping mountpoint paths (strings) inside the + container to empty objects. 
- **WorkingDir** - A string value containing the working dir for commands to
  run in.
- **NetworkDisabled** - Boolean value, when true disables networking for the
  container
- **ExposedPorts** - An object mapping ports to an empty object in the form of:
  `"ExposedPorts": { "<port>/<tcp|udp>: {}" }`
- **HostConfig**
  - **Binds** – A list of volume bindings for this container. Each volume
    binding is a string of the form `container_path` (to create a new
    volume for the container), `host_path:container_path` (to bind-mount
    a host path into the container), or `host_path:container_path:ro`
    (to make the bind-mount read-only inside the container).
  - **Links** - A list of links for the container. Each link entry should be
    in the form of `container_name:alias`.
  - **LxcConf** - LXC specific configurations. These configurations will only
    work when using the `lxc` execution driver.
  - **PortBindings** - A map of exposed container ports and the host port they
    should map to. It should be specified in the form
    `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`.
    Take note that `port` is specified as a string and not an integer value.
  - **PublishAllPorts** - Allocates a random host port for all of a container's
    exposed ports. Specified as a boolean value.
  - **Privileged** - Gives the container full access to the host. Specified as
    a boolean value.
  - **ReadonlyRootfs** - Mount the container's root filesystem as read only.
    Specified as a boolean value.
  - **Dns** - A list of DNS servers for the container to use.
  - **DnsSearch** - A list of DNS search domains
  - **ExtraHosts** - A list of hostnames/IP mappings to be added to the
    container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
  - **VolumesFrom** - A list of volumes to inherit from another container.
    Specified in the form `<container name>[:<ro|rw>]`
  - **CapAdd** - A list of kernel capabilities to add to the container.
  - **CapDrop** - A list of kernel capabilities to drop from the container.
  - **RestartPolicy** – The behavior to apply when the container exits. The
    value is an object with a `Name` property of either `"always"` to
    always restart or `"on-failure"` to restart only when the container
    exit code is non-zero. If `on-failure` is used, `MaximumRetryCount`
    controls the number of times to retry before giving up.
    The default is not to restart. (optional)
    An ever increasing delay (double the previous delay, starting at 100 ms)
    is added before each restart to prevent flooding the server.
  - **NetworkMode** - Sets the networking mode for the container. Supported
    values are: `bridge`, `host`, and `container:<name|id>`
  - **Devices** - A list of devices to add to the container specified in the
    form
    `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
  - **Ulimits** - A list of ulimits to be set in the container, specified as
    `{ "Name": <name>, "Soft": <soft limit>, "Hard": <hard limit> }`, for example:
    `Ulimits: [{ "Name": "nofile", "Soft": 1024, "Hard": 2048 }]`
  - **SecurityOpt**: A list of string values to customize labels for MLS
    systems, such as SELinux.
  - **LogConfig** - Log configuration for the container, specified as
    `{ "Type": "<driver_name>", "Config": {"key1": "val1"} }`.
    Available types: `json-file`, `syslog`, `none`.
  - **CgroupParent** - Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process.
Cgroups will be created if they do not already exist. + +Query Parameters: + +- **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. + +Status Codes: + +- **201** – no error +- **404** – no such container +- **406** – impossible to attach (container not running) +- **500** – server error + +### Inspect a container + +`GET /containers/(id)/json` + +Return low-level information on the container `id` + + +**Example request**: + + GET /containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "PortSpecs": null, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecDriver": "native-0.2", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpuShares": 0, + "Devices": [], + "Dns": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "NetworkMode": "bridge", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}] + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "Gateway": "", + "IPAddress": "", + "IPPrefixLen": 0, + "MacAddress": "", + "PortMapping": null, + "Ports": null + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": false, + "StartedAt": "2015-01-06T15:47:32.072697474Z" + }, + "Volumes": {}, + "VolumesRW": {} + } + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### List processes running inside a container + +`GET /containers/(id)/top` + +List processes running inside the container `id` + 
+**Example request**: + + GET /containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles": [ + "USER", + "PID", + "%CPU", + "%MEM", + "VSZ", + "RSS", + "TTY", + "STAT", + "START", + "TIME", + "COMMAND" + ], + "Processes": [ + ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], + ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] + ] + } + +Query Parameters: + +- **ps_args** – ps arguments to use (e.g., aux) + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Get container logs + +`GET /containers/(id)/logs` + +Get stdout and stderr logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with `json-file` logging driver. + +**Example request**: + + GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {{ STREAM }} + +Query Parameters: + +- **follow** – 1/True/true or 0/False/false, return stream. Default false +- **stdout** – 1/True/true or 0/False/false, show stdout log. Default false +- **stderr** – 1/True/true or 0/False/false, show stderr log. Default false +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default false +- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all + +Status Codes: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +### Inspect changes on a container's filesystem + +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Export a container + +`GET /containers/(id)/export` + +Export the contents of container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Get container stats based on resource usage + +`GET /containers/(id)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. + +> **Note**: this functionality currently only works when using the *libcontainer* exec-driver. 
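+
+The stream is a sequence of JSON documents, one per line, emitted for as long
+as the connection stays open. As a concrete illustration, here is a minimal
+Python sketch (not part of the API itself) of a consumer; it assumes the
+daemon is reachable on `tcp://localhost:2375` and uses the container name
+`redis1` from the example below:
+
+    import http.client
+    import json
+
+    conn = http.client.HTTPConnection("localhost", 2375)
+    conn.request("GET", "/containers/redis1/stats")
+    resp = conn.getresponse()
+
+    # Each non-empty line of the response body is one stats document.
+    for line in resp:
+        if not line.strip():
+            continue
+        stats = json.loads(line)
+        mem = stats["memory_stats"]["usage"]
+        cpu = stats["cpu_stats"]["cpu_usage"]["total_usage"]
+        print("mem=%d bytes, cpu=%d ns" % (mem, cpu))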
+ +**Example request**: + + GET /containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "network" : { + "rx_dropped" : 0, + "rx_bytes" : 648, + "rx_errors" : 0, + "tx_packets" : 8, + "tx_dropped" : 0, + "rx_packets" : 8, + "tx_errors" : 0, + "tx_bytes" : 648 + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 16970827, + 1839451, + 7107380, + 10571290 + ], + "usage_in_usermode" : 10000000, + "total_usage" : 36488948, + "usage_in_kernelmode" : 20000000 + }, + "system_cpu_usage" : 20091722000000000, + "throttling_data" : {} + } + } + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Resize a container TTY + +`POST /containers/(id)/resize?h=&w=` + +Resize the TTY for container with `id`. The container must be restarted for the resize to take effect. + +**Example request**: + + POST /containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +Status Codes: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +### Start a container + +`POST /containers/(id)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. 
+ +**Example request**: + + POST /containers/(id)/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +### Stop a container + +`POST /containers/(id)/stop` + +Stop the container `id` + +**Example request**: + + POST /containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +### Restart a container + +`POST /containers/(id)/restart` + +Restart the container `id` + +**Example request**: + + POST /containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Kill a container + +`POST /containers/(id)/kill` + +Kill the container `id` + +**Example request**: + + POST /containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters + +- **signal** - Signal to send to the container: integer or string like "SIGINT". + When not set, SIGKILL is assumed and the call will waits for the container to exit. + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Rename a container + +`POST /containers/(id)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **name** – new name for the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +### Pause a container + +`POST /containers/(id)/pause` + +Pause the container `id` + +**Example request**: + + POST /containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Unpause a container + +`POST /containers/(id)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Attach to a container + +`POST /containers/(id)/attach` + +Attach to the container `id` + +**Example request**: + + POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. 
Default false
+
+Status Codes:
+
+- **101** – no error, hints proxy about hijacking
+- **200** – no error, no upgrade header found
+- **400** – bad parameter
+- **404** – no such container
+- **500** – server error
+
+    **Stream details**:
+
+    When the TTY setting is enabled in
+    [`POST /containers/create`
+    ](/reference/api/docker_remote_api_v1.9/#create-a-container "POST /containers/create"),
+    the stream is the raw data from the process PTY and client's stdin.
+    When the TTY is disabled, the stream is multiplexed to separate
+    stdout and stderr.
+
+    The format is a **Header** and a **Payload** (frame).
+
+    **HEADER**
+
+    The header contains information about which stream the payload belongs
+    to (stdout or stderr). It also contains the size of the associated
+    frame, encoded as a uint32 in the last 4 bytes.
+
+    The header occupies the first 8 bytes, like this:
+
+        header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}
+
+    `STREAM_TYPE` can be:
+
+- 0: stdin (will be written on stdout)
+- 1: stdout
+- 2: stderr
+
+    `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of
+    the uint32 size encoded as big endian.
+
+    **PAYLOAD**
+
+    The payload is the raw stream.
+
+    **IMPLEMENTATION**
+
+    The simplest way to implement the Attach protocol is the following:
+
+    1. Read 8 bytes.
+    2. Choose stdout or stderr depending on the first byte.
+    3. Extract the frame size from the last 4 bytes.
+    4. Read the extracted size and output it on the correct output.
+    5. Go to step 1.
+
+### Attach to a container (websocket)
+
+`GET /containers/(id)/attach/ws`
+
+Attach to the container `id` via websocket
+
+Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455)
+
+**Example request**
+
+    GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1
+
+**Example response**
+
+    {{ STREAM }}
+
+Query Parameters:
+
+- **logs** – 1/True/true or 0/False/false, return logs. Default false
+- **stream** – 1/True/true or 0/False/false, return stream.
+  Default false
+- **stdin** – 1/True/true or 0/False/false, if stream=true, attach
+  to stdin. Default false
+- **stdout** – 1/True/true or 0/False/false, if logs=true, return
+  stdout log, if stream=true, attach to stdout. Default false
+- **stderr** – 1/True/true or 0/False/false, if logs=true, return
+  stderr log, if stream=true, attach to stderr. Default false
+
+Status Codes:
+
+- **200** – no error
+- **400** – bad parameter
+- **404** – no such container
+- **500** – server error
+
+### Wait a container
+
+`POST /containers/(id)/wait`
+
+Block until container `id` stops, then return the exit code
+
+**Example request**:
+
+    POST /containers/16253994b7c4/wait HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {"StatusCode": 0}
+
+Status Codes:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+### Remove a container
+
+`DELETE /containers/(id)`
+
+Remove the container `id` from the filesystem
+
+**Example request**:
+
+    DELETE /containers/16253994b7c4?v=1 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+Query Parameters:
+
+- **v** – 1/True/true or 0/False/false, Remove the volumes
+  associated with the container. Default false
+- **force** - 1/True/true or 0/False/false, Kill then remove the container.
+ Default false + +Status Codes: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Copy files or folders from a container + +`POST /containers/(id)/copy` + +Copy files or folders of container `id` + +**Example request**: + + POST /containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +## 2.2 Images + +### List Images + +`GET /images/json` + +**Example request**: + + GET /images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275 + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135 + } + ] + +**Example request, with digest information**: + + GET /images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728 + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +Query Parameters: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a json encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - dangling=true + - label=`key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /build HTTP/1.1 + + {{ TAR STREAM }} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a tar archive compressed with one of the +following algorithms: identity (no compression), gzip, bzip2, xz. 
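+
+As a concrete illustration, here is a minimal Python sketch (not part of the
+API itself) that packs a build instructions file (see below) into such an
+archive in memory and streams it to `POST /build`. It assumes the daemon is
+reachable on `tcp://localhost:2375`; the tag `myimage` and the Dockerfile
+contents are placeholders:
+
+    import http.client
+    import io
+    import json
+    import tarfile
+
+    dockerfile = b"FROM ubuntu\nRUN echo hello\n"   # placeholder instructions
+
+    # Build an uncompressed ("identity") tar archive in memory.
+    buf = io.BytesIO()
+    with tarfile.open(fileobj=buf, mode="w") as tar:
+        info = tarfile.TarInfo(name="Dockerfile")
+        info.size = len(dockerfile)
+        tar.addfile(info, io.BytesIO(dockerfile))
+
+    conn = http.client.HTTPConnection("localhost", 2375)
+    conn.request("POST", "/build?t=myimage", body=buf.getvalue(),
+                 headers={"Content-Type": "application/tar"})
+
+    # The response body is a stream of JSON objects, one per line.
+    for line in conn.getresponse():
+        if line.strip():
+            msg = json.loads(line)
+            print(msg.get("stream") or msg.get("error") or msg, end="")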
+ +The archive must include a build instructions file, typically called +`Dockerfile` at the root of the archive. The `dockerfile` parameter may be +used to specify a different build instructions file by having its value be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which will be accessible in the build context (See the [*ADD build +command*](/reference/builder/#dockerbuilder)). + +The build will also be canceled if the client drops the connection by quitting +or being killed. + +Query Parameters: + +- **dockerfile** - path within the build context to the Dockerfile. This is + ignored if `remote` is specified and points to an individual filename. +- **t** – repository name (and optionally a tag) to be applied to + the resulting image in case of success +- **remote** – A Git repository URI or HTTP/HTTPS URI build source. If the + URI specifies a filename, the file's contents are placed into a file + called `Dockerfile`. +- **q** – suppress verbose build output +- **nocache** – do not use the cache when building the image +- **pull** - attempt to pull the image even if an older image exists locally +- **rm** - remove intermediate containers after a successful build (default behavior) +- **forcerm** - always remove intermediate containers (includes rm) +- **memory** - set memory limit for build +- **memswap** - Total memory (memory + swap), `-1` to disable swap +- **cpushares** - CPU shares (relative weight) +- **cpusetcpus** - CPUs in which to allow execution, e.g., `0-3`, `0,1` + + Request Headers: + +- **Content-type** – should be set to `"application/tar"`. +- **X-Registry-Config** – base64-encoded ConfigFile object + +Status Codes: + +- **200** – no error +- **500** – server error + +### Create an image + +`POST /images/create` + +Create an image, either by pulling it from the registry or by importing it + +**Example request**: + + POST /images/create?fromImage=ubuntu HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + + When using this endpoint to pull an image from the registry, the + `X-Registry-Auth` header can be used to include + a base64-encoded AuthConfig object. + +Query Parameters: + +- **fromImage** – name of the image to pull +- **fromSrc** – source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. 
+- **repo** – repository +- **tag** – tag +- **registry** – the registry to pull from + + Request Headers: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +Status Codes: + +- **200** – no error +- **500** – server error + + + +### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /images/ubuntu/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Created": "2013-03-23T22:24:18.818426-07:00", + "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "ContainerConfig": + { + "Hostname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "PortSpecs": null, + "Tty": true, + "OpenStdin": true, + "StdinOnce": false, + "Env": null, + "Cmd": ["/bin/bash"], + "Dns": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "" + }, + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Parent": "27cf784147099545", + "Size": 6824592 + } + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "b750fe79269d", + "Created": 1364102658, + "CreatedBy": "/bin/bash" + }, + { + "Id": "27cf78414709", + "Created": 1364068391, + "CreatedBy": "" + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + + If you wish to push an image on to a private registry, that image must already have been tagged + into a repository which references that registry host name and port. This repository name should + then be used in the URL. This mirrors the flow of the CLI. + +**Example request**: + + POST /images/registry.acme.com:5000/test/push HTTP/1.1 + + +Query Parameters: + +- **tag** – the tag to associate with the image on the registry, optional + +Request Headers: + +- **X-Registry-Auth** – include a base64-encoded AuthConfig + object. 
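+
+The header value is the JSON-serialized AuthConfig object, base64-encoded. A
+minimal Python sketch of building it (the credentials are placeholders,
+mirroring the `/auth` example elsewhere in this document):
+
+    import base64
+    import json
+
+    auth_config = {
+        "username": "hannibal",
+        "password": "xxxx",
+        "email": "hannibal@a-team.com",
+        "serveraddress": "https://index.docker.io/v1/",
+    }
+
+    # Standard base64 of the JSON object; send the result as X-Registry-Auth.
+    value = base64.b64encode(json.dumps(auth_config).encode()).decode()
+    headers = {"X-Registry-Auth": value}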
+ +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 OK + +Query Parameters: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +Status Codes: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +Query Parameters: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +Status Codes: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... 
+ ] + +Query Parameters: + +- **term** – term to search + +Status Codes: + +- **200** – no error +- **500** – server error + +## 2.3 Misc + +### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /auth HTTP/1.1 + Content-Type: application/json + + { + "username":" hannibal", + "password: "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +Status Codes: + +- **200** – no error +- **204** – no error +- **500** – server error + +### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers": 11, + "Debug": 0, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": 1, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": 1, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OperatingSystem": "Boot2Docker", + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "SwapLimit": 0, + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +Status Codes: + +- **200** – no error +- **500** – server error + +### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.5.0", + "Os": "linux", + "KernelVersion": "3.18.5-tinycore64", + "GoVersion": "go1.4.1", + "GitCommit": "a8a31ef", + "Arch": "amd64", + "ApiVersion": "1.18" + } + +Status Codes: + +- **200** – no error +- **500** – server error + +### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +Status Codes: + +- **200** - no error +- **500** - server error + +### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/vnd.docker.raw-stream + + {"Id": "596069db4bf5"} + +Json Parameters: + +- **config** - the container's configuration + +Query Parameters: + +- **container** – source container +- **repo** – repository +- 
**tag** – tag
+- **comment** – commit message
+- **author** – author (e.g., "John Hannibal Smith
+    <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")
+
+Status Codes:
+
+- **201** – no error
+- **404** – no such container
+- **500** – server error
+
+### Monitor Docker's events
+
+`GET /events`
+
+Get container events from docker, either in real time via streaming, or via
+polling (using `since`).
+
+Docker containers will report the following events:
+
+    create, destroy, die, exec_create, exec_start, export, kill, oom, pause, restart, start, stop, unpause
+
+and Docker images will report:
+
+    untag, delete
+
+**Example request**:
+
+    GET /events?since=1374067924
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924}
+    {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924}
+    {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966}
+    {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970}
+
+Query Parameters:
+
+- **since** – timestamp used for polling
+- **until** – timestamp used for polling
+- **filters** – a json encoded value of the filters (a map[string][]string) to process on the event list. Available filters:
+  - `event=<string>` -- event to filter
+  - `image=<string>` -- image to filter
+  - `container=<string>` -- container to filter
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+### Get a tarball containing all images in a repository
+
+`GET /images/(name)/get`
+
+Get a tarball containing all images and metadata for the repository specified
+by `name`.
+
+If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image
+(and its parents) are returned. If `name` is an image ID, similarly only that
+image (and its parents) are returned, but with the exclusion of the
+`repositories` file in the tarball, as there were no image names referenced.
+
+See the [image tarball format](#image-tarball-format) for more details.
+
+**Example request**
+
+    GET /images/ubuntu/get
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/x-tar
+
+    Binary data stream
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+### Get a tarball containing all images
+
+`GET /images/get`
+
+Get a tarball containing all images and metadata for one or more repositories.
+
+For each value of the `names` parameter: if it is a specific name and tag (e.g.
+`ubuntu:latest`), then only that image (and its parents) are returned; if it is
+an image ID, similarly only that image (and its parents) are returned and there
+would be no names referenced in the `repositories` file for this image ID.
+
+See the [image tarball format](#image-tarball-format) for more details.
+
+**Example request**
+
+    GET /images/get?names=myname%2Fmyapp%3Alatest&names=busybox
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/x-tar
+
+    Binary data stream
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+### Load a tarball with a set of images and tags into docker
+
+`POST /images/load`
+
+Load a set of images and tags into the docker repository.
+See the [image tarball format](#image-tarball-format) for more details.
+
+**Example request**
+
+    POST /images/load
+
+    Tarball in body
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+### Image tarball format
+
+An image tarball contains one directory per image layer (named using its long ID),
+each containing three files:
+
+1. `VERSION`: currently `1.0` - the file format version
+2. `json`: detailed layer information, similar to `docker inspect layer_id`
+3. `layer.tar`: A tarfile containing the filesystem changes in this layer
+
+The `layer.tar` file will contain `aufs` style `.wh..wh.aufs` files and directories
+for storing attribute changes and deletions.
+
+If the tarball defines a repository, there will also be a `repositories` file at
+the root that contains a list of repository and tag names mapped to layer IDs.
+
+```
+{"hello-world":
+    {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"}
+}
+```
+
+### Exec Create
+
+`POST /containers/(id)/exec`
+
+Sets up an exec instance in a running container `id`
+
+**Example request**:
+
+    POST /containers/e90e34656806/exec HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "AttachStdin": false,
+      "AttachStdout": true,
+      "AttachStderr": true,
+      "Tty": false,
+      "Cmd": [
+        "date"
+      ]
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {
+        "Id": "f90e34656806",
+        "Warnings": []
+    }
+
+Json Parameters:
+
+- **AttachStdin** - Boolean value, attaches to stdin of the exec command.
+- **AttachStdout** - Boolean value, attaches to stdout of the exec command.
+- **AttachStderr** - Boolean value, attaches to stderr of the exec command.
+- **Tty** - Boolean value to allocate a pseudo-TTY
+- **Cmd** - Command to run specified as a string or an array of strings.
+
+
+Status Codes:
+
+- **201** – no error
+- **404** – no such container
+
+### Exec Start
+
+`POST /exec/(id)/start`
+
+Starts a previously set up exec instance `id`. If `detach` is true, this API
+returns after starting the `exec` command. Otherwise, this API sets up an
+interactive session with the `exec` command.
+
+**Example request**:
+
+    POST /exec/e90e34656806/start HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "Detach": false,
+      "Tty": false
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {{ STREAM }}
+
+Json Parameters:
+
+- **Detach** - Detach from the exec command
+- **Tty** - Boolean value to allocate a pseudo-TTY
+
+Status Codes:
+
+- **201** – no error
+- **404** – no such exec instance
+
+    **Stream details**:
+    Similar to the stream behavior of the `POST /containers/(id)/attach` API
+
+### Exec Resize
+
+`POST /exec/(id)/resize`
+
+Resizes the tty session used by the exec command `id`.
+This API is valid only if `tty` was specified as part of creating and starting the exec command.
+
+**Example request**:
+
+    POST /exec/e90e34656806/resize HTTP/1.1
+    Content-Type: text/plain
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: text/plain
+
+Query Parameters:
+
+- **h** – height of tty session
+- **w** – width
+
+Status Codes:
+
+- **201** – no error
+- **404** – no such exec instance
+
+### Exec Inspect
+
+`GET /exec/(id)/json`
+
+Return low-level information about the exec command `id`.
+ +**Example request**: + + GET /exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: plain/text + + { + "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39", + "Running" : false, + "ExitCode" : 2, + "ProcessConfig" : { + "privileged" : false, + "user" : "", + "tty" : false, + "entrypoint" : "sh", + "arguments" : [ + "-c", + "exit 2" + ] + }, + "OpenStdin" : false, + "OpenStderr" : false, + "OpenStdout" : false, + "Container" : { + "State" : { + "Running" : true, + "Paused" : false, + "Restarting" : false, + "OOMKilled" : false, + "Pid" : 3650, + "ExitCode" : 0, + "Error" : "", + "StartedAt" : "2014-11-17T22:26:03.717657531Z", + "FinishedAt" : "0001-01-01T00:00:00Z" + }, + "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c", + "Created" : "2014-11-17T22:26:03.626304998Z", + "Path" : "date", + "Args" : [], + "Config" : { + "Hostname" : "8f177a186b97", + "Domainname" : "", + "User" : "", + "AttachStdin" : false, + "AttachStdout" : false, + "AttachStderr" : false, + "PortSpecs" : null, + "ExposedPorts" : null, + "Tty" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], + "Cmd" : [ + "date" + ], + "Image" : "ubuntu", + "Volumes" : null, + "WorkingDir" : "", + "Entrypoint" : null, + "NetworkDisabled" : false, + "MacAddress" : "", + "OnBuild" : null, + "SecurityOpt" : null + }, + "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5", + "NetworkSettings" : { + "IPAddress" : "172.17.0.2", + "IPPrefixLen" : 16, + "MacAddress" : "02:42:ac:11:00:02", + "Gateway" : "172.17.42.1", + "Bridge" : "docker0", + "PortMapping" : null, + "Ports" : {} + }, + "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf", + "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname", + "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Name" : "/test", + "Driver" : "aufs", + "ExecDriver" : "native-0.2", + "MountLabel" : "", + "ProcessLabel" : "", + "AppArmorProfile" : "", + "RestartCount" : 0, + "Volumes" : {}, + "VolumesRW" : {} + } + } + +Status Codes: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +# 3. Going further + +## 3.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it + - Then retry to create the container + +- Start the container + +- If you are not in detached mode: +- Attach to the container, using logs=1 (to have stdout and + stderr from the container's start) and stream=1 + +- If in detached mode or only stdin is attached: +- Display the container's id + +## 3.2 Hijacking + +In this version of the API, /attach, uses hijacking to transport stdin, +stdout and stderr on the same socket. + +To hint potential proxies about connection hijacking, Docker client sends +connection upgrade headers similarly to websocket. 
+
+    Upgrade: tcp
+    Connection: Upgrade
+
+When the Docker daemon detects the `Upgrade` header, it switches its status code
+from **200 OK** to **101 UPGRADED** and resends the same headers.
+
+This might change in the future.
+
+## 3.3 CORS Requests
+
+To enable cross-origin requests to the Remote API, pass the `--api-cors-header`
+option when running Docker in daemon mode. Setting it to `*` allows requests
+from all origins; the default (blank) leaves CORS disabled:
+
+    $ docker -d -H="192.168.1.9:2375" --api-cors-header="http://foo.bar"
diff --git a/docs/reference/api/docker_remote_api_v1.19.md b/docs/reference/api/docker_remote_api_v1.19.md
new file mode 100644
index 00000000..3068b102
--- /dev/null
+++ b/docs/reference/api/docker_remote_api_v1.19.md
@@ -0,0 +1,2161 @@
+
+# Docker Remote API v1.19
+
+## 1. Brief introduction
+
+ - The Remote API has replaced `rcli`.
+ - The daemon listens on `unix:///var/run/docker.sock` but you can
+   [Bind Docker to another host/port or a Unix socket](
+   /articles/basics/#bind-docker-to-another-hostport-or-a-unix-socket).
+ - The API tends to be REST. However, for some complex commands, like `attach`
+   or `pull`, the HTTP connection is hijacked to transport `stdout`,
+   `stdin` and `stderr`.
+ - When the client API version is newer than the daemon's, these calls return an HTTP
+   `400 Bad Request` error message.
+
+# 2. Endpoints
+
+## 2.1 Containers
+
+### List containers
+
+`GET /containers/json`
+
+List containers
+
+**Example request**:
+
+    GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+        {
+            "Id": "8dfafdbc3a40",
+            "Names": ["/boring_feynman"],
+            "Image": "ubuntu:latest",
+            "Command": "echo 1",
+            "Created": 1367854155,
+            "Status": "Exit 0",
+            "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}],
+            "Labels": {
+                "com.example.vendor": "Acme",
+                "com.example.license": "GPL",
+                "com.example.version": "1.0"
+            },
+            "SizeRw": 12288,
+            "SizeRootFs": 0
+        },
+        {
+            "Id": "9cd87474be90",
+            "Names": ["/coolName"],
+            "Image": "ubuntu:latest",
+            "Command": "echo 222222",
+            "Created": 1367854155,
+            "Status": "Exit 0",
+            "Ports": [],
+            "Labels": {},
+            "SizeRw": 12288,
+            "SizeRootFs": 0
+        },
+        {
+            "Id": "3176a2479c92",
+            "Names": ["/sleepy_dog"],
+            "Image": "ubuntu:latest",
+            "Command": "echo 3333333333333333",
+            "Created": 1367854154,
+            "Status": "Exit 0",
+            "Ports": [],
+            "Labels": {},
+            "SizeRw": 12288,
+            "SizeRootFs": 0
+        },
+        {
+            "Id": "4cb07b47f9fb",
+            "Names": ["/running_cat"],
+            "Image": "ubuntu:latest",
+            "Command": "echo 444444444444444444444444444444444",
+            "Created": 1367854152,
+            "Status": "Exit 0",
+            "Ports": [],
+            "Labels": {},
+            "SizeRw": 12288,
+            "SizeRootFs": 0
+        }
+    ]
+
+Query Parameters:
+
+- **all** – 1/True/true or 0/False/false, Show all containers.
+  Only running containers are shown by default (i.e., this defaults to false)
+- **limit** – Show `limit` last created
+  containers, include non-running ones.
+- **since** – Show only containers created since Id, include
+  non-running ones.
+- **before** – Show only containers created before Id, include
+  non-running ones.
+- **size** – 1/True/true or 0/False/false, Show the container
+  sizes
+- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. 
Available filters: + - `exited=`; -- containers with exit code of `` ; + - `status=`(`restarting`|`running`|`paused`|`exited`) + - `label=key` or `label="key=value"` of a container label + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Entrypoint": "", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "BlkioWeight": 300, + "OomKillDisable": false, + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [""], + "CgroupParent": "" + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806" + "Warnings":[] + } + +Json Parameters: + +- **Hostname** - A string value containing the hostname to use for the + container. +- **Domainname** - A string value containing the domain name to use + for the container. +- **User** - A string value specifying the user inside the container. +- **Memory** - Memory limit in bytes. +- **MemorySwap**- Total memory limit (memory + swap); set `-1` to disable swap + You must use this with `memory` and make the swap value larger than `memory`. +- **CpuShares** - An integer value containing the container's CPU Shares + (ie. the relative weight vs other containers). +- **CpuPeriod** - The length of a CPU period in microseconds. +- **Cpuset** - Deprecated please don't use. Use `CpusetCpus` instead. +- **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use. +- **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. +- **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000. +- **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not. +- **AttachStdin** - Boolean value, attaches to `stdin`. +- **AttachStdout** - Boolean value, attaches to `stdout`. +- **AttachStderr** - Boolean value, attaches to `stderr`. +- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. +- **OpenStdin** - Boolean value, opens stdin, +- **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. 
+- **Env** - A list of environment variables in the form of `VAR=value`
+- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value"[,"key2":"value2"]}`
+- **Cmd** - Command to run specified as a string or an array of strings.
+- **Entrypoint** - Set the entry point for the container as a string or an array
+  of strings.
+- **Image** - A string specifying the image name to use for the container.
+- **Volumes** – An object mapping mount point paths (strings) inside the
+  container to empty objects.
+- **WorkingDir** - A string specifying the working directory for commands to
+  run in.
+- **NetworkDisabled** - Boolean value, when true disables networking for the
+  container
+- **ExposedPorts** - An object mapping ports to an empty object in the form of:
+  `"ExposedPorts": { "<port>/<tcp|udp>: {}" }`
+- **HostConfig**
+    - **Binds** – A list of volume bindings for this container. Each volume binding is a string in one of these forms:
+       + `container_path` to create a new volume for the container
+       + `host_path:container_path` to bind-mount a host path into the container
+       + `host_path:container_path:ro` to make the bind-mount read-only inside the container.
+    - **Links** - A list of links for the container. Each link entry should be
+      in the form of `container_name:alias`.
+    - **LxcConf** - LXC specific configurations. These configurations only
+      work when using the `lxc` execution driver.
+    - **PortBindings** - A map of exposed container ports and the host port they
+      should map to. A JSON object in the form
+      `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`.
+      Take note that `port` is specified as a string and not an integer value.
+    - **PublishAllPorts** - Allocates a random host port for all of a container's
+      exposed ports. Specified as a boolean value.
+    - **Privileged** - Gives the container full access to the host. Specified as
+      a boolean value.
+    - **ReadonlyRootfs** - Mount the container's root filesystem as read only.
+      Specified as a boolean value.
+    - **Dns** - A list of DNS servers for the container to use.
+    - **DnsSearch** - A list of DNS search domains
+    - **ExtraHosts** - A list of hostnames/IP mappings to add to the
+      container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
+    - **VolumesFrom** - A list of volumes to inherit from another container.
+      Specified in the form `<container name>[:<ro|rw>]`
+    - **CapAdd** - A list of kernel capabilities to add to the container.
+    - **CapDrop** - A list of kernel capabilities to drop from the container.
+    - **RestartPolicy** – The behavior to apply when the container exits. The
+      value is an object with a `Name` property of either `"always"` to
+      always restart or `"on-failure"` to restart only when the container
+      exit code is non-zero. If `on-failure` is used, `MaximumRetryCount`
+      controls the number of times to retry before giving up.
+      The default is not to restart. (optional)
+      An ever increasing delay (double the previous delay, starting at 100 ms)
+      is added before each restart to prevent flooding the server.
+    - **NetworkMode** - Sets the networking mode for the container. Supported
+      values are: `bridge`, `host`, and `container:<name|id>`
+    - **Devices** - A list of devices to add to the container specified as a JSON object in the
+      form
+      `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+    - **Ulimits** - A list of ulimits to set in the container, specified as
+      `{ "Name": <name>, "Soft": <soft limit>, "Hard": <hard limit> }`, for example:
+      `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }`
+    - **SecurityOpt**: A list of string values to customize labels for MLS
+      systems, such as SELinux.
+    - **LogConfig** - Log configuration for the container, specified as a JSON object in the form
+      `{ "Type": "<driver_name>", "Config": {"key1": "val1"}}`.
+      Available types: `json-file`, `syslog`, `journald`, `none`.
+      `syslog` available options are: `address`.
+    - **CgroupParent** - Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist.
+
+Query Parameters:
+
+- **name** – Assign the specified name to the container. Must
+  match `/?[a-zA-Z0-9_-]+`.
+
+Status Codes:
+
+- **201** – no error
+- **404** – no such container
+- **406** – impossible to attach (container not running)
+- **500** – server error
+
+### Inspect a container
+
+`GET /containers/(id)/json`
+
+Return low-level information on the container `id`
+
+
+**Example request**:
+
+    GET /containers/4fa6e0f0c678/json HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "AppArmorProfile": "",
+        "Args": [
+            "-c",
+            "exit 9"
+        ],
+        "Config": {
+            "AttachStderr": true,
+            "AttachStdin": false,
+            "AttachStdout": true,
+            "Cmd": [
+                "/bin/sh",
+                "-c",
+                "exit 9"
+            ],
+            "Domainname": "",
+            "Entrypoint": null,
+            "Env": [
+                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+            ],
+            "ExposedPorts": null,
+            "Hostname": "ba033ac44011",
+            "Image": "ubuntu",
+            "Labels": {
+                "com.example.vendor": "Acme",
+                "com.example.license": "GPL",
+                "com.example.version": "1.0"
+            },
+            "MacAddress": "",
+            "NetworkDisabled": false,
+            "OnBuild": null,
+            "OpenStdin": false,
+            "PortSpecs": null,
+            "StdinOnce": false,
+            "Tty": false,
+            "User": "",
+            "Volumes": null,
+            "WorkingDir": ""
+        },
+        "Created": "2015-01-06T15:47:31.485331387Z",
+        "Driver": "devicemapper",
+        "ExecDriver": "native-0.2",
+        "ExecIDs": null,
+        "HostConfig": {
+            "Binds": null,
+            "BlkioWeight": 0,
+            "CapAdd": null,
+            "CapDrop": null,
+            "ContainerIDFile": "",
+            "CpusetCpus": "",
+            "CpusetMems": "",
+            "CpuShares": 0,
+            "CpuPeriod": 100000,
+            "Devices": [],
+            "Dns": null,
+            "DnsSearch": null,
+            "ExtraHosts": null,
+            "IpcMode": "",
+            "Links": null,
+            "LxcConf": [],
+            "Memory": 0,
+            "MemorySwap": 0,
+            "OomKillDisable": false,
+            "NetworkMode": "bridge",
+            "PortBindings": {},
+            "Privileged": false,
+            "ReadonlyRootfs": false,
+            "PublishAllPorts": false,
+            "RestartPolicy": {
+                "MaximumRetryCount": 2,
+                "Name": "on-failure"
+            },
+            "LogConfig": {
+                "Config": null,
+                "Type": "json-file"
+            },
+            "SecurityOpt": null,
+            "VolumesFrom": null,
+            "Ulimits": [{}]
+        },
+        "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname",
+        "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts",
+        "LogPath": 
"/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "Gateway": "", + "IPAddress": "", + "IPPrefixLen": 0, + "MacAddress": "", + "PortMapping": null, + "Ports": null + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": false, + "StartedAt": "2015-01-06T15:47:32.072697474Z" + }, + "Volumes": {}, + "VolumesRW": {} + } + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### List processes running inside a container + +`GET /containers/(id)/top` + +List processes running inside the container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles": [ + "USER", + "PID", + "%CPU", + "%MEM", + "VSZ", + "RSS", + "TTY", + "STAT", + "START", + "TIME", + "COMMAND" + ], + "Processes": [ + ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], + ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] + ] + } + +Query Parameters: + +- **ps_args** – ps arguments to use (e.g., aux) + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Get container logs + +`GET /containers/(id)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with `json-file` logging driver. + +**Example request**: + + GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {{ STREAM }} + +Query Parameters: + +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. +- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. +- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp + will only output log-entries since that timestamp. Default: 0 (unfiltered) +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default `false`. +- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. 
+ +Status Codes: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +### Inspect changes on a container's filesystem + +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Export a container + +`GET /containers/(id)/export` + +Export the contents of container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Get container stats based on resource usage + +`GET /containers/(id)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. + +> **Note**: this functionality currently only works when using the *libcontainer* exec-driver. + +**Example request**: + + GET /containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "network" : { + "rx_dropped" : 0, + "rx_bytes" : 648, + "rx_errors" : 0, + "tx_packets" : 8, + "tx_dropped" : 0, + "rx_packets" : 8, + "tx_errors" : 0, + "tx_bytes" : 648 + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 16970827, + 1839451, + 7107380, + 10571290 + ], + "usage_in_usermode" : 10000000, + "total_usage" : 36488948, + "usage_in_kernelmode" : 20000000 + }, + "system_cpu_usage" : 20091722000000000, + "throttling_data" : {} + } + } + +Query Parameters: + +- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Resize a container TTY + +`POST /containers/(id)/resize?h=&w=` + +Resize the TTY for container with `id`. You must restart the container for the resize to take effect. 
+ +**Example request**: + + POST /containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +Status Codes: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +### Start a container + +`POST /containers/(id)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. + +**Example request**: + + POST /containers/(id)/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +### Stop a container + +`POST /containers/(id)/stop` + +Stop the container `id` + +**Example request**: + + POST /containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +### Restart a container + +`POST /containers/(id)/restart` + +Restart the container `id` + +**Example request**: + + POST /containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Kill a container + +`POST /containers/(id)/kill` + +Kill the container `id` + +**Example request**: + + POST /containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. 
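+
+A minimal Python sketch of sending a specific signal instead of the default
+`SIGKILL` (the daemon address and container ID are placeholders):
+
+    import http.client
+
+    conn = http.client.HTTPConnection("localhost", 2375)
+    conn.request("POST", "/containers/e90e34656806/kill?signal=SIGTERM")
+    resp = conn.getresponse()
+    print(resp.status)    # 204 on success, 404 if no such container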
+
+Status Codes:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+### Rename a container
+
+`POST /containers/(id)/rename`
+
+Rename the container `id` to a `new_name`
+
+**Example request**:
+
+    POST /containers/e90e34656806/rename?name=new_name HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+Query Parameters:
+
+- **name** – new name for the container
+
+Status Codes:
+
+- **204** – no error
+- **404** – no such container
+- **409** – conflict, name already assigned
+- **500** – server error
+
+### Pause a container
+
+`POST /containers/(id)/pause`
+
+Pause the container `id`
+
+**Example request**:
+
+    POST /containers/e90e34656806/pause HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+Status Codes:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+### Unpause a container
+
+`POST /containers/(id)/unpause`
+
+Unpause the container `id`
+
+**Example request**:
+
+    POST /containers/e90e34656806/unpause HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+Status Codes:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+### Attach to a container
+
+`POST /containers/(id)/attach`
+
+Attach to the container `id`
+
+**Example request**:
+
+    POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 101 UPGRADED
+    Content-Type: application/vnd.docker.raw-stream
+    Connection: Upgrade
+    Upgrade: tcp
+
+    {{ STREAM }}
+
+Query Parameters:
+
+- **logs** – 1/True/true or 0/False/false, return logs. Default `false`.
+- **stream** – 1/True/true or 0/False/false, return stream.
+  Default `false`.
+- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach
+  to `stdin`. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stdout` log, if `stream=true`, attach to `stdout`. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stderr` log, if `stream=true`, attach to `stderr`. Default `false`.
+
+Status Codes:
+
+- **101** – no error, hints proxy about hijacking
+- **200** – no error, no upgrade header found
+- **400** – bad parameter
+- **404** – no such container
+- **500** – server error
+
+ **Stream details**:
+
+ When the TTY setting is enabled in
+ [`POST /containers/create`
+ ](/reference/api/docker_remote_api_v1.9/#create-a-container "POST /containers/create"),
+ the stream is the raw data from the process PTY and client's `stdin`.
+ When the TTY is disabled, the stream is multiplexed to separate
+ `stdout` and `stderr`.
+
+ The format is a **Header** and a **Payload** (frame).
+
+ **HEADER**
+
+ The header contains the information about which stream the payload belongs
+ to (`stdout` or `stderr`). It also contains the size of the associated frame
+ encoded in the last four bytes (`uint32`).
+
+ It is encoded in the first eight bytes like this:
+
+    header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}
+
+ `STREAM_TYPE` can be:
+
+- 0: `stdin` (is written on `stdout`)
+- 1: `stdout`
+- 2: `stderr`
+
+ `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of
+ the `uint32` size encoded as big endian.
+
+ **PAYLOAD**
+
+ The payload is the raw stream.
+
+ **IMPLEMENTATION**
+
+ The simplest way to implement the Attach protocol is the following:
+
+ 1. Read eight bytes.
+ 2. Choose `stdout` or `stderr` depending on the first byte.
+ 3. Extract the frame size from the last four bytes.
+ 4. Read the extracted size and output it on the correct output.
+ 5. Goto 1.
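+
+For illustration, a minimal Go sketch of this loop is shown below; the
+function name `demux` and the use of `os.Stdout`/`os.Stderr` are assumptions
+made for the example, not part of the API:
+
+```
+package main
+
+import (
+    "encoding/binary"
+    "io"
+    "log"
+    "os"
+)
+
+// demux reads framed attach output from r and copies each frame to
+// stdout or stderr according to the STREAM_TYPE byte in the header.
+func demux(r io.Reader) error {
+    header := make([]byte, 8)
+    for {
+        // 1. Read eight bytes.
+        if _, err := io.ReadFull(r, header); err != nil {
+            if err == io.EOF {
+                return nil // clean end of stream
+            }
+            return err
+        }
+        // 2. Choose stdout or stderr depending on the first byte.
+        var w io.Writer = os.Stdout
+        if header[0] == 2 {
+            w = os.Stderr
+        }
+        // 3. Extract the big-endian frame size from the last four bytes.
+        size := binary.BigEndian.Uint32(header[4:])
+        // 4. Read the extracted size and write it to the chosen output.
+        if _, err := io.CopyN(w, r, int64(size)); err != nil {
+            return err
+        }
+        // 5. Goto 1 (loop).
+    }
+}
+
+func main() {
+    // For demonstration, demultiplex a framed stream arriving on stdin.
+    if err := demux(os.Stdin); err != nil {
+        log.Fatal(err)
+    }
+}
+```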
+
+### Attach to a container (websocket)
+
+`GET /containers/(id)/attach/ws`
+
+Attach to the container `id` via websocket
+
+Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455)
+
+**Example request**
+
+    GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1
+
+**Example response**
+
+    {{ STREAM }}
+
+Query Parameters:
+
+- **logs** – 1/True/true or 0/False/false, return logs. Default `false`.
+- **stream** – 1/True/true or 0/False/false, return stream.
+  Default `false`.
+- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach
+  to `stdin`. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stdout` log, if `stream=true`, attach to `stdout`. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stderr` log, if `stream=true`, attach to `stderr`. Default `false`.
+
+Status Codes:
+
+- **200** – no error
+- **400** – bad parameter
+- **404** – no such container
+- **500** – server error
+
+### Wait a container
+
+`POST /containers/(id)/wait`
+
+Block until container `id` stops, then returns the exit code
+
+**Example request**:
+
+    POST /containers/16253994b7c4/wait HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {"StatusCode": 0}
+
+Status Codes:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+### Remove a container
+
+`DELETE /containers/(id)`
+
+Remove the container `id` from the filesystem
+
+**Example request**:
+
+    DELETE /containers/16253994b7c4?v=1 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+Query Parameters:
+
+- **v** – 1/True/true or 0/False/false, Remove the volumes
+  associated with the container. Default `false`.
+- **force** - 1/True/true or 0/False/false, Kill then remove the container.
+  Default `false`.
+ +Status Codes: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Copy files or folders from a container + +`POST /containers/(id)/copy` + +Copy files or folders of container `id` + +**Example request**: + + POST /containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +## 2.2 Images + +### List Images + +`GET /images/json` + +**Example request**: + + GET /images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275 + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135 + } + ] + +**Example request, with digest information**: + + GET /images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728 + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +Query Parameters: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /build HTTP/1.1 + + {{ TAR STREAM }} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. 
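+
+For illustration, here is a minimal Go sketch of assembling such a context in
+memory and submitting it; the daemon address `localhost:2375`, the Dockerfile
+contents, and the `myimage` tag are assumptions made for the example:
+
+```
+package main
+
+import (
+    "archive/tar"
+    "bytes"
+    "io"
+    "log"
+    "net/http"
+    "os"
+)
+
+func main() {
+    // Build an uncompressed ("identity") tar archive holding a single
+    // Dockerfile at the root of the context.
+    dockerfile := []byte("FROM ubuntu\nRUN echo hello > /world\n")
+    buf := new(bytes.Buffer)
+    tw := tar.NewWriter(buf)
+    hdr := &tar.Header{Name: "Dockerfile", Mode: 0600, Size: int64(len(dockerfile))}
+    if err := tw.WriteHeader(hdr); err != nil {
+        log.Fatal(err)
+    }
+    if _, err := tw.Write(dockerfile); err != nil {
+        log.Fatal(err)
+    }
+    if err := tw.Close(); err != nil {
+        log.Fatal(err)
+    }
+
+    // POST the archive; the response body is the JSON progress stream.
+    resp, err := http.Post("http://localhost:2375/build?t=myimage", "application/tar", buf)
+    if err != nil {
+        log.Fatal(err)
+    }
+    defer resp.Body.Close()
+    io.Copy(os.Stdout, resp.Body)
+}
+```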
+ +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](/reference/builder/#dockerbuilder)). + +The build is canceled if the client drops the connection by quitting +or being killed. + +Query Parameters: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – Repository name (and optionally a tag) to be applied to + the resulting image in case of success. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to disable swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). + + Request Headers: + +- **Content-type** – Set to `"application/tar"`. +- **X-Registry-Config** – base64-encoded ConfigFile object + +Status Codes: + +- **200** – no error +- **500** – server error + +### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /images/create?fromImage=ubuntu HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +Query Parameters: + +- **fromImage** – Name of the image to pull. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. +- **repo** – Repository name. +- **tag** – Tag. +- **registry** – The registry to pull from. 
+
+Request Headers:
+
+- **X-Registry-Auth** – base64-encoded AuthConfig object
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+
+
+### Inspect an image
+
+`GET /images/(name)/json`
+
+Return low-level information on the image `name`
+
+**Example request**:
+
+    GET /images/ubuntu/json HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "Created": "2013-03-23T22:24:18.818426-07:00",
+        "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0",
+        "ContainerConfig":
+        {
+            "Hostname": "",
+            "User": "",
+            "AttachStdin": false,
+            "AttachStdout": false,
+            "AttachStderr": false,
+            "PortSpecs": null,
+            "Tty": true,
+            "OpenStdin": true,
+            "StdinOnce": false,
+            "Env": null,
+            "Cmd": ["/bin/bash"],
+            "Dns": null,
+            "Image": "ubuntu",
+            "Labels": {
+                "com.example.vendor": "Acme",
+                "com.example.license": "GPL",
+                "com.example.version": "1.0"
+            },
+            "Volumes": null,
+            "VolumesFrom": "",
+            "WorkingDir": ""
+        },
+        "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
+        "Parent": "27cf784147099545",
+        "Size": 6824592
+    }
+
+Status Codes:
+
+- **200** – no error
+- **404** – no such image
+- **500** – server error
+
+### Get the history of an image
+
+`GET /images/(name)/history`
+
+Return the history of the image `name`
+
+**Example request**:
+
+    GET /images/ubuntu/history HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+        {
+            "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710",
+            "Created": 1398108230,
+            "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /",
+            "Tags": [
+                "ubuntu:lucid",
+                "ubuntu:10.04"
+            ],
+            "Size": 182964289,
+            "Comment": ""
+        },
+        {
+            "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8",
+            "Created": 1398108222,
+            "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/",
+            "Tags": null,
+            "Size": 0,
+            "Comment": ""
+        },
+        {
+            "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158",
+            "Created": 1371157430,
+            "CreatedBy": "",
+            "Tags": [
+                "scratch12:latest",
+                "scratch:latest"
+            ],
+            "Size": 0,
+            "Comment": "Imported from -"
+        }
+    ]
+
+Status Codes:
+
+- **200** – no error
+- **404** – no such image
+- **500** – server error
+
+### Push an image on the registry
+
+`POST /images/(name)/push`
+
+Push the image `name` on the registry
+
+**Example request**:
+
+    POST /images/test/push HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {"status": "Pushing..."}
+    {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}
+    {"error": "Invalid..."}
+    ...
+
+If you wish to push an image to a private registry, that image must already be
+tagged into a repository which references that registry's `hostname` and `port`.
+This repository name should then be used in the URL. This mirrors the command
+line's flow.
+
+**Example request**:
+
+    POST /images/registry.acme.com:5000/test/push HTTP/1.1
+
+
+Query Parameters:
+
+- **tag** – The tag to associate with the image on the registry. This is optional.
+
+Request Headers:
+
+- **X-Registry-Auth** – Include a base64-encoded AuthConfig object.
+
+Status Codes:
+
+- **200** – no error
+- **404** – no such image
+- **500** – server error
+
+### Tag an image into a repository
+
+`POST /images/(name)/tag`
+
+Tag the image `name` into a repository
+
+**Example request**:
+
+    POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+
+Query Parameters:
+
+- **repo** – The repository to tag in
+- **force** – 1/True/true or 0/False/false, default false
+- **tag** - The new tag name
+
+Status Codes:
+
+- **201** – no error
+- **400** – bad parameter
+- **404** – no such image
+- **409** – conflict
+- **500** – server error
+
+### Remove an image
+
+`DELETE /images/(name)`
+
+Remove the image `name` from the filesystem
+
+**Example request**:
+
+    DELETE /images/test HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-type: application/json
+
+    [
+     {"Untagged": "3e2f21a89f"},
+     {"Deleted": "3e2f21a89f"},
+     {"Deleted": "53b4f83ac9"}
+    ]
+
+Query Parameters:
+
+- **force** – 1/True/true or 0/False/false, default false
+- **noprune** – 1/True/true or 0/False/false, default false
+
+Status Codes:
+
+- **200** – no error
+- **404** – no such image
+- **409** – conflict
+- **500** – server error
+
+### Search images
+
+`GET /images/search`
+
+Search for an image on [Docker Hub](https://hub.docker.com). This API
+returns both the `is_trusted` and `is_automated` fields. Currently, they
+are considered identical. In the future, the `is_trusted` property will
+be deprecated and replaced by the `is_automated` property.
+
+> **Note**:
+> The response keys have changed from API v1.6 to reflect the JSON
+> sent by the registry server to the docker daemon's request.
+
+**Example request**:
+
+    GET /images/search?term=sshd HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+            {
+                "star_count": 12,
+                "is_official": false,
+                "name": "wma55/u1210sshd",
+                "is_trusted": false,
+                "is_automated": false,
+                "description": ""
+            },
+            {
+                "star_count": 10,
+                "is_official": false,
+                "name": "jdswinbank/sshd",
+                "is_trusted": false,
+                "is_automated": false,
+                "description": ""
+            },
+            {
+                "star_count": 18,
+                "is_official": false,
+                "name": "vgauthier/sshd",
+                "is_trusted": false,
+                "is_automated": false,
+                "description": ""
+            }
+    ...
+    ]
+
+Query Parameters:
+
+- **term** – term to search
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
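+
+For illustration, a minimal Go sketch of calling this endpoint and decoding
+the result is shown below; the daemon address `localhost:2375` and the `sshd`
+search term are assumptions made for the example:
+
+```
+package main
+
+import (
+    "encoding/json"
+    "fmt"
+    "log"
+    "net/http"
+)
+
+// searchResult mirrors the fields of one entry in the response array.
+type searchResult struct {
+    StarCount   int    `json:"star_count"`
+    IsOfficial  bool   `json:"is_official"`
+    Name        string `json:"name"`
+    IsAutomated bool   `json:"is_automated"`
+    Description string `json:"description"`
+}
+
+func main() {
+    resp, err := http.Get("http://localhost:2375/images/search?term=sshd")
+    if err != nil {
+        log.Fatal(err)
+    }
+    defer resp.Body.Close()
+
+    // The endpoint returns a plain JSON array of results.
+    var results []searchResult
+    if err := json.NewDecoder(resp.Body).Decode(&results); err != nil {
+        log.Fatal(err)
+    }
+    for _, r := range results {
+        fmt.Printf("%5d %s\n", r.StarCount, r.Name)
+    }
+}
+```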
+
+## 2.3 Misc
+
+### Check auth configuration
+
+`POST /auth`
+
+Get the default username and email
+
+**Example request**:
+
+    POST /auth HTTP/1.1
+    Content-Type: application/json
+
+    {
+         "username": "hannibal",
+         "password": "xxxx",
+         "email": "hannibal@a-team.com",
+         "serveraddress": "https://index.docker.io/v1/"
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+Status Codes:
+
+- **200** – no error
+- **204** – no error
+- **500** – server error
+
+### Display system-wide information
+
+`GET /info`
+
+Display system-wide information
+
+**Example request**:
+
+    GET /info HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "Containers": 11,
+        "CpuCfsPeriod": true,
+        "CpuCfsQuota": true,
+        "Debug": false,
+        "DockerRootDir": "/var/lib/docker",
+        "Driver": "btrfs",
+        "DriverStatus": [[""]],
+        "ExecutionDriver": "native-0.1",
+        "ExperimentalBuild": false,
+        "HttpProxy": "http://test:test@localhost:8080",
+        "HttpsProxy": "https://test:test@localhost:8080",
+        "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS",
+        "IPv4Forwarding": true,
+        "Images": 16,
+        "IndexServerAddress": "https://index.docker.io/v1/",
+        "InitPath": "/usr/bin/docker",
+        "InitSha1": "",
+        "KernelVersion": "3.12.0-1-amd64",
+        "Labels": [
+            "storage=ssd"
+        ],
+        "MemTotal": 2099236864,
+        "MemoryLimit": true,
+        "NCPU": 1,
+        "NEventsListener": 0,
+        "NFd": 11,
+        "NGoroutines": 21,
+        "Name": "prod-server-42",
+        "NoProxy": "9.81.1.160",
+        "OomKillDisable": true,
+        "OperatingSystem": "Boot2Docker",
+        "RegistryConfig": {
+            "IndexConfigs": {
+                "docker.io": {
+                    "Mirrors": null,
+                    "Name": "docker.io",
+                    "Official": true,
+                    "Secure": true
+                }
+            },
+            "InsecureRegistryCIDRs": [
+                "127.0.0.0/8"
+            ]
+        },
+        "SwapLimit": false,
+        "SystemTime": "2015-03-10T11:11:23.730591467-07:00"
+    }
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+### Show the docker version information
+
+`GET /version`
+
+Show the docker version information
+
+**Example request**:
+
+    GET /version HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+         "Version": "1.5.0",
+         "Os": "linux",
+         "KernelVersion": "3.18.5-tinycore64",
+         "GoVersion": "go1.4.1",
+         "GitCommit": "a8a31ef",
+         "Arch": "amd64",
+         "ApiVersion": "1.19"
+    }
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+### Ping the docker server
+
+`GET /_ping`
+
+Ping the docker server
+
+**Example request**:
+
+    GET /_ping HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: text/plain
+
+    OK
+
+Status Codes:
+
+- **200** - no error
+- **500** - server error
+
+### Create a new image from a container's changes
+
+`POST /commit`
+
+Create a new image from a container's changes
+
+**Example request**:
+
+    POST /commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1
+    Content-Type: application/json
+
+    {
+         "Hostname": "",
+         "Domainname": "",
+         "User": "",
+         "AttachStdin": false,
+         "AttachStdout": true,
+         "AttachStderr": true,
+         "PortSpecs": null,
+         "Tty": false,
+         "OpenStdin": false,
+         "StdinOnce": false,
+         "Env": null,
+         "Cmd": [
+                 "date"
+         ],
+         "Volumes": {
+                 "/tmp": {}
+         },
+         "Labels": {
+                 "key1": "value1",
+                 "key2": "value2"
+          },
+         "WorkingDir": "",
+         "NetworkDisabled": false,
+         "ExposedPorts": {
+                 "22/tcp": {}
+         }
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/vnd.docker.raw-stream
+
+    {"Id": 
"596069db4bf5"} + +Json Parameters: + +- **config** - the container's configuration + +Query Parameters: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") + +Status Codes: + +- **201** – no error +- **404** – no such container +- **500** – server error + +### Monitor Docker's events + +`GET /events` + +Get container events from docker, either in real time via streaming, or via +polling (using since). + +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause + +and Docker images report: + + untag, delete + +**Example request**: + + GET /events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} + +Query Parameters: + +- **since** – Timestamp used for polling +- **until** – Timestamp used for polling +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `event=`; -- event to filter + - `image=`; -- image to filter + - `container=`; -- container to filter + +Status Codes: + +- **200** – no error +- **500** – server error + +### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +Status Codes: + +- **200** – no error +- **500** – server error + +### Get a tarball containing all images. + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +Status Codes: + +- **200** – no error +- **500** – server error + +### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. 
+
+**Example request**
+
+    POST /images/load
+
+    Tarball in body
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+### Image tarball format
+
+An image tarball contains one directory per image layer (named using its long ID),
+each containing these files:
+
+- `VERSION`: currently `1.0` - the file format version
+- `json`: detailed layer information, similar to `docker inspect layer_id`
+- `layer.tar`: A tarfile containing the filesystem changes in this layer
+
+The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories
+for storing attribute changes and deletions.
+
+If the tarball defines a repository, the tarball should also include a `repositories` file at
+the root that contains a list of repository and tag names mapped to layer IDs.
+
+```
+{"hello-world":
+    {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"}
+}
+```
+
+### Exec Create
+
+`POST /containers/(id)/exec`
+
+Sets up an exec instance in a running container `id`
+
+**Example request**:
+
+    POST /containers/e90e34656806/exec HTTP/1.1
+    Content-Type: application/json
+
+    {
+     "AttachStdin": false,
+     "AttachStdout": true,
+     "AttachStderr": true,
+     "Tty": false,
+     "Cmd": [
+             "date"
+     ]
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {
+         "Id": "f90e34656806",
+         "Warnings":[]
+    }
+
+Json Parameters:
+
+- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command.
+- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command.
+- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command.
+- **Tty** - Boolean value to allocate a pseudo-TTY.
+- **Cmd** - Command to run specified as a string or an array of strings.
+
+
+Status Codes:
+
+- **201** – no error
+- **404** – no such container
+
+### Exec Start
+
+`POST /exec/(id)/start`
+
+Starts a previously set up `exec` instance `id`. If `detach` is true, this API
+returns after starting the `exec` command. Otherwise, this API sets up an
+interactive session with the `exec` command.
+
+**Example request**:
+
+    POST /exec/e90e34656806/start HTTP/1.1
+    Content-Type: application/json
+
+    {
+     "Detach": false,
+     "Tty": false
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {{ STREAM }}
+
+Json Parameters:
+
+- **Detach** - Detach from the `exec` command.
+- **Tty** - Boolean value to allocate a pseudo-TTY.
+
+Status Codes:
+
+- **201** – no error
+- **404** – no such exec instance
+
+ **Stream details**:
+ Similar to the stream behavior of the `POST /containers/(id)/attach` API
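+
+For illustration, a minimal Go sketch tying the two exec calls together is
+shown below; the daemon address `localhost:2375`, the container ID, and the
+`date` command are assumptions made for the example:
+
+```
+package main
+
+import (
+    "bytes"
+    "encoding/json"
+    "io"
+    "log"
+    "net/http"
+    "os"
+)
+
+func main() {
+    // 1. Create the exec instance; the response carries its Id.
+    create := bytes.NewBufferString(`{"AttachStdout": true, "AttachStderr": true, "Cmd": ["date"]}`)
+    resp, err := http.Post("http://localhost:2375/containers/e90e34656806/exec", "application/json", create)
+    if err != nil {
+        log.Fatal(err)
+    }
+    var created struct {
+        Id       string
+        Warnings []string
+    }
+    if err := json.NewDecoder(resp.Body).Decode(&created); err != nil {
+        log.Fatal(err)
+    }
+    resp.Body.Close()
+
+    // 2. Start it; with Detach false the response body is the command's
+    // output stream (framed as described under "Attach to a container").
+    start := bytes.NewBufferString(`{"Detach": false, "Tty": false}`)
+    resp, err = http.Post("http://localhost:2375/exec/"+created.Id+"/start", "application/json", start)
+    if err != nil {
+        log.Fatal(err)
+    }
+    defer resp.Body.Close()
+    io.Copy(os.Stdout, resp.Body) // raw framed stream; see the demux sketch above
+}
+```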
+
+### Exec Resize
+
+`POST /exec/(id)/resize`
+
+Resizes the `tty` session used by the `exec` command `id`.
+This API is valid only if `tty` was specified as part of creating and starting the `exec` command.
+
+**Example request**:
+
+    POST /exec/e90e34656806/resize HTTP/1.1
+    Content-Type: text/plain
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: text/plain
+
+Query Parameters:
+
+- **h** – height of `tty` session
+- **w** – width of `tty` session
+
+Status Codes:
+
+- **201** – no error
+- **404** – no such exec instance
+
+### Exec Inspect
+
+`GET /exec/(id)/json`
+
+Return low-level information about the `exec` command `id`.
+
+**Example request**:
+
+    GET /exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: text/plain
+
+    {
+        "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39",
+        "Running" : false,
+        "ExitCode" : 2,
+        "ProcessConfig" : {
+            "privileged" : false,
+            "user" : "",
+            "tty" : false,
+            "entrypoint" : "sh",
+            "arguments" : [
+                "-c",
+                "exit 2"
+            ]
+        },
+        "OpenStdin" : false,
+        "OpenStderr" : false,
+        "OpenStdout" : false,
+        "Container" : {
+            "State" : {
+                "Running" : true,
+                "Paused" : false,
+                "Restarting" : false,
+                "OOMKilled" : false,
+                "Pid" : 3650,
+                "ExitCode" : 0,
+                "Error" : "",
+                "StartedAt" : "2014-11-17T22:26:03.717657531Z",
+                "FinishedAt" : "0001-01-01T00:00:00Z"
+            },
+            "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c",
+            "Created" : "2014-11-17T22:26:03.626304998Z",
+            "Path" : "date",
+            "Args" : [],
+            "Config" : {
+                "Hostname" : "8f177a186b97",
+                "Domainname" : "",
+                "User" : "",
+                "AttachStdin" : false,
+                "AttachStdout" : false,
+                "AttachStderr" : false,
+                "PortSpecs": null,
+                "ExposedPorts" : null,
+                "Tty" : false,
+                "OpenStdin" : false,
+                "StdinOnce" : false,
+                "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ],
+                "Cmd" : [
+                    "date"
+                ],
+                "Image" : "ubuntu",
+                "Volumes" : null,
+                "WorkingDir" : "",
+                "Entrypoint" : null,
+                "NetworkDisabled" : false,
+                "MacAddress" : "",
+                "OnBuild" : null,
+                "SecurityOpt" : null
+            },
+            "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5",
+            "NetworkSettings" : {
+                "IPAddress" : "172.17.0.2",
+                "IPPrefixLen" : 16,
+                "MacAddress" : "02:42:ac:11:00:02",
+                "Gateway" : "172.17.42.1",
+                "Bridge" : "docker0",
+                "PortMapping" : null,
+                "Ports" : {}
+            },
+            "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf",
+            "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname",
+            "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts",
+            "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log",
+            "Name" : "/test",
+            "Driver" : "aufs",
+            "ExecDriver" : "native-0.2",
+            "MountLabel" : "",
+            "ProcessLabel" : "",
+            "AppArmorProfile" : "",
+            "RestartCount" : 0,
+            "Volumes" : {},
+            "VolumesRW" : {}
+        }
+    }
+
+Status Codes:
+
+- **200** – no error
+- **404** – no such exec instance
+- **500** - server error
+
+# 3. Going further
+
+## 3.1 Inside `docker run`
+
+As an example, the `docker run` command line makes the following API calls:
+
+- Create the container.
+
+- If the status code is 404, it means the image doesn't exist:
+  - Try to pull it.
+  - Then, retry to create the container.
+
+- Start the container.
+
+- If you are not in detached mode:
+  - Attach to the container, using `logs=1` (to have `stdout` and
+    `stderr` from the container's start) and `stream=1`.
+
+- If in detached mode or only `stdin` is attached, display the container's id.
+
+## 3.2 Hijacking
+
+In this version of the API, `/attach` uses hijacking to transport `stdin`,
+`stdout`, and `stderr` on the same socket.
+
+To hint potential proxies about connection hijacking, the Docker client sends
+connection upgrade headers, similar to WebSocket:
+
+    Upgrade: tcp
+    Connection: Upgrade
+
+When the Docker daemon detects the `Upgrade` header, it switches its status code
+from **200 OK** to **101 UPGRADED** and resends the same headers.
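+
+For illustration, a minimal Go sketch of performing this upgrade by hand is
+shown below; the daemon address `localhost:2375` and the container ID are
+assumptions made for the example, and error handling is abbreviated:
+
+```
+package main
+
+import (
+    "bufio"
+    "fmt"
+    "io"
+    "log"
+    "net"
+    "net/http"
+    "os"
+)
+
+func main() {
+    conn, err := net.Dial("tcp", "localhost:2375")
+    if err != nil {
+        log.Fatal(err)
+    }
+    defer conn.Close()
+
+    // Send the attach request with the upgrade headers shown above.
+    fmt.Fprint(conn, "POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1\r\n"+
+        "Host: localhost\r\nUpgrade: tcp\r\nConnection: Upgrade\r\n\r\n")
+
+    // Expect "HTTP/1.1 101 UPGRADED"; after the response headers, the
+    // connection carries the raw (or framed) attach stream.
+    br := bufio.NewReader(conn)
+    resp, err := http.ReadResponse(br, nil)
+    if err != nil {
+        log.Fatal(err)
+    }
+    log.Println("status:", resp.Status)
+    io.Copy(os.Stdout, br) // copy container output until EOF
+}
+```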
+
+## 3.3 CORS Requests
+
+To enable cross-origin requests to the remote API, supply a value for the
+`--api-cors-header` flag when running Docker in daemon mode. Setting `*`
+(asterisk) allows all origins; leaving it at the default (blank) keeps CORS
+disabled.
+
+    $ docker -d -H="192.168.1.9:2375" --api-cors-header="http://foo.bar"
diff --git a/docs/reference/api/docker_remote_api_v1.2.md b/docs/reference/api/docker_remote_api_v1.2.md
new file mode 100644
index 00000000..678853ea
--- /dev/null
+++ b/docs/reference/api/docker_remote_api_v1.2.md
@@ -0,0 +1,1024 @@
+
+
+# Docker Remote API v1.2
+
+# 1. Brief introduction
+
+- The Remote API is replacing rcli
+- Default port in the docker daemon is 2375
+- The API tends to be REST, but for some complex commands, like attach
+  or pull, the HTTP connection is hijacked to transport stdout, stdin
+  and stderr
+
+# 2. Endpoints
+
+## 2.1 Containers
+
+### List containers
+
+`GET /containers/json`
+
+List containers
+
+**Example request**:
+
+    GET /containers/json?all=1&before=8dfafdbc3a40 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+         {
+                 "Id": "8dfafdbc3a40",
+                 "Image": "ubuntu:latest",
+                 "Command": "echo 1",
+                 "Created": 1367854155,
+                 "Status": "Exit 0",
+                 "Ports":"",
+                 "SizeRw":12288,
+                 "SizeRootFs":0
+         },
+         {
+                 "Id": "9cd87474be90",
+                 "Image": "ubuntu:latest",
+                 "Command": "echo 222222",
+                 "Created": 1367854155,
+                 "Status": "Exit 0",
+                 "Ports":"",
+                 "SizeRw":12288,
+                 "SizeRootFs":0
+         },
+         {
+                 "Id": "3176a2479c92",
+                 "Image": "centos:latest",
+                 "Command": "echo 3333333333333333",
+                 "Created": 1367854154,
+                 "Status": "Exit 0",
+                 "Ports":"",
+                 "SizeRw":12288,
+                 "SizeRootFs":0
+         },
+         {
+                 "Id": "4cb07b47f9fb",
+                 "Image": "fedora:latest",
+                 "Command": "echo 444444444444444444444444444444444",
+                 "Created": 1367854152,
+                 "Status": "Exit 0",
+                 "Ports":"",
+                 "SizeRw":12288,
+                 "SizeRootFs":0
+         }
+    ]
+
+Query Parameters:
+
+- **all** – 1/True/true or 0/False/false, Show all containers.
+  Only running containers are shown by default
+- **limit** – Show `limit` last created
+  containers, include non-running ones.
+- **since** – Show only containers created since Id, include
+  non-running ones.
+- **before** – Show only containers created before Id, include
+  non-running ones.
+ +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "date" + ], + "Dns":null, + "Image":"ubuntu", + "Volumes":{}, + "VolumesFrom":"" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806" + "Warnings":[] + } + +Json Parameters: + +- **config** – the container's configuration + +Status Codes: + +- **201** – no error +- **404** – no such container +- **406** – impossible to attach (container not running) +- **500** – server error + +### Inspect a container + +`GET /containers/(id)/json` + +Return low-level information on the container `id` + + +**Example request**: + + GET /containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Created": "2013-05-07T14:51:42.041847+02:00", + "Path": "date", + "Args": [], + "Config": { + "Hostname": "4fa6e0f0c678", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Dns": null, + "Image": "ubuntu", + "Volumes": {}, + "VolumesFrom": "" + }, + "State": { + "Running": false, + "Pid": 0, + "ExitCode": 0, + "StartedAt": "2013-05-07T14:51:42.087658+02:01360", + "Ghost": false + }, + "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "NetworkSettings": { + "IpAddress": "", + "IpPrefixLen": 0, + "Gateway": "", + "Bridge": "", + "PortMapping": null + }, + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", + "ResolvConfPath": "/etc/resolv.conf", + "Volumes": {} + } + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Inspect changes on a container's filesystem + +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Export a container + +`GET /containers/(id)/export` + +Export the contents of container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Start a container + +`POST /containers/(id)/start` + +Start the container `id` + +**Example request**: + + POST /containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Stop a container + +`POST 
/containers/(id)/stop` + +Stop the container `id` + +**Example request**: + + POST /containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 OK + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Restart a container + +`POST /containers/(id)/restart` + +Restart the container `id` + +**Example request**: + + POST /containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Kill a container + +`POST /containers/(id)/kill` + +Kill the container `id` + +**Example request**: + + POST /containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Attach to a container + +`POST /containers/(id)/attach` + +Attach to the container `id` + +**Example request**: + + POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Defaul + false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. 
Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Wait a container + +`POST /containers/(id)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Remove a container + +`DELETE /containers/(id)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default false + +Status Codes: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +## 2.2 Images + +### List Images + +`GET /images/(format)` + +List images `format` could be json or viz (json default) + +**Example request**: + + GET /images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Repository":"ubuntu", + "Tag":"precise", + "Id":"b750fe79269d", + "Created":1364102658, + "Size":24653, + "VirtualSize":180116135 + }, + { + "Repository":"ubuntu", + "Tag":"12.04", + "Id":"b750fe79269d", + "Created":1364102658, + "Size":24653, + "VirtualSize":180116135 + } + ] + +**Example request**: + + GET /images/viz HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + digraph docker { + "d82cbacda43a" -> "074be284591f" + "1496068ca813" -> "08306dc45919" + "08306dc45919" -> "0e7893146ac2" + "b750fe79269d" -> "1496068ca813" + base -> "27cf78414709" [style=invis] + "f71189fff3de" -> "9a33b36209ed" + "27cf78414709" -> "b750fe79269d" + "0e7893146ac2" -> "d6434d954665" + "d6434d954665" -> "d82cbacda43a" + base -> "e9aa60c60128" [style=invis] + "074be284591f" -> "f71189fff3de" + "b750fe79269d" [label="b750fe79269d\nubuntu",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; + "e9aa60c60128" [label="e9aa60c60128\ncentos",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; + "9a33b36209ed" [label="9a33b36209ed\nfedora",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; + base [style=invisible] + } + +Query Parameters: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by defaul + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### Create an image + +`POST /images/create` + +Create an image, either by pull it from the registry or by importing i + +**Example request**: + + POST /images/create?fromImage=ubuntu HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Pulling..."} + {"status":"Pulling", "progress":"1/? (n/a)"} + {"error":"Invalid..."} + ... 
+ +Query Parameters: + +- **fromImage** – name of the image to pull +- **fromSrc** – source to import, - means stdin +- **repo** – repository +- **tag** – tag +- **registry** – the registry to pull from + +Status Codes: + +- **200** – no error +- **500** – server error + +### Insert a file in an image + +`POST /images/(name)/insert` + +Insert a file from `url` in the image `name` at `path` + +**Example request**: + + POST /images/test/insert?path=/usr&url=myurl HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Inserting..."} + {"status":"Inserting", "progress":"1/? (n/a)"} + {"error":"Invalid..."} + ... + +Query Parameters: + +- **url** – The url from where the file is taken +- **path** – The path where the file is stored + +Status Codes: + +- **200** – no error +- **500** – server error + +### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /images/centos/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "parent":"27cf784147099545", + "created":"2013-03-23T22:24:18.818426-07:00", + "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "container_config": + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":false, + "AttachStderr":false, + "PortSpecs":null, + "Tty":true, + "OpenStdin":true, + "StdinOnce":false, + "Env":null, + "Cmd": ["/bin/bash"], + "Dns":null, + "Image":"centos", + "Volumes":null, + "VolumesFrom":"" + }, + "Size": 6824592 + } + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /images/fedora/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id":"b750fe79269d", + "Tag":["ubuntu:latest"], + "Created":1364102658, + "CreatedBy":"/bin/bash" + }, + { + "Id":"27cf78414709", + "Created":1364068391, + "CreatedBy":"" + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + + > **Example request**: + > + > POST /images/test/push HTTP/1.1 + > {{ authConfig }} + > + > **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Pushing..."} + {"status":"Pushing", "progress":"1/? (n/a)"} + {"error":"Invalid..."} + ... 
+ +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 OK + +Query Parameters: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +Status Codes: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +Status Codes: + +- **204** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com) + +**Example request**: + + GET /images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Name":"cespare/sshd", + "Description":"" + }, + { + "Name":"johnfuller/sshd", + "Description":"" + }, + { + "Name":"dhrp/mongodb-sshd", + "Description":"" + } + ] + + :query term: term to search + :statuscode 200: no error + :statuscode 500: server error + +## 2.3 Misc + +### Build an image from Dockerfile via stdin + +`POST /build` + +Build an image from Dockerfile + +**Example request**: + + POST /build HTTP/1.1 + + {{ TAR STREAM }} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + {{ STREAM }} + +Query Parameters: + +- **t** – repository name to be applied to the resulting image in + case of success +- **remote** – resource to fetch, as URI + +Status Codes: + +- **200** – no error +- **500** – server error + +{{ STREAM }} is the raw text output of the build command. It uses the +HTTP Hijack method in order to stream. 
+ +### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /auth HTTP/1.1 + Content-Type: application/json + + { + "username":"hannibal", + "password:"xxxx", + "email":"hannibal@a-team.com" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Status": "Login Succeeded" + } + +Status Codes: + +- **200** – no error +- **204** – no error +- **401** – unauthorized +- **403** – forbidden +- **500** – server error + +### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers":11, + "Images":16, + "Debug":false, + "NFd": 11, + "NGoroutines":21, + "MemoryLimit":true, + "SwapLimit":false + } + +Status Codes: + +- **200** – no error +- **500** – server error + +### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version":"0.2.2", + "GitCommit":"5a2a5cc+CHANGES", + "GoVersion":"go1.0.3" + } + +Status Codes: + +- **200** – no error +- **500** – server error + +### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Cmd": ["cat", "/world"], + "PortSpecs":["22"] + } + +**Example response**: + + HTTP/1.1 201 OK + Content-Type: application/vnd.docker.raw-stream + + {"Id": "596069db4bf5"} + +Query Parameters: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **m** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") + +Status Codes: + +- **201** – no error +- **404** – no such container +- **500** – server error + +# 3. Going further + +## 3.1 Inside `docker run` + +Here are the steps of `docker run` : + + - Create the container + + - If the status code is 404, it means the image doesn't exist: + - Try to pull it + - Then retry to create the container + + - Start the container + + - If you are not in detached mode: + - Attach to the container, using logs=1 (to have stdout and + stderr from the container's start) and stream=1 + + - If in detached mode or only stdin is attached: + - Display the container's + +## 3.2 Hijacking + +In this version of the API, /attach, uses hijacking to transport stdin, +stdout and stderr on the same socket. This might change in the future. + +## 3.3 CORS Requests + +To enable cross origin requests to the remote api add the flag +"--api-enable-cors" when running docker in daemon mode. + +> docker -d -H="[tcp://192.168.1.9:2375](tcp://192.168.1.9:2375)" +> -api-enable-cors diff --git a/docs/reference/api/docker_remote_api_v1.20.md b/docs/reference/api/docker_remote_api_v1.20.md new file mode 100644 index 00000000..73ea4bf4 --- /dev/null +++ b/docs/reference/api/docker_remote_api_v1.20.md @@ -0,0 +1,2280 @@ + + +# Docker Remote API v1.20 + +## 1. Brief introduction + + - The Remote API has replaced `rcli`. + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket]( + /articles/basics/#bind-docker-to-another-hostport-or-a-unix-socket). + - The API tends to be REST. 
However, for some complex commands, like `attach`
+   or `pull`, the HTTP connection is hijacked to transport `stdout`,
+   `stdin` and `stderr`.
+ - When the client API version is newer than the daemon's, these calls return an HTTP
+   `400 Bad Request` error message.
+
+# 2. Endpoints
+
+## 2.1 Containers
+
+### List containers
+
+`GET /containers/json`
+
+List containers
+
+**Example request**:
+
+    GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+         {
+                 "Id": "8dfafdbc3a40",
+                 "Names":["/boring_feynman"],
+                 "Image": "ubuntu:latest",
+                 "Command": "echo 1",
+                 "Created": 1367854155,
+                 "Status": "Exit 0",
+                 "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}],
+                 "Labels": {
+                         "com.example.vendor": "Acme",
+                         "com.example.license": "GPL",
+                         "com.example.version": "1.0"
+                 },
+                 "SizeRw": 12288,
+                 "SizeRootFs": 0
+         },
+         {
+                 "Id": "9cd87474be90",
+                 "Names":["/coolName"],
+                 "Image": "ubuntu:latest",
+                 "Command": "echo 222222",
+                 "Created": 1367854155,
+                 "Status": "Exit 0",
+                 "Ports": [],
+                 "Labels": {},
+                 "SizeRw": 12288,
+                 "SizeRootFs": 0
+         },
+         {
+                 "Id": "3176a2479c92",
+                 "Names":["/sleepy_dog"],
+                 "Image": "ubuntu:latest",
+                 "Command": "echo 3333333333333333",
+                 "Created": 1367854154,
+                 "Status": "Exit 0",
+                 "Ports":[],
+                 "Labels": {},
+                 "SizeRw":12288,
+                 "SizeRootFs":0
+         },
+         {
+                 "Id": "4cb07b47f9fb",
+                 "Names":["/running_cat"],
+                 "Image": "ubuntu:latest",
+                 "Command": "echo 444444444444444444444444444444444",
+                 "Created": 1367854152,
+                 "Status": "Exit 0",
+                 "Ports": [],
+                 "Labels": {},
+                 "SizeRw": 12288,
+                 "SizeRootFs": 0
+         }
+    ]
+
+Query Parameters:
+
+- **all** – 1/True/true or 0/False/false, Show all containers.
+  Only running containers are shown by default (i.e., this defaults to false)
+- **limit** – Show `limit` last created
+  containers, include non-running ones.
+- **since** – Show only containers created since Id, include
+  non-running ones.
+- **before** – Show only containers created before Id, include
+  non-running ones.
+- **size** – 1/True/true or 0/False/false, Show the containers
+  sizes
+- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. Available filters:
+  - `exited=<int>`; -- containers with exit code of `<int>`;
+  - `status=`(`created`|`restarting`|`running`|`paused`|`exited`)
+  - `label=key` or `label="key=value"` of a container label
+
+Status Codes:
+
+- **200** – no error
+- **400** – bad parameter
+- **500** – server error
+
+### Create a container
+
+`POST /containers/create`
+
+Create a container
+
+**Example request**:
+
+    POST /containers/create HTTP/1.1
+    Content-Type: application/json
+
+    {
+           "Hostname": "",
+           "Domainname": "",
+           "User": "",
+           "AttachStdin": false,
+           "AttachStdout": true,
+           "AttachStderr": true,
+           "Tty": false,
+           "OpenStdin": false,
+           "StdinOnce": false,
+           "Env": null,
+           "Cmd": [
+                   "date"
+           ],
+           "Entrypoint": "",
+           "Image": "ubuntu",
+           "Labels": {
+                   "com.example.vendor": "Acme",
+                   "com.example.license": "GPL",
+                   "com.example.version": "1.0"
+           },
+           "Mounts": [
+             {
+               "Source": "/data",
+               "Destination": "/data",
+               "Mode": "ro,Z",
+               "RW": false
+             }
+           ],
+           "WorkingDir": "",
+           "NetworkDisabled": false,
+           "MacAddress": "12:34:56:78:9a:bc",
+           "ExposedPorts": {
+                   "22/tcp": {}
+           },
+           "HostConfig": {
+             "Binds": ["/tmp:/tmp"],
+             "Links": ["redis3:redis"],
+             "LxcConf": {"lxc.utsname":"docker"},
+             "Memory": 0,
+             "MemorySwap": 0,
+             "CpuShares": 512,
+             "CpuPeriod": 100000,
+             "CpusetCpus": "0,1",
+             "CpusetMems": "0,1",
+             "BlkioWeight": 300,
+             "MemorySwappiness": 60,
+             "OomKillDisable": false,
+             "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
+             "PublishAllPorts": false,
+             "Privileged": false,
+             "ReadonlyRootfs": false,
+             "Dns": ["8.8.8.8"],
+             "DnsSearch": [""],
+             "ExtraHosts": null,
+             "VolumesFrom": ["parent", "other:ro"],
+             "CapAdd": ["NET_ADMIN"],
+             "CapDrop": ["MKNOD"],
+             "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 },
+             "NetworkMode": "bridge",
+             "Devices": [],
+             "Ulimits": [{}],
+             "LogConfig": { "Type": "json-file", "Config": {} },
+             "SecurityOpt": [""],
+             "CgroupParent": ""
+          }
+      }
+
+**Example response**:
+
+      HTTP/1.1 201 Created
+      Content-Type: application/json
+
+      {
+           "Id": "e90e34656806",
+           "Warnings": []
+      }
+
+Json Parameters:
+
+- **Hostname** - A string value containing the hostname to use for the
+  container.
+- **Domainname** - A string value containing the domain name to use
+  for the container.
+- **User** - A string value specifying the user inside the container.
+- **Memory** - Memory limit in bytes.
+- **MemorySwap** - Total memory limit (memory + swap); set `-1` to disable swap.
+  You must use this with `memory` and make the swap value larger than `memory`.
+- **CpuShares** - An integer value containing the container's CPU Shares
+  (i.e., the relative weight vs. other containers).
+- **CpuPeriod** - The length of a CPU period in microseconds.
+- **Cpuset** - Deprecated; please don't use. Use `CpusetCpus` instead.
+- **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use.
+- **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
+- **BlkioWeight** - Block IO weight (relative weight); accepts a weight value between 10 and 1000.
+- **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+- **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
+- **AttachStdin** - Boolean value, attaches to `stdin`.
+- **AttachStdout** - Boolean value, attaches to `stdout`.
+- **AttachStderr** - Boolean value, attaches to `stderr`.
+- **Tty** - Boolean value, attach standard streams to a `tty`, including `stdin` if it is not closed.
+- **OpenStdin** - Boolean value, opens `stdin`.
+- **StdinOnce** - Boolean value, close `stdin` after the first attached client disconnects.
+- **Env** - A list of environment variables in the form of `VAR=value`.
+- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value"[,"key2":"value2"]}`
+- **Cmd** - Command to run specified as a string or an array of strings.
+- **Entrypoint** - Set the entry point for the container as a string or an array
+  of strings.
+- **Image** - A string specifying the image name to use for the container.
+- **Mounts** - An array of mount points in the container.
+- **WorkingDir** - A string specifying the working directory for commands to
+  run in.
+- **NetworkDisabled** - Boolean value, when true disables networking for the
+  container.
+- **ExposedPorts** - An object mapping ports to an empty object in the form of:
+  `"ExposedPorts": { "<port>/<tcp|udp>": {} }`
+- **HostConfig**
+    - **Binds** – A list of volume bindings for this container. Each volume binding is a string in one of these forms:
+        + `container_path` to create a new volume for the container
+        + `host_path:container_path` to bind-mount a host path into the container
+        + `host_path:container_path:ro` to make the bind-mount read-only inside the container.
+    - **Links** - A list of links for the container. Each link entry should be
+      in the form of `container_name:alias`.
+    - **LxcConf** - LXC specific configurations. These configurations only
+      work when using the `lxc` execution driver.
+    - **PortBindings** - A map of exposed container ports and the host port they
+      should map to. A JSON object in the form
+      `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
+      Take note that `port` is specified as a string and not an integer value.
+    - **PublishAllPorts** - Allocates a random host port for all of a container's
+      exposed ports. Specified as a boolean value.
+    - **Privileged** - Gives the container full access to the host. Specified as
+      a boolean value.
+    - **ReadonlyRootfs** - Mount the container's root filesystem as read only.
+      Specified as a boolean value.
+    - **Dns** - A list of DNS servers for the container to use.
+    - **DnsSearch** - A list of DNS search domains.
+    - **ExtraHosts** - A list of hostnames/IP mappings to add to the
+      container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
+    - **VolumesFrom** - A list of volumes to inherit from another container.
+      Specified in the form `<container name>[:<ro|rw>]`
+    - **CapAdd** - A list of kernel capabilities to add to the container.
+    - **CapDrop** - A list of kernel capabilities to drop from the container.
+    - **RestartPolicy** – The behavior to apply when the container exits. The
+      value is an object with a `Name` property of either `"always"` to
+      always restart or `"on-failure"` to restart only when the container
+      exit code is non-zero. If `on-failure` is used, `MaximumRetryCount`
+      controls the number of times to retry before giving up.
+      The default is not to restart. (optional)
+      An ever-increasing delay (double the previous delay, starting at 100 ms)
+      is added before each restart to prevent flooding the server.
+    - **NetworkMode** - Sets the networking mode for the container. Supported
+      values are: `bridge`, `host`, and `container:<name|id>`
+    - **Devices** - A list of devices to add to the container specified as a JSON object in the
+      form
+      `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+    - **Ulimits** - A list of ulimits to set in the container, specified as
+      `{ "Name": <name>, "Soft": <soft limit>, "Hard": <hard limit> }`, for example:
+      `Ulimits: [{ "Name": "nofile", "Soft": 1024, "Hard": 2048 }]`
+    - **SecurityOpt** - A list of string values to customize labels for MLS
+      systems, such as SELinux.
+    - **LogConfig** - Log configuration for the container, specified as a JSON object in the form
+      `{ "Type": "<driver_name>", "Config": {"key1": "val1"} }`.
+      Available types: `json-file`, `syslog`, `journald`, `gelf`, `none`.
+      The default is the `json-file` logging driver.
+    - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist.
+
+Query Parameters:
+
+- **name** – Assign the specified name to the container. Must
+  match `/?[a-zA-Z0-9_-]+`.
+
+Status Codes:
+
+- **201** – no error
+- **404** – no such container
+- **406** – impossible to attach (container not running)
+- **500** – server error
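+
+For illustration, a minimal Go sketch of creating and naming a container
+follows. It is not part of the API itself; the daemon address and the
+configuration values are assumptions, and every omitted field keeps its
+default:
+
+```go
+package main
+
+import (
+    "bytes"
+    "encoding/json"
+    "fmt"
+    "net/http"
+)
+
+func main() {
+    // A minimal configuration; see the Json Parameters list above.
+    config := map[string]interface{}{
+        "Image": "ubuntu",
+        "Cmd":   []string{"date"},
+        "HostConfig": map[string]interface{}{
+            "Binds": []string{"/tmp:/tmp"},
+        },
+    }
+    body, _ := json.Marshal(config)
+
+    // The optional name query parameter must match /?[a-zA-Z0-9_-]+.
+    resp, err := http.Post(
+        "http://localhost:2375/containers/create?name=my_container",
+        "application/json",
+        bytes.NewReader(body),
+    )
+    if err != nil {
+        panic(err)
+    }
+    defer resp.Body.Close()
+
+    var result struct {
+        Id       string
+        Warnings []string
+    }
+    if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
+        panic(err)
+    }
+    fmt.Println("created container:", result.Id, "warnings:", result.Warnings)
+}
+```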
+
+### Inspect a container
+
+`GET /containers/(id)/json`
+
+Return low-level information on the container `id`
+
+
+**Example request**:
+
+    GET /containers/4fa6e0f0c678/json HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "AppArmorProfile": "",
+        "Args": [
+            "-c",
+            "exit 9"
+        ],
+        "Config": {
+            "AttachStderr": true,
+            "AttachStdin": false,
+            "AttachStdout": true,
+            "Cmd": [
+                "/bin/sh",
+                "-c",
+                "exit 9"
+            ],
+            "Domainname": "",
+            "Entrypoint": null,
+            "Env": [
+                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+            ],
+            "ExposedPorts": null,
+            "Hostname": "ba033ac44011",
+            "Image": "ubuntu",
+            "Labels": {
+                "com.example.vendor": "Acme",
+                "com.example.license": "GPL",
+                "com.example.version": "1.0"
+            },
+            "MacAddress": "",
+            "NetworkDisabled": false,
+            "OnBuild": null,
+            "OpenStdin": false,
+            "StdinOnce": false,
+            "Tty": false,
+            "User": "",
+            "Volumes": null,
+            "WorkingDir": ""
+        },
+        "Created": "2015-01-06T15:47:31.485331387Z",
+        "Driver": "devicemapper",
+        "ExecDriver": "native-0.2",
+        "ExecIDs": null,
+        "HostConfig": {
+            "Binds": null,
+            "BlkioWeight": 0,
+            "CapAdd": null,
+            "CapDrop": null,
+            "ContainerIDFile": "",
+            "CpusetCpus": "",
+            "CpusetMems": "",
+            "CpuShares": 0,
+            "CpuPeriod": 100000,
+            "Devices": [],
+            "Dns": null,
+            "DnsSearch": null,
+            "ExtraHosts": null,
+            "IpcMode": "",
+            "Links": null,
+            "LxcConf": [],
+            "Memory": 0,
+            "MemorySwap": 0,
+            "OomKillDisable": false,
+            "NetworkMode": "bridge",
+            "PortBindings": {},
+            "Privileged": false,
+            "ReadonlyRootfs": false,
+            "PublishAllPorts": false,
+            "RestartPolicy": {
+                "MaximumRetryCount": 2,
+                "Name": "on-failure"
+            },
+            "LogConfig": {
+                "Config": null,
+                "Type": "json-file"
+            },
+            "SecurityOpt": null,
+            "VolumesFrom": null,
+            "Ulimits": [{}]
+        },
+        "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname",
+        "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts",
+        "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log",
+        "Id":
"ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "Gateway": "", + "IPAddress": "", + "IPPrefixLen": 0, + "MacAddress": "", + "PortMapping": null, + "Ports": null + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": false, + "StartedAt": "2015-01-06T15:47:32.072697474Z" + }, + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ] + } + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### List processes running inside a container + +`GET /containers/(id)/top` + +List processes running inside the container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles": [ + "USER", + "PID", + "%CPU", + "%MEM", + "VSZ", + "RSS", + "TTY", + "STAT", + "START", + "TIME", + "COMMAND" + ], + "Processes": [ + ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], + ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] + ] + } + +Query Parameters: + +- **ps_args** – ps arguments to use (e.g., aux) + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Get container logs + +`GET /containers/(id)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with `json-file` logging driver. + +**Example request**: + + GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {{ STREAM }} + +Query Parameters: + +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. +- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. +- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp + will only output log-entries since that timestamp. Default: 0 (unfiltered) +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default `false`. +- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. 
+ +Status Codes: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +### Inspect changes on a container's filesystem + +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Export a container + +`GET /containers/(id)/export` + +Export the contents of container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Get container stats based on resource usage + +`GET /containers/(id)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. + +> **Note**: this functionality currently only works when using the *libcontainer* exec-driver. + +**Example request**: + + GET /containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "network" : { + "rx_dropped" : 0, + "rx_bytes" : 648, + "rx_errors" : 0, + "tx_packets" : 8, + "tx_dropped" : 0, + "rx_packets" : 8, + "tx_errors" : 0, + "tx_bytes" : 648 + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 16970827, + 1839451, + 7107380, + 10571290 + ], + "usage_in_usermode" : 10000000, + "total_usage" : 36488948, + "usage_in_kernelmode" : 20000000 + }, + "system_cpu_usage" : 20091722000000000, + "throttling_data" : {} + } + } + +Query Parameters: + +- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Resize a container TTY + +`POST /containers/(id)/resize?h=&w=` + +Resize the TTY for container with `id`. You must restart the container for the resize to take effect. 
+ +**Example request**: + + POST /containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +Status Codes: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +### Start a container + +`POST /containers/(id)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. + +**Example request**: + + POST /containers/(id)/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +### Stop a container + +`POST /containers/(id)/stop` + +Stop the container `id` + +**Example request**: + + POST /containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +### Restart a container + +`POST /containers/(id)/restart` + +Restart the container `id` + +**Example request**: + + POST /containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Kill a container + +`POST /containers/(id)/kill` + +Kill the container `id` + +**Example request**: + + POST /containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. 
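+
+For illustration, a minimal Go sketch of sending a specific signal. It is not
+part of the API itself; the daemon address and container ID are assumptions:
+
+```go
+package main
+
+import (
+    "fmt"
+    "net/http"
+)
+
+func main() {
+    // Send SIGTERM instead of the default SIGKILL; the signal may also
+    // be given numerically, e.g. signal=15.
+    resp, err := http.Post(
+        "http://localhost:2375/containers/e90e34656806/kill?signal=SIGTERM",
+        "text/plain", nil)
+    if err != nil {
+        panic(err)
+    }
+    resp.Body.Close()
+    fmt.Println(resp.Status) // "204 No Content" on success
+}
+```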
+
+Status Codes:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+### Rename a container
+
+`POST /containers/(id)/rename`
+
+Rename the container `id` to a `new_name`
+
+**Example request**:
+
+    POST /containers/e90e34656806/rename?name=new_name HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+Query Parameters:
+
+- **name** – new name for the container
+
+Status Codes:
+
+- **204** – no error
+- **404** – no such container
+- **409** - conflict, name already assigned
+- **500** – server error
+
+### Pause a container
+
+`POST /containers/(id)/pause`
+
+Pause the container `id`
+
+**Example request**:
+
+    POST /containers/e90e34656806/pause HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+Status Codes:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+### Unpause a container
+
+`POST /containers/(id)/unpause`
+
+Unpause the container `id`
+
+**Example request**:
+
+    POST /containers/e90e34656806/unpause HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+Status Codes:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+### Attach to a container
+
+`POST /containers/(id)/attach`
+
+Attach to the container `id`
+
+**Example request**:
+
+    POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 101 UPGRADED
+    Content-Type: application/vnd.docker.raw-stream
+    Connection: Upgrade
+    Upgrade: tcp
+
+    {{ STREAM }}
+
+Query Parameters:
+
+- **logs** – 1/True/true or 0/False/false, return logs. Default `false`.
+- **stream** – 1/True/true or 0/False/false, return stream.
+  Default `false`.
+- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach
+  to `stdin`. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stdout` log, if `stream=true`, attach to `stdout`. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stderr` log, if `stream=true`, attach to `stderr`. Default `false`.
+
+Status Codes:
+
+- **101** – no error, hints proxy about hijacking
+- **200** – no error, no upgrade header found
+- **400** – bad parameter
+- **404** – no such container
+- **500** – server error
+
+    **Stream details**:
+
+    When the TTY setting is enabled in
+    [`POST /containers/create`
+    ](/reference/api/docker_remote_api_v1.9/#create-a-container "POST /containers/create"),
+    the stream is the raw data from the process PTY and client's `stdin`.
+    When the TTY is disabled, the stream is multiplexed to separate
+    `stdout` and `stderr`.
+
+    The format is a **Header** and a **Payload** (frame).
+
+    **HEADER**
+
+    The header identifies the stream the frame belongs to (`stdout` or
+    `stderr`). It also contains the size of the associated frame encoded in the
+    last four bytes (`uint32`).
+
+    It is encoded on the first eight bytes like this:
+
+        header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}
+
+    `STREAM_TYPE` can be:
+
+- 0: `stdin` (is written on `stdout`)
+- 1: `stdout`
+- 2: `stderr`
+
+    `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of
+    the `uint32` size encoded as big endian.
+
+    **PAYLOAD**
+
+    The payload is the raw stream.
+
+    **IMPLEMENTATION**
+
+    The simplest way to implement the Attach protocol is the following:
+
+    1. Read eight bytes.
+    2. Choose `stdout` or `stderr` depending on the first byte.
+    3. Extract the frame size from the last four bytes.
+    4. Read the extracted size and output it on the correct output.
+    5. Goto 1.
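+
+For illustration, a minimal Go sketch of this loop follows. It is not part of
+the API itself; `src` stands for the already-hijacked connection, and
+forwarding the client's `stdin` is omitted:
+
+```go
+package main
+
+import (
+    "encoding/binary"
+    "errors"
+    "io"
+    "os"
+)
+
+// demux implements the algorithm above: read an eight-byte header, pick an
+// output from STREAM_TYPE, then copy SIZE payload bytes to it, repeatedly.
+func demux(src io.Reader, stdout, stderr io.Writer) error {
+    header := make([]byte, 8)
+    for {
+        if _, err := io.ReadFull(src, header); err != nil {
+            if err == io.EOF {
+                return nil // clean end of stream
+            }
+            return err
+        }
+        var dst io.Writer
+        switch header[0] {
+        case 0, 1: // stdin is echoed on stdout
+            dst = stdout
+        case 2:
+            dst = stderr
+        default:
+            return errors.New("unknown stream type")
+        }
+        // SIZE1..SIZE4 hold the frame size as a big-endian uint32.
+        size := binary.BigEndian.Uint32(header[4:8])
+        if _, err := io.CopyN(dst, src, int64(size)); err != nil {
+            return err
+        }
+    }
+}
+
+func main() {
+    // Reading from os.Stdin here only stands in for the hijacked connection.
+    demux(os.Stdin, os.Stdout, os.Stderr)
+}
+```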
+
+### Attach to a container (websocket)
+
+`GET /containers/(id)/attach/ws`
+
+Attach to the container `id` via websocket
+
+Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455)
+
+**Example request**
+
+    GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1
+
+**Example response**
+
+    {{ STREAM }}
+
+Query Parameters:
+
+- **logs** – 1/True/true or 0/False/false, return logs. Default `false`.
+- **stream** – 1/True/true or 0/False/false, return stream.
+  Default `false`.
+- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach
+  to `stdin`. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stdout` log, if `stream=true`, attach to `stdout`. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stderr` log, if `stream=true`, attach to `stderr`. Default `false`.
+
+Status Codes:
+
+- **200** – no error
+- **400** – bad parameter
+- **404** – no such container
+- **500** – server error
+
+### Wait a container
+
+`POST /containers/(id)/wait`
+
+Block until container `id` stops, then returns the exit code
+
+**Example request**:
+
+    POST /containers/16253994b7c4/wait HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {"StatusCode": 0}
+
+Status Codes:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+### Remove a container
+
+`DELETE /containers/(id)`
+
+Remove the container `id` from the filesystem
+
+**Example request**:
+
+    DELETE /containers/16253994b7c4?v=1 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+Query Parameters:
+
+- **v** – 1/True/true or 0/False/false, Remove the volumes
+  associated with the container. Default `false`.
+- **force** - 1/True/true or 0/False/false, Kill then remove the container.
+  Default `false`.
+
+Status Codes:
+
+- **204** – no error
+- **400** – bad parameter
+- **404** – no such container
+- **500** – server error
+
+### Copy files or folders from a container
+
+`POST /containers/(id)/copy`
+
+Copy files or folders of container `id`
+
+**Deprecated** in favor of the `archive` endpoint below.
+
+**Example request**:
+
+    POST /containers/4fa6e0f0c678/copy HTTP/1.1
+    Content-Type: application/json
+
+    {
+        "Resource": "test.txt"
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/x-tar
+
+    {{ TAR STREAM }}
+
+Status Codes:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+### Retrieving information about files and folders in a container
+
+`HEAD /containers/(id)/archive`
+
+See the description of the `X-Docker-Container-Path-Stat` header in the
+following section.
+
+### Get an archive of a filesystem resource in a container
+
+`GET /containers/(id)/archive`
+
+Get a tar archive of a resource in the filesystem of container `id`.
+
+Query Parameters:
+
+- **path** - resource in the container's filesystem to archive. Required.
+
+  If not an absolute path, it is relative to the container's root directory.
+  The resource specified by **path** must exist. To assert that the resource
+  is expected to be a directory, **path** should end in `/` or `/.`
+  (assuming a path separator of `/`). If **path** ends in `/.` then this
+  indicates that only the contents of the **path** directory should be
+  copied. A symlink is always resolved to its target.
+ + **Note**: It is not possible to copy certain system files such as resources + under `/proc`, `/sys`, `/dev`, and mounts created by the user in the + container. + +**Example request**: + + GET /containers/8cce319429b2/archive?path=/root HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= + + {{ TAR STREAM }} + +On success, a response header `X-Docker-Container-Path-Stat` will be set to a +base64-encoded JSON object containing some filesystem header information about +the archived resource. The above example value would decode to the following +JSON object (whitespace added for readability): + + { + "name": "root", + "size": 4096, + "mode": 2147484096, + "mtime": "2014-02-27T20:51:23Z", + "linkTarget": "" + } + +A `HEAD` request can also be made to this endpoint if only this information is +desired. + +Status Codes: + +- **200** - success, returns archive of copied resource +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** was asserted to be a directory but exists as a + file) +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** does not exist) +- **500** - server error + +### Extract an archive of files or folders to a directory in a container + +`PUT /containers/(id)/archive` + +Upload a tar archive to be extracted to a path in the filesystem of container +`id`. + +Query Parameters: + +- **path** - path to a directory in the container + to extract the archive's contents into. Required. + + If not an absolute path, it is relative to the container's root directory. + The **path** resource must exist. +- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error + if unpacking the given content would cause an existing directory to be + replaced with a non-directory and vice versa. + +**Example request**: + + PUT /containers/8cce319429b2/archive?path=/vol1 HTTP/1.1 + Content-Type: application/x-tar + + {{ TAR STREAM }} + +**Example response**: + + HTTP/1.1 200 OK + +Status Codes: + +- **200** – the content was extracted successfully +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** should be a directory but exists as a file) + - unable to overwrite existing directory with non-directory + (if **noOverwriteDirNonDir**) + - unable to overwrite existing non-directory with directory + (if **noOverwriteDirNonDir**) +- **403** - client error, permission denied, the volume + or container rootfs is marked as read-only. 
+- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** resource does not exist) +- **500** – server error + +## 2.2 Images + +### List Images + +`GET /images/json` + +**Example request**: + + GET /images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275 + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135 + } + ] + +**Example request, with digest information**: + + GET /images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728 + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +Query Parameters: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /build HTTP/1.1 + + {{ TAR STREAM }} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](/reference/builder/#dockerbuilder)). 
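+
+For illustration, a minimal Go sketch of such a build request follows. It is
+not part of the API itself; it sends a one-file context (just a Dockerfile,
+whose contents are an assumption) and prints the JSON message stream:
+
+```go
+package main
+
+import (
+    "archive/tar"
+    "bytes"
+    "io"
+    "net/http"
+    "os"
+)
+
+func main() {
+    // Build an in-memory, uncompressed tar context containing only a Dockerfile.
+    dockerfile := []byte("FROM ubuntu\nRUN echo hello\n")
+    var buf bytes.Buffer
+    tw := tar.NewWriter(&buf)
+    tw.WriteHeader(&tar.Header{Name: "Dockerfile", Mode: 0644, Size: int64(len(dockerfile))})
+    tw.Write(dockerfile)
+    tw.Close()
+
+    resp, err := http.Post("http://localhost:2375/build?t=myrepo/myimage",
+        "application/tar", &buf)
+    if err != nil {
+        panic(err)
+    }
+    defer resp.Body.Close()
+
+    // The response is a stream of {"stream": ...} / {"error": ...} messages.
+    io.Copy(os.Stdout, resp.Body)
+}
+```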
+ +The build is canceled if the client drops the connection by quitting +or being killed. + +Query Parameters: + +- **dockerfile** - Path within the build context to the Dockerfile. This is + ignored if `remote` is specified and points to an individual filename. +- **t** – A repository name (and optionally a tag) to apply to + the resulting image in case of success. +- **remote** – A Git repository URI or HTTP/HTTPS URI build source. If the + URI specifies a filename, the file's contents are placed into a file + called `Dockerfile`. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to disable swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). + + Request Headers: + +- **Content-type** – Set to `"application/tar"`. +- **X-Registry-Config** – base64-encoded ConfigFile object + +Status Codes: + +- **200** – no error +- **500** – server error + +### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /images/create?fromImage=ubuntu HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +Query Parameters: + +- **fromImage** – Name of the image to pull. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. +- **repo** – Repository name. +- **tag** – Tag. +- **registry** – The registry to pull from. 
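+
+For illustration, a minimal Go sketch of pulling an image follows. It is not
+part of the API itself; the daemon address is an assumption. Note that a
+failure surfaces as an `error` field in the stream, not as an HTTP status:
+
+```go
+package main
+
+import (
+    "encoding/json"
+    "fmt"
+    "net/http"
+)
+
+func main() {
+    resp, err := http.Post(
+        "http://localhost:2375/images/create?fromImage=ubuntu&tag=latest",
+        "text/plain", nil)
+    if err != nil {
+        panic(err)
+    }
+    defer resp.Body.Close()
+
+    // Decode the stream of progress messages until EOF.
+    dec := json.NewDecoder(resp.Body)
+    for {
+        var msg struct {
+            Status   string `json:"status"`
+            Progress string `json:"progress"`
+            Error    string `json:"error"`
+        }
+        if err := dec.Decode(&msg); err != nil {
+            break // io.EOF ends the stream
+        }
+        if msg.Error != "" {
+            fmt.Println("pull failed:", msg.Error)
+            return
+        }
+        fmt.Println(msg.Status, msg.Progress)
+    }
+}
+```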
+ + Request Headers: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +Status Codes: + +- **200** – no error +- **500** – server error + + + +### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /images/ubuntu/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Created": "2013-03-23T22:24:18.818426-07:00", + "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "ContainerConfig": + { + "Hostname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "Tty": true, + "OpenStdin": true, + "StdinOnce": false, + "Env": null, + "Cmd": ["/bin/bash"], + "Dns": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "" + }, + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Parent": "27cf784147099545", + "Size": 6824592 + } + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", + "Tags": null, + "Size": 0, + "Comment": "" + }, + { + "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", + "Created": 1371157430, + "CreatedBy": "", + "Tags": [ + "scratch12:latest", + "scratch:latest" + ], + "Size": 0, + "Comment": "Imported from -" + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +**Example request**: + + POST /images/registry.acme.com:5000/test/push HTTP/1.1 + + +Query Parameters: + +- **tag** – The tag to associate with the image on the registry. This is optional. + +Request Headers: + +- **X-Registry-Auth** – Include a base64-encoded AuthConfig. + object. 
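+
+For illustration, a minimal Go sketch of building this header follows. It is
+not part of the API itself; the credentials are placeholders, and the
+URL-safe base64 alphabet mirrors what the reference client sends:
+
+```go
+package main
+
+import (
+    "encoding/base64"
+    "encoding/json"
+    "fmt"
+    "net/http"
+)
+
+func main() {
+    // Serialize an AuthConfig object (the same fields as POST /auth).
+    auth := map[string]string{
+        "username":      "hannibal",
+        "password":      "xxxx",
+        "email":         "hannibal@a-team.com",
+        "serveraddress": "https://index.docker.io/v1/",
+    }
+    buf, _ := json.Marshal(auth)
+
+    req, _ := http.NewRequest("POST",
+        "http://localhost:2375/images/registry.acme.com:5000/test/push", nil)
+    req.Header.Set("X-Registry-Auth", base64.URLEncoding.EncodeToString(buf))
+
+    resp, err := http.DefaultClient.Do(req)
+    if err != nil {
+        panic(err)
+    }
+    defer resp.Body.Close()
+    fmt.Println(resp.Status)
+}
+```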
+ +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 OK + +Query Parameters: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +Status Codes: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +Query Parameters: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +Status Codes: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... 
+
+    ]
+
+Query Parameters:
+
+- **term** – term to search
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+## 2.3 Misc
+
+### Check auth configuration
+
+`POST /auth`
+
+Get the default username and email
+
+**Example request**:
+
+    POST /auth HTTP/1.1
+    Content-Type: application/json
+
+    {
+        "username": "hannibal",
+        "password": "xxxx",
+        "email": "hannibal@a-team.com",
+        "serveraddress": "https://index.docker.io/v1/"
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+Status Codes:
+
+- **200** – no error
+- **204** – no error
+- **500** – server error
+
+### Display system-wide information
+
+`GET /info`
+
+Display system-wide information
+
+**Example request**:
+
+    GET /info HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "Containers": 11,
+        "CpuCfsPeriod": true,
+        "CpuCfsQuota": true,
+        "Debug": false,
+        "DockerRootDir": "/var/lib/docker",
+        "Driver": "btrfs",
+        "DriverStatus": [[""]],
+        "ExecutionDriver": "native-0.1",
+        "ExperimentalBuild": false,
+        "HttpProxy": "http://test:test@localhost:8080",
+        "HttpsProxy": "https://test:test@localhost:8080",
+        "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS",
+        "IPv4Forwarding": true,
+        "Images": 16,
+        "IndexServerAddress": "https://index.docker.io/v1/",
+        "InitPath": "/usr/bin/docker",
+        "InitSha1": "",
+        "KernelVersion": "3.12.0-1-amd64",
+        "Labels": [
+            "storage=ssd"
+        ],
+        "MemTotal": 2099236864,
+        "MemoryLimit": true,
+        "NCPU": 1,
+        "NEventsListener": 0,
+        "NFd": 11,
+        "NGoroutines": 21,
+        "Name": "prod-server-42",
+        "NoProxy": "9.81.1.160",
+        "OomKillDisable": true,
+        "OperatingSystem": "Boot2Docker",
+        "RegistryConfig": {
+            "IndexConfigs": {
+                "docker.io": {
+                    "Mirrors": null,
+                    "Name": "docker.io",
+                    "Official": true,
+                    "Secure": true
+                }
+            },
+            "InsecureRegistryCIDRs": [
+                "127.0.0.0/8"
+            ]
+        },
+        "SwapLimit": false,
+        "SystemTime": "2015-03-10T11:11:23.730591467-07:00"
+    }
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+### Show the docker version information
+
+`GET /version`
+
+Show the docker version information
+
+**Example request**:
+
+    GET /version HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "Version": "1.5.0",
+        "Os": "linux",
+        "KernelVersion": "3.18.5-tinycore64",
+        "GoVersion": "go1.4.1",
+        "GitCommit": "a8a31ef",
+        "Arch": "amd64",
+        "ApiVersion": "1.20",
+        "Experimental": false
+    }
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+### Ping the docker server
+
+`GET /_ping`
+
+Ping the docker server
+
+**Example request**:
+
+    GET /_ping HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: text/plain
+
+    OK
+
+Status Codes:
+
+- **200** - no error
+- **500** - server error
+
+### Create a new image from a container's changes
+
+`POST /commit`
+
+Create a new image from a container's changes
+
+**Example request**:
+
+    POST /commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1
+    Content-Type: application/json
+
+    {
+        "Hostname": "",
+        "Domainname": "",
+        "User": "",
+        "AttachStdin": false,
+        "AttachStdout": true,
+        "AttachStderr": true,
+        "Tty": false,
+        "OpenStdin": false,
+        "StdinOnce": false,
+        "Env": null,
+        "Cmd": [
+            "date"
+        ],
+        "Mounts": [
+            {
+                "Source": "/data",
+                "Destination": "/data",
+                "Mode": "ro,Z",
+                "RW": false
+            }
+        ],
+        "Labels": {
+            "key1": "value1",
+            "key2": "value2"
+        },
+        "WorkingDir": "",
+        "NetworkDisabled": false,
+        "ExposedPorts": {
+            "22/tcp": {}
+        }
+    }
+
+**Example response**:
+
HTTP/1.1 201 Created + Content-Type: application/vnd.docker.raw-stream + + {"Id": "596069db4bf5"} + +Json Parameters: + +- **config** - the container's configuration + +Query Parameters: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") + +Status Codes: + +- **201** – no error +- **404** – no such container +- **500** – server error + +### Monitor Docker's events + +`GET /events` + +Get container events from docker, either in real time via streaming, or via +polling (using since). + +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause + +and Docker images report: + + delete, import, pull, push, tag, untag + +**Example request**: + + GET /events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} + +Query Parameters: + +- **since** – Timestamp used for polling +- **until** – Timestamp used for polling +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `event=`; -- event to filter + - `image=`; -- image to filter + - `container=`; -- container to filter + +Status Codes: + +- **200** – no error +- **500** – server error + +### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +Status Codes: + +- **200** – no error +- **500** – server error + +### Get a tarball containing all images. + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +Status Codes: + +- **200** – no error +- **500** – server error + +### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. 
+See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /images/load + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +Status Codes: + +- **200** – no error +- **500** – server error + +### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +### Exec Create + +`POST /containers/(id)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "Cmd": [ + "date" + ] + } + +**Example response**: + + HTTP/1.1 201 OK + Content-Type: application/json + + { + "Id": "f90e34656806" + "Warnings":[] + } + +Json Parameters: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. + + +Status Codes: + +- **201** – no error +- **404** – no such container + +### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 201 OK + Content-Type: application/json + + {{ STREAM }} + +Json Parameters: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +Status Codes: + +- **201** – no error +- **404** – no such exec instance + + **Stream details**: + Similar to the stream behavior of `POST /container/(id)/attach` API + +### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /exec/e90e34656806/resize HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 OK + Content-Type: text/plain + +Query Parameters: + +- **h** – height of `tty` session +- **w** – width + +Status Codes: + +- **201** – no error +- **404** – no such exec instance + +### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. 
+
+**Example request**:
+
+    GET /exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: text/plain
+
+    {
+        "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39",
+        "Running" : false,
+        "ExitCode" : 2,
+        "ProcessConfig" : {
+            "privileged" : false,
+            "user" : "",
+            "tty" : false,
+            "entrypoint" : "sh",
+            "arguments" : [
+                "-c",
+                "exit 2"
+            ]
+        },
+        "OpenStdin" : false,
+        "OpenStderr" : false,
+        "OpenStdout" : false,
+        "Container" : {
+            "State" : {
+                "Running" : true,
+                "Paused" : false,
+                "Restarting" : false,
+                "OOMKilled" : false,
+                "Pid" : 3650,
+                "ExitCode" : 0,
+                "Error" : "",
+                "StartedAt" : "2014-11-17T22:26:03.717657531Z",
+                "FinishedAt" : "0001-01-01T00:00:00Z"
+            },
+            "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c",
+            "Created" : "2014-11-17T22:26:03.626304998Z",
+            "Path" : "date",
+            "Args" : [],
+            "Config" : {
+                "Hostname" : "8f177a186b97",
+                "Domainname" : "",
+                "User" : "",
+                "AttachStdin" : false,
+                "AttachStdout" : false,
+                "AttachStderr" : false,
+                "ExposedPorts" : null,
+                "Tty" : false,
+                "OpenStdin" : false,
+                "StdinOnce" : false,
+                "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ],
+                "Cmd" : [
+                    "date"
+                ],
+                "Image" : "ubuntu",
+                "Volumes" : null,
+                "WorkingDir" : "",
+                "Entrypoint" : null,
+                "NetworkDisabled" : false,
+                "MacAddress" : "",
+                "OnBuild" : null,
+                "SecurityOpt" : null
+            },
+            "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5",
+            "NetworkSettings" : {
+                "IPAddress" : "172.17.0.2",
+                "IPPrefixLen" : 16,
+                "MacAddress" : "02:42:ac:11:00:02",
+                "Gateway" : "172.17.42.1",
+                "Bridge" : "docker0",
+                "PortMapping" : null,
+                "Ports" : {}
+            },
+            "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf",
+            "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname",
+            "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts",
+            "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log",
+            "Name" : "/test",
+            "Driver" : "aufs",
+            "ExecDriver" : "native-0.2",
+            "MountLabel" : "",
+            "ProcessLabel" : "",
+            "AppArmorProfile" : "",
+            "RestartCount" : 0,
+            "Mounts" : []
+        }
+    }
+
+Status Codes:
+
+- **200** – no error
+- **404** – no such exec instance
+- **500** - server error
+
+# 3. Going further
+
+## 3.1 Inside `docker run`
+
+As an example, the `docker run` command line makes the following API calls:
+
+- Create the container
+
+- If the status code is 404, it means the image doesn't exist:
+    - Try to pull it.
+    - Then, retry to create the container.
+
+- Start the container.
+
+- If you are not in detached mode:
+    - Attach to the container, using `logs=1` (to have `stdout` and
+      `stderr` from the container's start) and `stream=1`
+
+- If in detached mode or only `stdin` is attached, display the container's id.
+
+## 3.2 Hijacking
+
+In this version of the API, `/attach` uses hijacking to transport `stdin`,
+`stdout`, and `stderr` on the same socket.
+
+To hint potential proxies about connection hijacking, the Docker client sends
+connection upgrade headers, similarly to websocket.
+
+    Upgrade: tcp
+    Connection: Upgrade
+
+When the Docker daemon detects the `Upgrade` header, it switches its status code
+from **200 OK** to **101 UPGRADED** and resends the same headers.
+
+
+## 3.3 CORS Requests
+
+To enable cross-origin requests to the Remote API, pass a value for the
+`--api-cors-header` flag when running Docker in daemon mode. Setting it to
+`*` (asterisk) allows all origins; the default (blank) leaves CORS disabled.
+
+    $ docker daemon -H="192.168.1.9:2375" --api-cors-header="http://foo.bar"
diff --git a/docs/reference/api/docker_remote_api_v1.3.md b/docs/reference/api/docker_remote_api_v1.3.md
new file mode 100644
index 00000000..1e2b4b3c
--- /dev/null
+++ b/docs/reference/api/docker_remote_api_v1.3.md
@@ -0,0 +1,1110 @@
+
+# Docker Remote API v1.3
+
+# 1. Brief introduction
+
+- The Remote API is replacing rcli
+- Default port in the docker daemon is 2375
+- The API tends to be REST, but for some complex commands, like attach
+  or pull, the HTTP connection is hijacked to transport `stdout`, `stdin`
+  and `stderr`
+
+# 2. Endpoints
+
+## 2.1 Containers
+
+### List containers
+
+`GET /containers/json`
+
+List containers
+
+**Example request**:
+
+    GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+        {
+            "Id": "8dfafdbc3a40",
+            "Image": "ubuntu:latest",
+            "Command": "echo 1",
+            "Created": 1367854155,
+            "Status": "Exit 0",
+            "Ports": "",
+            "SizeRw": 12288,
+            "SizeRootFs": 0
+        },
+        {
+            "Id": "9cd87474be90",
+            "Image": "ubuntu:latest",
+            "Command": "echo 222222",
+            "Created": 1367854155,
+            "Status": "Exit 0",
+            "Ports": "",
+            "SizeRw": 12288,
+            "SizeRootFs": 0
+        },
+        {
+            "Id": "3176a2479c92",
+            "Image": "centos:latest",
+            "Command": "echo 3333333333333333",
+            "Created": 1367854154,
+            "Status": "Exit 0",
+            "Ports": "",
+            "SizeRw": 12288,
+            "SizeRootFs": 0
+        },
+        {
+            "Id": "4cb07b47f9fb",
+            "Image": "fedora:latest",
+            "Command": "echo 444444444444444444444444444444444",
+            "Created": 1367854152,
+            "Status": "Exit 0",
+            "Ports": "",
+            "SizeRw": 12288,
+            "SizeRootFs": 0
+        }
+    ]
+
+Query Parameters:
+
+- **all** – 1/True/true or 0/False/false, Show all containers.
+  Only running containers are shown by default (i.e., this defaults to false)
+- **limit** – Show `limit` last created containers, include non-running ones.
+- **since** – Show only containers created since Id, include non-running ones.
+- **before** – Show only containers created before Id, include non-running ones.
+- **size** – 1/True/true or 0/False/false, Show the containers sizes + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "date" + ], + "Dns":null, + "Image":"ubuntu", + "Volumes":{}, + "VolumesFrom":"" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806" + "Warnings":[] + } + +Json Parameters: + +- **config** – the container's configuration + +Status Codes: + +- **201** – no error +- **404** – no such container +- **406** – impossible to attach (container not running) +- **500** – server error + +### Inspect a container + +`GET /containers/(id)/json` + +Return low-level information on the container `id` + + +**Example request**: + + GET /containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Created": "2013-05-07T14:51:42.041847+02:00", + "Path": "date", + "Args": [], + "Config": { + "Hostname": "4fa6e0f0c678", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Dns": null, + "Image": "ubuntu", + "Volumes": {}, + "VolumesFrom": "" + }, + "State": { + "Running": false, + "Pid": 0, + "ExitCode": 0, + "StartedAt": "2013-05-07T14:51:42.087658+02:01360", + "Ghost": false + }, + "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "NetworkSettings": { + "IpAddress": "", + "IpPrefixLen": 0, + "Gateway": "", + "Bridge": "", + "PortMapping": null + }, + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", + "ResolvConfPath": "/etc/resolv.conf", + "Volumes": {} + } + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### List processes running inside a container + +`GET /containers/(id)/top` + +List processes running inside the container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "PID":"11935", + "Tty":"pts/2", + "Time":"00:00:00", + "Cmd":"sh" + }, + { + "PID":"12140", + "Tty":"pts/2", + "Time":"00:00:00", + "Cmd":"sleep" + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Inspect changes on a container's filesystem + +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Export a container + +`GET /containers/(id)/export` + +Export the contents of container `id` + +**Example 
request**: + + GET /containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Start a container + +`POST /containers/(id)/start` + +Start the container `id` + +**Example request**: + + POST /containers/(id)/start HTTP/1.1 + Content-Type: application/json + + { + "Binds":["/tmp:/tmp"] + } + +**Example response**: + + HTTP/1.1 204 No Content + Content-Type: text/plain + +Json Parameters: + +   + +- **hostConfig** – the container's host configuration (optional) + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Stop a container + +`POST /containers/(id)/stop` + +Stop the container `id` + +**Example request**: + + POST /containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 OK + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Restart a container + +`POST /containers/(id)/restart` + +Restart the container `id` + +**Example request**: + + POST /containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Kill a container + +`POST /containers/(id)/kill` + +Kill the container `id` + +**Example request**: + + POST /containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Attach to a container + +`POST /containers/(id)/attach` + +Attach to the container `id` + +**Example request**: + + POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Defaul + false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. 
Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Wait a container + +`POST /containers/(id)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Remove a container + +`DELETE /containers/(id)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default false + +Status Codes: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +## 2.2 Images + +### List Images + +`GET /images/(format)` + +List images `format` could be json or viz (json default) + +**Example request**: + + GET /images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Repository":"ubuntu", + "Tag":"precise", + "Id":"b750fe79269d", + "Created":1364102658, + "Size":24653, + "VirtualSize":180116135 + }, + { + "Repository":"ubuntu", + "Tag":"12.04", + "Id":"b750fe79269d", + "Created":1364102658, + "Size":24653, + "VirtualSize":180116135 + } + ] + +**Example request**: + + GET /images/viz HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + digraph docker { + "d82cbacda43a" -> "074be284591f" + "1496068ca813" -> "08306dc45919" + "08306dc45919" -> "0e7893146ac2" + "b750fe79269d" -> "1496068ca813" + base -> "27cf78414709" [style=invis] + "f71189fff3de" -> "9a33b36209ed" + "27cf78414709" -> "b750fe79269d" + "0e7893146ac2" -> "d6434d954665" + "d6434d954665" -> "d82cbacda43a" + base -> "e9aa60c60128" [style=invis] + "074be284591f" -> "f71189fff3de" + "b750fe79269d" [label="b750fe79269d\nubuntu",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; + "e9aa60c60128" [label="e9aa60c60128\ncentos",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; + "9a33b36209ed" [label="9a33b36209ed\nfedora",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; + base [style=invisible] + } + +Query Parameters: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by defaul + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### Create an image + +`POST /images/create` + +Create an image, either by pull it from the registry or by importing i + +**Example request**: + + POST /images/create?fromImage=ubuntu HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Pulling..."} + {"status":"Pulling", "progress":"1/? (n/a)"} + {"error":"Invalid..."} + ... 
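+
+The pull progress above arrives as a stream of concatenated JSON objects
+rather than a single document. A minimal Go sketch of consuming it (the
+daemon address and image name are illustrative assumptions):
+
+    package main
+
+    import (
+        "encoding/json"
+        "fmt"
+        "net/http"
+    )
+
+    // progressMessage mirrors the status objects shown above; only the
+    // fields used here are declared.
+    type progressMessage struct {
+        Status   string `json:"status"`
+        Progress string `json:"progress"`
+        Error    string `json:"error"`
+    }
+
+    func main() {
+        // Assumes a daemon reachable on the default TCP port.
+        resp, err := http.Post("http://localhost:2375/images/create?fromImage=ubuntu", "text/plain", nil)
+        if err != nil {
+            panic(err)
+        }
+        defer resp.Body.Close()
+
+        // json.Decoder consumes the concatenated objects one at a time
+        // until the stream ends.
+        dec := json.NewDecoder(resp.Body)
+        for {
+            var m progressMessage
+            if err := dec.Decode(&m); err != nil {
+                break // io.EOF once the pull finishes
+            }
+            if m.Error != "" {
+                fmt.Println("error:", m.Error)
+            } else {
+                fmt.Println(m.Status, m.Progress)
+            }
+        }
+    }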
+
+Query Parameters:
+
+- **fromImage** – name of the image to pull
+- **fromSrc** – source to import, - means stdin
+- **repo** – repository
+- **tag** – tag
+- **registry** – the registry to pull from
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+### Insert a file in an image
+
+`POST /images/(name)/insert`
+
+Insert a file from `url` in the image `name` at `path`
+
+**Example request**:
+
+    POST /images/test/insert?path=/usr&url=myurl HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {"status":"Inserting..."}
+    {"status":"Inserting", "progress":"1/? (n/a)"}
+    {"error":"Invalid..."}
+    ...
+
+Query Parameters:
+
+- **url** – The url from where the file is taken
+- **path** – The path where the file is stored
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+### Inspect an image
+
+`GET /images/(name)/json`
+
+Return low-level information on the image `name`
+
+**Example request**:
+
+    GET /images/centos/json HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
+        "parent":"27cf784147099545",
+        "created":"2013-03-23T22:24:18.818426-07:00",
+        "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0",
+        "container_config":
+        {
+            "Hostname":"",
+            "User":"",
+            "Memory":0,
+            "MemorySwap":0,
+            "AttachStdin":false,
+            "AttachStdout":false,
+            "AttachStderr":false,
+            "PortSpecs":null,
+            "Tty":true,
+            "OpenStdin":true,
+            "StdinOnce":false,
+            "Env":null,
+            "Cmd": ["/bin/bash"],
+            "Dns":null,
+            "Image":"centos",
+            "Volumes":null,
+            "VolumesFrom":""
+        },
+        "Size": 6824592
+    }
+
+Status Codes:
+
+- **200** – no error
+- **404** – no such image
+- **500** – server error
+
+### Get the history of an image
+
+`GET /images/(name)/history`
+
+Return the history of the image `name`
+
+**Example request**:
+
+    GET /images/fedora/history HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+        {
+            "Id": "b750fe79269d",
+            "Created": 1364102658,
+            "CreatedBy": "/bin/bash"
+        },
+        {
+            "Id": "27cf78414709",
+            "Created": 1364068391,
+            "CreatedBy": ""
+        }
+    ]
+
+Status Codes:
+
+- **200** – no error
+- **404** – no such image
+- **500** – server error
+
+### Push an image on the registry
+
+`POST /images/(name)/push`
+
+Push the image `name` on the registry
+
+**Example request**:
+
+    POST /images/test/push HTTP/1.1
+    {{ authConfig }}
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {"status":"Pushing..."}
+    {"status":"Pushing", "progress":"1/? (n/a)"}
+    {"error":"Invalid..."}
+    ...
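+
+In this API version the `{{ authConfig }}` placeholder stands for the
+JSON-encoded registry credentials sent as the request body itself. A
+minimal Go sketch, with placeholder credentials and an assumed daemon
+address:
+
+    package main
+
+    import (
+        "bytes"
+        "encoding/json"
+        "io"
+        "net/http"
+        "os"
+    )
+
+    func main() {
+        // Placeholder credentials in the shape used by the /auth endpoint.
+        body, err := json.Marshal(map[string]string{
+            "username": "hannibal",
+            "password": "xxxx",
+            "email":    "hannibal@a-team.com",
+        })
+        if err != nil {
+            panic(err)
+        }
+
+        // In v1.3 the credentials travel in the request body.
+        resp, err := http.Post("http://localhost:2375/images/test/push",
+            "application/json", bytes.NewReader(body))
+        if err != nil {
+            panic(err)
+        }
+        defer resp.Body.Close()
+
+        // Relay the push progress stream to stdout.
+        io.Copy(os.Stdout, resp.Body)
+    }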
+
+Status Codes:
+
+- **200** – no error
+- **404** – no such image
+- **500** – server error
+
+### Tag an image into a repository
+
+`POST /images/(name)/tag`
+
+Tag the image `name` into a repository
+
+**Example request**:
+
+    POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 201 OK
+
+Query Parameters:
+
+- **repo** – The repository to tag in
+- **force** – 1/True/true or 0/False/false, default false
+- **tag** – The new tag name
+
+Status Codes:
+
+- **201** – no error
+- **400** – bad parameter
+- **404** – no such image
+- **409** – conflict
+- **500** – server error
+
+### Remove an image
+
+`DELETE /images/(name)`
+
+Remove the image `name` from the filesystem
+
+**Example request**:
+
+    DELETE /images/test HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-type: application/json
+
+    [
+        {"Untagged": "3e2f21a89f"},
+        {"Deleted": "3e2f21a89f"},
+        {"Deleted": "53b4f83ac9"}
+    ]
+
+Status Codes:
+
+- **200** – no error
+- **404** – no such image
+- **409** – conflict
+- **500** – server error
+
+### Search images
+
+`GET /images/search`
+
+Search for an image on [Docker Hub](https://hub.docker.com)
+
+**Example request**:
+
+    GET /images/search?term=sshd HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+        {
+            "Name":"cespare/sshd",
+            "Description":""
+        },
+        {
+            "Name":"johnfuller/sshd",
+            "Description":""
+        },
+        {
+            "Name":"dhrp/mongodb-sshd",
+            "Description":""
+        }
+    ]
+
+Query Parameters:
+
+- **term** – term to search
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+## 2.3 Misc
+
+### Build an image from Dockerfile via stdin
+
+`POST /build`
+
+Build an image from Dockerfile via stdin
+
+**Example request**:
+
+    POST /build HTTP/1.1
+
+    {{ TAR STREAM }}
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+    {{ STREAM }}
+
+The stream must be a tar archive compressed with one of the
+following algorithms: identity (no compression), gzip, bzip2, xz.
+The archive must include a file called Dockerfile at its root. It
+may include any number of other files, which will be accessible in
+the build context (See the ADD build command).
+
+The Content-Type header should be set to "application/tar".
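+
+A minimal Go sketch of driving this endpoint with an in-memory context
+containing only a Dockerfile; the daemon address and image tag are
+assumptions for illustration:
+
+    package main
+
+    import (
+        "archive/tar"
+        "bytes"
+        "io"
+        "net/http"
+        "os"
+    )
+
+    func main() {
+        // Build an in-memory tar archive with a single Dockerfile at its root.
+        dockerfile := []byte("FROM ubuntu\nRUN echo hello\n")
+        var buf bytes.Buffer
+        tw := tar.NewWriter(&buf)
+        if err := tw.WriteHeader(&tar.Header{
+            Name: "Dockerfile",
+            Mode: 0644,
+            Size: int64(len(dockerfile)),
+        }); err != nil {
+            panic(err)
+        }
+        if _, err := tw.Write(dockerfile); err != nil {
+            panic(err)
+        }
+        tw.Close()
+
+        // Upload the context; `t` names the resulting image.
+        resp, err := http.Post("http://localhost:2375/build?t=myimage",
+            "application/tar", &buf)
+        if err != nil {
+            panic(err)
+        }
+        defer resp.Body.Close()
+
+        // Stream the build output back to the caller.
+        io.Copy(os.Stdout, resp.Body)
+    }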
+
+Query Parameters:
+
+- **t** – repository name (and optionally a tag) to be applied to
+  the resulting image in case of success
+- **remote** – build source URI (git or HTTPS/HTTP)
+- **q** – suppress verbose build output
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+### Check auth configuration
+
+`POST /auth`
+
+Get the default username and email
+
+**Example request**:
+
+    POST /auth HTTP/1.1
+    Content-Type: application/json
+
+    {
+        "username":"hannibal",
+        "password":"xxxx",
+        "email":"hannibal@a-team.com"
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: text/plain
+
+Status Codes:
+
+- **200** – no error
+- **204** – no error
+- **500** – server error
+
+### Display system-wide information
+
+`GET /info`
+
+Display system-wide information
+
+**Example request**:
+
+    GET /info HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "Containers":11,
+        "Images":16,
+        "Debug":false,
+        "NFd": 11,
+        "NGoroutines":21,
+        "MemoryLimit":true,
+        "SwapLimit":false,
+        "EventsListeners":"0",
+        "LXCVersion":"0.7.5",
+        "KernelVersion":"3.8.0-19-generic"
+    }
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+### Show the docker version information
+
+`GET /version`
+
+Show the docker version information
+
+**Example request**:
+
+    GET /version HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "Version":"0.2.2",
+        "GitCommit":"5a2a5cc+CHANGES",
+        "GoVersion":"go1.0.3"
+    }
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+### Create a new image from a container's changes
+
+`POST /commit`
+
+Create a new image from a container's changes
+
+**Example request**:
+
+    POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1
+    Content-Type: application/json
+
+    {
+        "Cmd": ["cat", "/world"],
+        "PortSpecs":["22"]
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 OK
+    Content-Type: application/vnd.docker.raw-stream
+
+    {"Id": "596069db4bf5"}
+
+Query Parameters:
+
+- **container** – source container
+- **repo** – repository
+- **tag** – tag
+- **m** – commit message
+- **author** – author (e.g., "John Hannibal Smith
+  <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")
+
+Status Codes:
+
+- **201** – no error
+- **404** – no such container
+- **500** – server error
+
+### Monitor Docker's events
+
+`GET /events`
+
+Get events from docker, either in real time via streaming, or via
+polling (using since).
+
+Docker containers will report the following events:
+
+    create, destroy, die, export, kill, pause, restart, start, stop, unpause
+
+and Docker images will report:
+
+    untag, delete
+
+**Example request**:
+
+    GET /events?since=1374067924
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {"status":"create","id":"dfdf82bd3881","time":1374067924}
+    {"status":"start","id":"dfdf82bd3881","time":1374067924}
+    {"status":"stop","id":"dfdf82bd3881","time":1374067966}
+    {"status":"destroy","id":"dfdf82bd3881","time":1374067970}
+
+Query Parameters:
+
+- **since** – timestamp used for polling
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+# 3. 
Going further
+
+## 3.1 Inside `docker run`
+
+Here are the steps of `docker run`:
+
+ - Create the container
+
+ - If the status code is 404, it means the image doesn't exist:
+    - Try to pull it
+    - Then retry to create the container
+
+ - Start the container
+
+ - If you are not in detached mode:
+    - Attach to the container, using logs=1 (to have stdout and
+      stderr from the container's start) and stream=1
+
+ - If in detached mode or only stdin is attached:
+    - Display the container's id
+
+## 3.2 Hijacking
+
+In this version of the API, `/attach` uses hijacking to transport stdin,
+stdout and stderr on the same socket. This might change in the future.
+
+## 3.3 CORS Requests
+
+To enable cross origin requests to the Remote API, add the flag
+"--api-enable-cors" when running docker in daemon mode.
+
+    $ docker -d -H="192.168.1.9:2375" --api-enable-cors
diff --git a/docs/reference/api/docker_remote_api_v1.4.md b/docs/reference/api/docker_remote_api_v1.4.md
new file mode 100644
index 00000000..6cedddf1
--- /dev/null
+++ b/docs/reference/api/docker_remote_api_v1.4.md
@@ -0,0 +1,1153 @@
+
+
+# Docker Remote API v1.4
+
+# 1. Brief introduction
+
+- The Remote API is replacing rcli
+- Default port in the docker daemon is 2375
+- The API tends to be REST, but for some complex commands, like attach
+  or pull, the HTTP connection is hijacked to transport stdout, stdin
+  and stderr
+
+# 2. Endpoints
+
+## 2.1 Containers
+
+### List containers
+
+`GET /containers/json`
+
+List containers
+
+**Example request**:
+
+    GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+        {
+            "Id": "8dfafdbc3a40",
+            "Image": "ubuntu:latest",
+            "Command": "echo 1",
+            "Created": 1367854155,
+            "Status": "Exit 0",
+            "Ports":"",
+            "SizeRw":12288,
+            "SizeRootFs":0
+        },
+        {
+            "Id": "9cd87474be90",
+            "Image": "ubuntu:latest",
+            "Command": "echo 222222",
+            "Created": 1367854155,
+            "Status": "Exit 0",
+            "Ports":"",
+            "SizeRw":12288,
+            "SizeRootFs":0
+        },
+        {
+            "Id": "3176a2479c92",
+            "Image": "centos:latest",
+            "Command": "echo 3333333333333333",
+            "Created": 1367854154,
+            "Status": "Exit 0",
+            "Ports":"",
+            "SizeRw":12288,
+            "SizeRootFs":0
+        },
+        {
+            "Id": "4cb07b47f9fb",
+            "Image": "fedora:latest",
+            "Command": "echo 444444444444444444444444444444444",
+            "Created": 1367854152,
+            "Status": "Exit 0",
+            "Ports":"",
+            "SizeRw":12288,
+            "SizeRootFs":0
+        }
+    ]
+
+Query Parameters:
+
+- **all** – 1/True/true or 0/False/false, Show all containers.
+  Only running containers are shown by default (i.e., this defaults to false)
+- **limit** – Show `limit` last created containers, include non-running ones.
+- **since** – Show only containers created since Id, include non-running ones.
+- **before** – Show only containers created before Id, include non-running ones.
+- **size** – 1/True/true or 0/False/false, Show the containers sizes + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Privileged": false, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "date" + ], + "Dns":null, + "Image":"ubuntu", + "Volumes":{}, + "VolumesFrom":"", + "WorkingDir":"" + + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806" + "Warnings":[] + } + +Json Parameters: + +- **config** – the container's configuration + +Status Codes: + +- **201** – no error +- **404** – no such container +- **406** – impossible to attach (container not running) +- **500** – server error + +### Inspect a container + +`GET /containers/(id)/json` + +Return low-level information on the container `id` + + +**Example request**: + + GET /containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Created": "2013-05-07T14:51:42.041847+02:00", + "Path": "date", + "Args": [], + "Config": { + "Hostname": "4fa6e0f0c678", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Dns": null, + "Image": "ubuntu", + "Volumes": {}, + "VolumesFrom": "", + "WorkingDir": "" + }, + "State": { + "Running": false, + "Pid": 0, + "ExitCode": 0, + "StartedAt": "2013-05-07T14:51:42.087658+02:01360", + "Ghost": false + }, + "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "NetworkSettings": { + "IpAddress": "", + "IpPrefixLen": 0, + "Gateway": "", + "Bridge": "", + "PortMapping": null + }, + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", + "ResolvConfPath": "/etc/resolv.conf", + "Volumes": {} + } + +Status Codes: + +- **200** – no error +- **404** – no such container +- **409** – conflict between containers and images +- **500** – server error + +### List processes running inside a container + +`GET /containers/(id)/top` + +List processes running inside the container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles": [ + "USER", + "PID", + "%CPU", + "%MEM", + "VSZ", + "RSS", + "TTY", + "STAT", + "START", + "TIME", + "COMMAND" + ], + "Processes": [ + ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], + ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] + ] + } + +Query Parameters: + +- **ps_args** – ps arguments to use (e.g., aux) + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Inspect changes on a container's filesystem + +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + 
+ [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Export a container + +`GET /containers/(id)/export` + +Export the contents of container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Start a container + +`POST /containers/(id)/start` + +Start the container `id` + +**Example request**: + + POST /containers/(id)/start HTTP/1.1 + Content-Type: application/json + + { + "Binds":["/tmp:/tmp"], + "LxcConf":[{"Key":"lxc.utsname","Value":"docker"}] + } + +**Example response**: + + HTTP/1.1 204 No Content + Content-Type: text/plain + +Json Parameters: + +   + +- **hostConfig** – the container's host configuration (optional) + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Stop a container + +`POST /containers/(id)/stop` + +Stop the container `id` + +**Example request**: + + POST /containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 OK + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Restart a container + +`POST /containers/(id)/restart` + +Restart the container `id` + +**Example request**: + + POST /containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Kill a container + +`POST /containers/(id)/kill` + +Kill the container `id` + +**Example request**: + + POST /containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Attach to a container + +`POST /containers/(id)/attach` + +Attach to the container `id` + +**Example request**: + + POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Defaul + false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. 
Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Wait a container + +`POST /containers/(id)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Remove a container + +`DELETE /containers/(id)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. 
Default false + +Status Codes: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Copy files or folders from a container + +`POST /containers/(id)/copy` + +Copy files or folders of container `id` + +**Example request**: + + POST /containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +## 2.2 Images + +### List Images + +`GET /images/(format)` + +List images `format` could be json or viz (json default) + +**Example request**: + + GET /images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Repository":"ubuntu", + "Tag":"precise", + "Id":"b750fe79269d", + "Created":1364102658, + "Size":24653, + "VirtualSize":180116135 + }, + { + "Repository":"ubuntu", + "Tag":"12.04", + "Id":"b750fe79269d", + "Created":1364102658, + "Size":24653, + "VirtualSize":180116135 + } + ] + +**Example request**: + + GET /images/viz HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + digraph docker { + "d82cbacda43a" -> "074be284591f" + "1496068ca813" -> "08306dc45919" + "08306dc45919" -> "0e7893146ac2" + "b750fe79269d" -> "1496068ca813" + base -> "27cf78414709" [style=invis] + "f71189fff3de" -> "9a33b36209ed" + "27cf78414709" -> "b750fe79269d" + "0e7893146ac2" -> "d6434d954665" + "d6434d954665" -> "d82cbacda43a" + base -> "e9aa60c60128" [style=invis] + "074be284591f" -> "f71189fff3de" + "b750fe79269d" [label="b750fe79269d\nubuntu",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; + "e9aa60c60128" [label="e9aa60c60128\ncentos",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; + "9a33b36209ed" [label="9a33b36209ed\nfedora",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; + base [style=invisible] + } + +Query Parameters: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by defaul + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### Create an image + +`POST /images/create` + +Create an image, either by pull it from the registry or by importing i + +**Example request**: + + POST /images/create?fromImage=ubuntu HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Pulling..."} + {"status":"Pulling", "progress":"1/? (n/a)"} + {"error":"Invalid..."} + ... + +Query Parameters: + +- **fromImage** – name of the image to pull +- **fromSrc** – source to import, - means stdin +- **repo** – repository +- **tag** – tag +- **registry** – the registry to pull from + +Status Codes: + +- **200** – no error +- **500** – server error + +### Insert a file in an image + +`POST /images/(name)/insert` + +Insert a file from `url` in the image `name` at `path` + +**Example request**: + + POST /images/test/insert?path=/usr&url=myurl HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Inserting..."} + {"status":"Inserting", "progress":"1/? (n/a)"} + {"error":"Invalid..."} + ... 
+ +Query Parameters: + +- **url** – The url from where the file is taken +- **path** – The path where the file is stored + +Status Codes: + +- **200** – no error +- **500** – server error + +### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /images/centos/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "parent":"27cf784147099545", + "created":"2013-03-23T22:24:18.818426-07:00", + "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "container_config": + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":false, + "AttachStderr":false, + "PortSpecs":null, + "Tty":true, + "OpenStdin":true, + "StdinOnce":false, + "Env":null, + "Cmd": ["/bin/bash"], + "Dns":null, + "Image":"centos", + "Volumes":null, + "VolumesFrom":"", + "WorkingDir":"" + }, + "Size": 6824592 + } + +Status Codes: + +- **200** – no error +- **404** – no such image +- **409** – conflict between containers and images +- **500** – server error + +### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /images/fedora/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "b750fe79269d", + "Created": 1364102658, + "CreatedBy": "/bin/bash" + }, + { + "Id": "27cf78414709", + "Created": 1364068391, + "CreatedBy": "" + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /images/test/push HTTP/1.1 + {{ authConfig }} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Pushing..."} {"status":"Pushing", "progress":"1/? (n/a)"} + {"error":"Invalid..."} ... 
+
+Status Codes:
+
+- **200** – no error
+- **404** – no such image
+- **500** – server error
+
+### Tag an image into a repository
+
+`POST /images/(name)/tag`
+
+Tag the image `name` into a repository
+
+**Example request**:
+
+    POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 201 OK
+
+Query Parameters:
+
+- **repo** – The repository to tag in
+- **force** – 1/True/true or 0/False/false, default false
+- **tag** – The new tag name
+
+Status Codes:
+
+- **201** – no error
+- **400** – bad parameter
+- **404** – no such image
+- **409** – conflict
+- **500** – server error
+
+### Remove an image
+
+`DELETE /images/(name)`
+
+Remove the image `name` from the filesystem
+
+**Example request**:
+
+    DELETE /images/test HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-type: application/json
+
+    [
+        {"Untagged": "3e2f21a89f"},
+        {"Deleted": "3e2f21a89f"},
+        {"Deleted": "53b4f83ac9"}
+    ]
+
+Status Codes:
+
+- **200** – no error
+- **404** – no such image
+- **409** – conflict
+- **500** – server error
+
+### Search images
+
+`GET /images/search`
+
+Search for an image on [Docker Hub](https://hub.docker.com)
+
+**Example request**:
+
+    GET /images/search?term=sshd HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+        {
+            "Name":"cespare/sshd",
+            "Description":""
+        },
+        {
+            "Name":"johnfuller/sshd",
+            "Description":""
+        },
+        {
+            "Name":"dhrp/mongodb-sshd",
+            "Description":""
+        }
+    ]
+
+Query Parameters:
+
+- **term** – term to search
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+## 2.3 Misc
+
+### Build an image from Dockerfile via stdin
+
+`POST /build`
+
+Build an image from Dockerfile via stdin
+
+**Example request**:
+
+    POST /build HTTP/1.1
+
+    {{ TAR STREAM }}
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+    {{ STREAM }}
+
+The stream must be a tar archive compressed with one of the
+following algorithms: identity (no compression), gzip, bzip2, xz.
+The archive must include a file called Dockerfile at its root. It
+may include any number of other files, which will be accessible in
+the build context (See the ADD build command).
+
+The Content-Type header should be set to "application/tar".
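+
+As an aside before the parameter list: the `remote` parameter below lets
+the daemon fetch the build source itself, so no context is uploaded. A
+Go sketch, assuming a reachable daemon and a hypothetical repository URL:
+
+    package main
+
+    import (
+        "io"
+        "net/http"
+        "net/url"
+        "os"
+    )
+
+    func main() {
+        // Ask the daemon to fetch the build source; no body is uploaded.
+        q := url.Values{}
+        q.Set("remote", "https://example.com/repo.git") // hypothetical repository
+        q.Set("t", "myimage")
+        q.Set("nocache", "1") // rebuild every layer, skipping the cache
+
+        resp, err := http.Post("http://localhost:2375/build?"+q.Encode(), "text/plain", nil)
+        if err != nil {
+            panic(err)
+        }
+        defer resp.Body.Close()
+
+        // Stream the build output as it arrives.
+        io.Copy(os.Stdout, resp.Body)
+    }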
+
+Query Parameters:
+
+- **t** – repository name (and optionally a tag) to be applied to
+  the resulting image in case of success
+- **remote** – build source URI (git or HTTPS/HTTP)
+- **q** – suppress verbose build output
+- **nocache** – do not use the cache when building the image
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+### Check auth configuration
+
+`POST /auth`
+
+Get the default username and email
+
+**Example request**:
+
+    POST /auth HTTP/1.1
+    Content-Type: application/json
+
+    {
+        "username": "hannibal",
+        "password": "xxxx",
+        "email": "hannibal@a-team.com",
+        "serveraddress": "https://index.docker.io/v1/"
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: text/plain
+
+Status Codes:
+
+- **200** – no error
+- **204** – no error
+- **500** – server error
+
+### Display system-wide information
+
+`GET /info`
+
+Display system-wide information
+
+**Example request**:
+
+    GET /info HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "Containers":11,
+        "Images":16,
+        "Debug":false,
+        "NFd": 11,
+        "NGoroutines":21,
+        "MemoryLimit":true,
+        "SwapLimit":false,
+        "IPv4Forwarding":true
+    }
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+### Show the docker version information
+
+`GET /version`
+
+Show the docker version information
+
+**Example request**:
+
+    GET /version HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "Version":"0.2.2",
+        "GitCommit":"5a2a5cc+CHANGES",
+        "GoVersion":"go1.0.3"
+    }
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+### Create a new image from a container's changes
+
+`POST /commit`
+
+Create a new image from a container's changes
+
+**Example request**:
+
+    POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1
+    Content-Type: application/json
+
+    {
+        "Cmd": ["cat", "/world"],
+        "PortSpecs":["22"]
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 OK
+    Content-Type: application/vnd.docker.raw-stream
+
+    {"Id": "596069db4bf5"}
+
+Query Parameters:
+
+- **container** – source container
+- **repo** – repository
+- **tag** – tag
+- **m** – commit message
+- **author** – author (e.g., "John Hannibal Smith
+  <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")
+
+Status Codes:
+
+- **201** – no error
+- **404** – no such container
+- **500** – server error
+
+### Monitor Docker's events
+
+`GET /events`
+
+Get events from docker, either in real time via streaming, or via
+polling (using since).
+
+Docker containers will report the following events:
+
+    create, destroy, die, export, kill, pause, restart, start, stop, unpause
+
+and Docker images will report:
+
+    untag, delete
+
+**Example request**:
+
+    GET /events?since=1374067924
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {"status":"create","id":"dfdf82bd3881","from":"ubuntu:latest","time":1374067924}
+    {"status":"start","id":"dfdf82bd3881","from":"ubuntu:latest","time":1374067924}
+    {"status":"stop","id":"dfdf82bd3881","from":"ubuntu:latest","time":1374067966}
+    {"status":"destroy","id":"dfdf82bd3881","from":"ubuntu:latest","time":1374067970}
+
+Query Parameters:
+
+- **since** – timestamp used for polling
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+# 3. 
Going further + +## 3.1 Inside `docker run` + +Here are the steps of `docker run` : + + - Create the container + + - If the status code is 404, it means the image doesn't exist: + - Try to pull it + - Then retry to create the container + + - Start the container + + - If you are not in detached mode: + - Attach to the container, using logs=1 (to have stdout and + stderr from the container's start) and stream=1 + + - If in detached mode or only stdin is attached: + - Display the container's id + +## 3.2 Hijacking + +In this version of the API, /attach, uses hijacking to transport stdin, +stdout and stderr on the same socket. This might change in the future. + +## 3.3 CORS Requests + +To enable cross origin requests to the remote api add the flag +"--api-enable-cors" when running docker in daemon mode. + + $ docker -d -H="192.168.1.9:2375" --api-enable-cors diff --git a/docs/reference/api/docker_remote_api_v1.5.md b/docs/reference/api/docker_remote_api_v1.5.md new file mode 100644 index 00000000..76c6142c --- /dev/null +++ b/docs/reference/api/docker_remote_api_v1.5.md @@ -0,0 +1,1159 @@ + + +# Docker Remote API v1.5 + +# 1. Brief introduction + +- The Remote API is replacing rcli +- Default port in the docker daemon is 2375 +- The API tends to be REST, but for some complex commands, like attach + or pull, the HTTP connection is hijacked to transport stdout stdin + and stderr + +# 2. Endpoints + +## 2.1 Containers + +### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Image": "ubuntu:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "9cd87474be90", + "Image": "ubuntu:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "3176a2479c92", + "Image": "centos:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Image": "fedora:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + } + ] + +Query Parameters: + +   + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created containers, include non-running ones. +- **since** – Show only containers created since Id, include non-running ones. +- **before** – Show only containers created before Id, include non-running ones. 
+- **size** – 1/True/true or 0/False/false, Show the containers sizes + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Privileged": false, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "date" + ], + "Dns":null, + "Image":"ubuntu", + "Volumes":{}, + "VolumesFrom":"", + "WorkingDir":"" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806" + "Warnings":[] + } + +Json Parameters: + +- **config** – the container's configuration + +Status Codes: + +- **201** – no error +- **404** – no such container +- **406** – impossible to attach (container not running) +- **500** – server error + +### Inspect a container + +`GET /containers/(id)/json` + +Return low-level information on the container `id` + + +**Example request**: + + GET /containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Created": "2013-05-07T14:51:42.041847+02:00", + "Path": "date", + "Args": [], + "Config": { + "Hostname": "4fa6e0f0c678", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Dns": null, + "Image": "ubuntu", + "Volumes": {}, + "VolumesFrom": "", + "WorkingDir":"" + }, + "State": { + "Running": false, + "Pid": 0, + "ExitCode": 0, + "StartedAt": "2013-05-07T14:51:42.087658+02:01360", + "Ghost": false + }, + "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "NetworkSettings": { + "IpAddress": "", + "IpPrefixLen": 0, + "Gateway": "", + "Bridge": "", + "PortMapping": null + }, + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", + "ResolvConfPath": "/etc/resolv.conf", + "Volumes": {} + } + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### List processes running inside a container + +`GET /containers/(id)/top` + +List processes running inside the container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles":[ + "USER", + "PID", + "%CPU", + "%MEM", + "VSZ", + "RSS", + "TTY", + "STAT", + "START", + "TIME", + "COMMAND" + ], + "Processes":[ + ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], + ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] + ] + } + +Query Parameters: + +- **ps_args** – ps arguments to use (e.g., aux) + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Inspect changes on a container's filesystem + +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path":"/dev", + "Kind":0 + }, + { + 
"Path":"/dev/kmsg", + "Kind":1 + }, + { + "Path":"/test", + "Kind":1 + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Export a container + +`GET /containers/(id)/export` + +Export the contents of container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Start a container + +`POST /containers/(id)/start` + +Start the container `id` + +**Example request**: + + POST /containers/(id)/start HTTP/1.1 + Content-Type: application/json + + { + "Binds":["/tmp:/tmp"], + "LxcConf":[{"Key":"lxc.utsname","Value":"docker"}] + } + +**Example response**: + + HTTP/1.1 204 No Content + Content-Type: text/plain + +Json Parameters: + +   + +- **hostConfig** – the container's host configuration (optional) + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Stop a container + +`POST /containers/(id)/stop` + +Stop the container `id` + +**Example request**: + + POST /containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 OK + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Restart a container + +`POST /containers/(id)/restart` + +Restart the container `id` + +**Example request**: + + POST /containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Kill a container + +`POST /containers/(id)/kill` + +Kill the container `id` + +**Example request**: + + POST /containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Attach to a container + +`POST /containers/(id)/attach` + +Attach to the container `id` + +**Example request**: + + POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Defaul + false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. 
Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Wait a container + +`POST /containers/(id)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Remove a container + +`DELETE /containers/(id)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. 
Default false + +Status Codes: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Copy files or folders from a container + +`POST /containers/(id)/copy` + +Copy files or folders of container `id` + +**Example request**: + + POST /containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource":"test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +## 2.2 Images + +### List Images + +`GET /images/(format)` + +List images `format` could be json or viz (json default) + +**Example request**: + + GET /images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Repository":"ubuntu", + "Tag":"precise", + "Id":"b750fe79269d", + "Created":1364102658, + "Size":24653, + "VirtualSize":180116135 + }, + { + "Repository":"ubuntu", + "Tag":"12.04", + "Id":"b750fe79269d", + "Created":1364102658, + "Size":24653, + "VirtualSize":180116135 + } + ] + +**Example request**: + + GET /images/viz HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + digraph docker { + "d82cbacda43a" -> "074be284591f" + "1496068ca813" -> "08306dc45919" + "08306dc45919" -> "0e7893146ac2" + "b750fe79269d" -> "1496068ca813" + base -> "27cf78414709" [style=invis] + "f71189fff3de" -> "9a33b36209ed" + "27cf78414709" -> "b750fe79269d" + "0e7893146ac2" -> "d6434d954665" + "d6434d954665" -> "d82cbacda43a" + base -> "e9aa60c60128" [style=invis] + "074be284591f" -> "f71189fff3de" + "b750fe79269d" [label="b750fe79269d\nubuntu",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; + "e9aa60c60128" [label="e9aa60c60128\ncentos",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; + "9a33b36209ed" [label="9a33b36209ed\nfedora",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; + base [style=invisible] + } + +Query Parameters: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by defaul + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### Create an image + +`POST /images/create` + +Create an image, either by pull it from the registry or by importing i + +**Example request**: + + POST /images/create?fromImage=ubuntu HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Pulling..."} + {"status":"Pulling", "progress":"1/? (n/a)"} + {"error":"Invalid..."} + ... + + When using this endpoint to pull an image from the registry, the + `X-Registry-Auth` header can be used to include + a base64-encoded AuthConfig object. + +Query Parameters: + +- **fromImage** – name of the image to pull +- **fromSrc** – source to import, - means stdin +- **repo** – repository +- **tag** – tag +- **registry** – the registry to pull from + +Status Codes: + +- **200** – no error +- **500** – server error + +### Insert a file in an image + +`POST /images/(name)/insert` + +Insert a file from `url` in the image `name` at `path` + +**Example request**: + + POST /images/test/insert?path=/usr&url=myurl HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Inserting..."} + {"status":"Inserting", "progress":"1/? (n/a)"} + {"error":"Invalid..."} + ... 
+ +Query Parameters: + +- **url** – The url from where the file is taken +- **path** – The path where the file is stored + +Status Codes: + +- **200** – no error +- **500** – server error + +### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /images/centos/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "parent":"27cf784147099545", + "created":"2013-03-23T22:24:18.818426-07:00", + "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "container_config": + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":false, + "AttachStderr":false, + "PortSpecs":null, + "Tty":true, + "OpenStdin":true, + "StdinOnce":false, + "Env":null, + "Cmd": ["/bin/bash"], + "Dns":null, + "Image":"centos", + "Volumes":null, + "VolumesFrom":"", + "WorkingDir":"" + }, + "Size": 6824592 + } + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /images/fedora/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id":"b750fe79269d", + "Created":1364102658, + "CreatedBy":"/bin/bash" + }, + { + "Id":"27cf78414709", + "Created":1364068391, + "CreatedBy":"" + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Pushing..."} + {"status":"Pushing", "progress":"1/? (n/a)"} + {"error":"Invalid..."} + ... + + The `X-Registry-Auth` header can be used to + include a base64-encoded AuthConfig object. 
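+
+A minimal Go sketch of constructing that header, reusing the AuthConfig
+fields from the `/auth` example in this document (the credentials and
+daemon address are placeholders):
+
+    package main
+
+    import (
+        "encoding/base64"
+        "encoding/json"
+        "fmt"
+        "net/http"
+    )
+
+    func main() {
+        // Serialize the AuthConfig and base64-encode it for the header.
+        auth, err := json.Marshal(map[string]string{
+            "username":      "hannibal",
+            "password":      "xxxx",
+            "email":         "hannibal@a-team.com",
+            "serveraddress": "https://index.docker.io/v1/",
+        })
+        if err != nil {
+            panic(err)
+        }
+        encoded := base64.StdEncoding.EncodeToString(auth)
+
+        req, err := http.NewRequest("POST", "http://localhost:2375/images/test/push", nil)
+        if err != nil {
+            panic(err)
+        }
+        req.Header.Set("X-Registry-Auth", encoded)
+
+        resp, err := http.DefaultClient.Do(req)
+        if err != nil {
+            panic(err)
+        }
+        defer resp.Body.Close()
+        fmt.Println(resp.Status)
+    }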
+ +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 OK + +Query Parameters: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +Status Codes: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged":"3e2f21a89f"}, + {"Deleted":"3e2f21a89f"}, + {"Deleted":"53b4f83ac9"} + ] + +Status Codes: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com) + +**Example request**: + + GET /images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Name":"cespare/sshd", + "Description":"" + }, + { + "Name":"johnfuller/sshd", + "Description":"" + }, + { + "Name":"dhrp/mongodb-sshd", + "Description":"" + } + ] + +Query Parameters: + +- **term** – term to search + +Status Codes: + +- **200** – no error +- **500** – server error + +## 2.3 Misc + +### Build an image from Dockerfile via stdin + +`POST /build` + +Build an image from Dockerfile via stdin + +**Example request**: + + POST /build HTTP/1.1 + + {{ TAR STREAM }} + +**Example response**: + + HTTP/1.1 200 OK + + {{ STREAM }} + + The stream must be a tar archive compressed with one of the + following algorithms: identity (no compression), gzip, bzip2, xz. + The archive must include a file called Dockerfile at its root. I + may include any number of other files, which will be accessible in + the build context (See the ADD build command). + + The Content-type header should be set to "application/tar". 
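+
+Since the note above also accepts a gzip-compressed context, here is a Go
+sketch that gzips an in-memory tar context before uploading; the
+Dockerfile contents, tag, and daemon address are illustrative assumptions:
+
+    package main
+
+    import (
+        "archive/tar"
+        "bytes"
+        "compress/gzip"
+        "io"
+        "net/http"
+        "os"
+    )
+
+    func main() {
+        // Wrap the tar writer in a gzip writer to produce a compressed
+        // context entirely in memory.
+        dockerfile := []byte("FROM ubuntu\n")
+        var buf bytes.Buffer
+        gz := gzip.NewWriter(&buf)
+        tw := tar.NewWriter(gz)
+        if err := tw.WriteHeader(&tar.Header{
+            Name: "Dockerfile",
+            Mode: 0644,
+            Size: int64(len(dockerfile)),
+        }); err != nil {
+            panic(err)
+        }
+        if _, err := tw.Write(dockerfile); err != nil {
+            panic(err)
+        }
+        tw.Close()
+        gz.Close()
+
+        // rm=1 removes intermediate containers once the build succeeds.
+        resp, err := http.Post("http://localhost:2375/build?t=myimage&rm=1",
+            "application/tar", &buf)
+        if err != nil {
+            panic(err)
+        }
+        defer resp.Body.Close()
+        io.Copy(os.Stdout, resp.Body)
+    }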
+
+Query Parameters:
+
+- **t** – repository name (and optionally a tag) to be applied to
+  the resulting image in case of success
+- **remote** – build source URI (git or HTTPS/HTTP)
+- **q** – suppress verbose build output
+- **nocache** – do not use the cache when building the image
+- **rm** – remove intermediate containers after a successful build
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+### Check auth configuration
+
+`POST /auth`
+
+Get the default username and email
+
+**Example request**:
+
+    POST /auth HTTP/1.1
+    Content-Type: application/json
+
+    {
+        "username":"hannibal",
+        "password":"xxxx",
+        "email":"hannibal@a-team.com",
+        "serveraddress":"https://index.docker.io/v1/"
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: text/plain
+
+Status Codes:
+
+- **200** – no error
+- **204** – no error
+- **500** – server error
+
+### Display system-wide information
+
+`GET /info`
+
+Display system-wide information
+
+**Example request**:
+
+    GET /info HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "Containers":11,
+        "Images":16,
+        "Debug":false,
+        "NFd": 11,
+        "NGoroutines":21,
+        "MemoryLimit":true,
+        "SwapLimit":false,
+        "IPv4Forwarding":true
+    }
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+### Show the docker version information
+
+`GET /version`
+
+Show the docker version information
+
+**Example request**:
+
+    GET /version HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "Version":"0.2.2",
+        "GitCommit":"5a2a5cc+CHANGES",
+        "GoVersion":"go1.0.3"
+    }
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+### Create a new image from a container's changes
+
+`POST /commit`
+
+Create a new image from a container's changes
+
+**Example request**:
+
+    POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1
+    Content-Type: application/json
+
+    {
+        "Cmd": ["cat", "/world"],
+        "PortSpecs":["22"]
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 OK
+    Content-Type: application/vnd.docker.raw-stream
+
+    {"Id": "596069db4bf5"}
+
+Query Parameters:
+
+- **container** – source container
+- **repo** – repository
+- **tag** – tag
+- **m** – commit message
+- **author** – author (e.g., "John Hannibal Smith
+  <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")
+
+Status Codes:
+
+- **201** – no error
+- **404** – no such container
+- **500** – server error
+
+### Monitor Docker's events
+
+`GET /events`
+
+Get events from docker, either in real time via streaming, or via
+polling (using since).
+
+Docker containers will report the following events:
+
+    create, destroy, die, export, kill, pause, restart, start, stop, unpause
+
+and Docker images will report:
+
+    untag, delete
+
+**Example request**:
+
+    GET /events?since=1374067924
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {"status":"create","id":"dfdf82bd3881","from":"ubuntu:latest","time":1374067924}
+    {"status":"start","id":"dfdf82bd3881","from":"ubuntu:latest","time":1374067924}
+    {"status":"stop","id":"dfdf82bd3881","from":"ubuntu:latest","time":1374067966}
+    {"status":"destroy","id":"dfdf82bd3881","from":"ubuntu:latest","time":1374067970}
+
+Query Parameters:
+
+- **since** – timestamp used for polling
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+# 3. 
Going further
+
+## 3.1 Inside `docker run`
+
+Here are the steps of `docker run`:
+
+ - Create the container
+
+ - If the status code is 404, it means the image doesn't exist:
+    - Try to pull it
+    - Then retry to create the container
+
+ - Start the container
+
+ - If you are not in detached mode:
+    - Attach to the container, using logs=1 (to have stdout and
+      stderr from the container's start) and stream=1
+
+ - If in detached mode or only stdin is attached:
+    - Display the container's id
+
+## 3.2 Hijacking
+
+In this version of the API, `/attach` uses hijacking to transport stdin,
+stdout and stderr on the same socket. This might change in the future.
+
+## 3.3 CORS Requests
+
+To enable cross origin requests to the Remote API, add the flag
+"--api-enable-cors" when running docker in daemon mode.
+
+    $ docker -d -H="192.168.1.9:2375" --api-enable-cors
diff --git a/docs/reference/api/docker_remote_api_v1.6.md b/docs/reference/api/docker_remote_api_v1.6.md
new file mode 100644
index 00000000..41976816
--- /dev/null
+++ b/docs/reference/api/docker_remote_api_v1.6.md
@@ -0,0 +1,1261 @@
+
+
+# Docker Remote API v1.6
+
+# 1. Brief introduction
+
+ - The Remote API has replaced rcli
+ - The daemon listens on `unix:///var/run/docker.sock` but you can bind
+   Docker to another host/port or a Unix socket.
+ - The API tends to be REST, but for some complex commands, like `attach`
+   or `pull`, the HTTP connection is hijacked to transport `stdout, stdin`
+   and `stderr`
+
+# 2. Endpoints
+
+## 2.1 Containers
+
+### List containers
+
+`GET /containers/json`
+
+List containers
+
+**Example request**:
+
+    GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+        {
+            "Id": "8dfafdbc3a40",
+            "Image": "base:latest",
+            "Command": "echo 1",
+            "Created": 1367854155,
+            "Status": "Exit 0",
+            "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}],
+            "SizeRw": 12288,
+            "SizeRootFs": 0
+        },
+        {
+            "Id": "9cd87474be90",
+            "Image": "base:latest",
+            "Command": "echo 222222",
+            "Created": 1367854155,
+            "Status": "Exit 0",
+            "Ports": [],
+            "SizeRw": 12288,
+            "SizeRootFs": 0
+        },
+        {
+            "Id": "3176a2479c92",
+            "Image": "base:latest",
+            "Command": "echo 3333333333333333",
+            "Created": 1367854154,
+            "Status": "Exit 0",
+            "Ports": [],
+            "SizeRw": 12288,
+            "SizeRootFs": 0
+        },
+        {
+            "Id": "4cb07b47f9fb",
+            "Image": "base:latest",
+            "Command": "echo 444444444444444444444444444444444",
+            "Created": 1367854152,
+            "Status": "Exit 0",
+            "Ports": [],
+            "SizeRw": 12288,
+            "SizeRootFs": 0
+        }
+    ]
+
+Query Parameters:
+
+- **all** – 1/True/true or 0/False/false, Show all containers.
+  Only running containers are shown by default (i.e., this defaults to false)
+- **limit** – Show `limit` last created containers, include non-running ones.
+- **since** – Show only containers created since Id, include non-running ones.
+- **before** – Show only containers created before Id, include non-running ones.
+- **size** – 1/True/true or 0/False/false, Show the containers sizes + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "ExposedPorts":{}, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "date" + ], + "Dns":null, + "Image":"base", + "Volumes":{}, + "VolumesFrom":"", + "WorkingDir":"" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806" + "Warnings":[] + } + +Json Parameters: + +- **config** – the container's configuration + +Query Parameters: + +   + +- **name** – container name to use + +Status Codes: + +- **201** – no error +- **404** – no such container +- **406** – impossible to attach (container not running) +- **500** – server error + + **More Complex Example request, in 2 steps.** **First, use create to + expose a Private Port, which can be bound back to a Public Port a + startup**: + + POST /containers/create HTTP/1.1 + Content-Type: application/json + + { + "Cmd":[ + "/usr/sbin/sshd","-D" + ], + "Image":"image-with-sshd", + "ExposedPorts":{"22/tcp":{}} + } + +**Example response**: + + HTTP/1.1 201 OK + Content-Type: application/json + + { + "Id":"e90e34656806" + "Warnings":[] + } + + **Second, start (using the ID returned above) the image we just + created, mapping the ssh port 22 to something on the host**: + + POST /containers/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }]} + } + +**Example response**: + + HTTP/1.1 204 No Content + Content-Type: text/plain; charset=utf-8 + Content-Length: 0 + + **Now you can ssh into your new container on port 11022.** + +### Inspect a container + +`GET /containers/(id)/json` + +Return low-level information on the container `id` + + +**Example request**: + + GET /containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Created": "2013-05-07T14:51:42.041847+02:00", + "Path": "date", + "Args": [], + "Config": { + "Hostname": "4fa6e0f0c678", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "ExposedPorts": {}, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Dns": null, + "Image": "base", + "Volumes": {}, + "VolumesFrom": "", + "WorkingDir": "" + }, + "State": { + "Running": false, + "Pid": 0, + "ExitCode": 0, + "StartedAt": "2013-05-07T14:51:42.087658+02:01360", + "Ghost": false + }, + "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "NetworkSettings": { + "IpAddress": "", + "IpPrefixLen": 0, + "Gateway": "", + "Bridge": "", + "PortMapping": null + }, + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", + "ResolvConfPath": "/etc/resolv.conf", + "Volumes": {} + } + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### List processes running inside a container + +`GET /containers/(id)/top` + +List processes running inside the container `id` + +**Example request**: + + 
GET /containers/4fa6e0f0c678/top HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
        "Titles": [
            "USER",
            "PID",
            "%CPU",
            "%MEM",
            "VSZ",
            "RSS",
            "TTY",
            "STAT",
            "START",
            "TIME",
            "COMMAND"
        ],
        "Processes": [
            ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"],
            ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"]
        ]
    }

Query Parameters:

- **ps_args** – ps arguments to use (e.g., aux)

Status Codes:

- **200** – no error
- **404** – no such container
- **500** – server error

### Inspect changes on a container's filesystem

`GET /containers/(id)/changes`

Inspect changes on container `id`'s filesystem

**Example request**:

    GET /containers/4fa6e0f0c678/changes HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    [
        {
            "Path": "/dev",
            "Kind": 0
        },
        {
            "Path": "/dev/kmsg",
            "Kind": 1
        },
        {
            "Path": "/test",
            "Kind": 1
        }
    ]

Status Codes:

- **200** – no error
- **404** – no such container
- **500** – server error

### Export a container

`GET /containers/(id)/export`

Export the contents of container `id`

**Example request**:

    GET /containers/4fa6e0f0c678/export HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/octet-stream

    {{ TAR STREAM }}

Status Codes:

- **200** – no error
- **404** – no such container
- **500** – server error

### Start a container

`POST /containers/(id)/start`

Start the container `id`

**Example request**:

    POST /containers/(id)/start HTTP/1.1
    Content-Type: application/json

    {
        "Binds": ["/tmp:/tmp"],
        "LxcConf": [{"Key": "lxc.utsname", "Value": "docker"}],
        "ContainerIDFile": "",
        "Privileged": false,
        "PortBindings": {"22/tcp": [{"HostIp": "", "HostPort": ""}]},
        "Links": [],
        "PublishAllPorts": false
    }

**Example response**:

    HTTP/1.1 204 No Content
    Content-Type: text/plain

Json Parameters:

- **hostConfig** – the container's host configuration (optional)

Status Codes:

- **204** – no error
- **404** – no such container
- **500** – server error

### Stop a container

`POST /containers/(id)/stop`

Stop the container `id`

**Example request**:

    POST /containers/e90e34656806/stop?t=5 HTTP/1.1

**Example response**:

    HTTP/1.1 204 No Content

Query Parameters:

- **t** – number of seconds to wait before killing the container

Status Codes:

- **204** – no error
- **404** – no such container
- **500** – server error

### Restart a container

`POST /containers/(id)/restart`

Restart the container `id`

**Example request**:

    POST /containers/e90e34656806/restart?t=5 HTTP/1.1

**Example response**:

    HTTP/1.1 204 No Content

Query Parameters:

- **t** – number of seconds to wait before killing the container

Status Codes:

- **204** – no error
- **404** – no such container
- **500** – server error

### Kill a container

`POST /containers/(id)/kill`

Kill the container `id`

**Example request**:

    POST /containers/e90e34656806/kill HTTP/1.1

**Example response**:

    HTTP/1.1 204 No Content

Query Parameters:

- **signal** – Signal to send to the container (integer). When not
  set, SIGKILL is assumed and the call waits for the container to
  exit (see the sketch below).
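As referenced above, a minimal client sketch for this endpoint. This is
an illustration, not part of the API: the Unix-socket transport, the
dummy `docker` host name, and the container ID are assumptions.

    package main

    import (
        "fmt"
        "net"
        "net/http"
    )

    func main() {
        // Talk to the daemon over its default Unix socket; the "docker"
        // host in the URL is a placeholder required by net/http.
        client := &http.Client{
            Transport: &http.Transport{
                Dial: func(network, addr string) (net.Conn, error) {
                    return net.Dial("unix", "/var/run/docker.sock")
                },
            },
        }

        // Send SIGTERM (15) instead of the default SIGKILL; the container
        // ID is the placeholder used elsewhere in this document.
        resp, err := client.Post("http://docker/containers/e90e34656806/kill?signal=15", "", nil)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        fmt.Println(resp.Status) // "204 No Content" on success
    }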
Status Codes:

- **204** – no error
- **404** – no such container
- **500** – server error

### Attach to a container

`POST /containers/(id)/attach`

Attach to the container `id`

**Example request**:

    POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/vnd.docker.raw-stream

    {{ STREAM }}

Query Parameters:

- **logs** – 1/True/true or 0/False/false, return logs. Default
  false
- **stream** – 1/True/true or 0/False/false, return stream.
  Default false
- **stdin** – 1/True/true or 0/False/false, if stream=true, attach
  to stdin. Default false
- **stdout** – 1/True/true or 0/False/false, if logs=true, return
  stdout log, if stream=true, attach to stdout. Default false
- **stderr** – 1/True/true or 0/False/false, if logs=true, return
  stderr log, if stream=true, attach to stderr. Default false

Status Codes:

- **200** – no error
- **400** – bad parameter
- **404** – no such container
- **500** – server error

  **Stream details**:

  When the TTY setting is enabled in
  [`POST /containers/create`
  ](/reference/api/docker_remote_api_v1.6/#create-a-container "POST /containers/create"),
  the stream is the raw data from the process PTY and client's stdin.
  When the TTY is disabled, then the stream is multiplexed to separate
  stdout and stderr.

  The format is a **Header** and a **Payload** (frame).

  **HEADER**

  The header contains the information on which stream the payload is
  written (stdout or stderr). It also contains the size of the
  associated frame, encoded as a uint32 in the last 4 bytes.

  It is encoded on the first 8 bytes like this:

      header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}

  `STREAM_TYPE` can be:

- 0: stdin (will be written on stdout)
- 1: stdout
- 2: stderr

  `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of
  the uint32 size encoded as big endian.

  **PAYLOAD**

  The payload is the raw stream.

  **IMPLEMENTATION**

  The simplest way to implement the Attach protocol is the following:

  1. Read 8 bytes
  2. Choose stdout or stderr depending on the first byte
  3. Extract the frame size from the last 4 bytes
  4. Read the extracted size and output it on the correct output
  5. Goto 1)

### Attach to a container (websocket)

`GET /containers/(id)/attach/ws`

Attach to the container `id` via websocket

Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455)

**Example request**

    GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1

**Example response**

    {{ STREAM }}

Query Parameters:

- **logs** – 1/True/true or 0/False/false, return logs. Default false
- **stream** – 1/True/true or 0/False/false, return stream.
  Default false
- **stdin** – 1/True/true or 0/False/false, if stream=true, attach
  to stdin. Default false
- **stdout** – 1/True/true or 0/False/false, if logs=true, return
  stdout log, if stream=true, attach to stdout. Default false
- **stderr** – 1/True/true or 0/False/false, if logs=true, return
  stderr log, if stream=true, attach to stderr.
Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Wait a container + +`POST /containers/(id)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Remove a container + +`DELETE /containers/(id)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default false + +Status Codes: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Copy files or folders from a container + +`POST /containers/(id)/copy` + +Copy files or folders of container `id` + +**Example request**: + + POST /containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +## 2.2 Images + +### List Images + +`GET /images/(format)` + +List images `format` could be json or viz (json default) + +**Example request**: + + GET /images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Repository":"base", + "Tag":"ubuntu-12.10", + "Id":"b750fe79269d", + "Created":1364102658, + "Size":24653, + "VirtualSize":180116135 + }, + { + "Repository":"base", + "Tag":"ubuntu-quantal", + "Id":"b750fe79269d", + "Created":1364102658, + "Size":24653, + "VirtualSize":180116135 + } + ] + +**Example request**: + + GET /images/viz HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + digraph docker { + "d82cbacda43a" -> "074be284591f" + "1496068ca813" -> "08306dc45919" + "08306dc45919" -> "0e7893146ac2" + "b750fe79269d" -> "1496068ca813" + base -> "27cf78414709" [style=invis] + "f71189fff3de" -> "9a33b36209ed" + "27cf78414709" -> "b750fe79269d" + "0e7893146ac2" -> "d6434d954665" + "d6434d954665" -> "d82cbacda43a" + base -> "e9aa60c60128" [style=invis] + "074be284591f" -> "f71189fff3de" + "b750fe79269d" [label="b750fe79269d\nbase",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; + "e9aa60c60128" [label="e9aa60c60128\nbase2",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; + "9a33b36209ed" [label="9a33b36209ed\ntest",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; + base [style=invisible] + } + +Query Parameters: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by defaul + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### Create an image + +`POST /images/create` + +Create an image, either by pull it from the registry or by importing i + +**Example request**: + + POST /images/create?fromImage=base HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Pulling..."} + {"status":"Pulling", "progress":"1/? (n/a)"} + {"error":"Invalid..."} + ... 
+ + When using this endpoint to pull an image from the registry, the + `X-Registry-Auth` header can be used to include + a base64-encoded AuthConfig object. + +Query Parameters: + +- **fromImage** – name of the image to pull +- **fromSrc** – source to import, - means stdin +- **repo** – repository +- **tag** – tag +- **registry** – the registry to pull from + +Status Codes: + +- **200** – no error +- **500** – server error + +### Insert a file in an image + +`POST /images/(name)/insert` + +Insert a file from `url` in the image `name` at `path` + +**Example request**: + + POST /images/test/insert?path=/usr&url=myurl HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Inserting..."} + {"status":"Inserting", "progress":"1/? (n/a)"} + {"error":"Invalid..."} + ... + +Query Parameters: + +- **url** – The url from where the file is taken +- **path** – The path where the file is stored + +Status Codes: + +- **200** – no error +- **500** – server error + +### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /images/base/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "parent":"27cf784147099545", + "created":"2013-03-23T22:24:18.818426-07:00", + "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "container_config": + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":false, + "AttachStderr":false, + "ExposedPorts":{}, + "Tty":true, + "OpenStdin":true, + "StdinOnce":false, + "Env":null, + "Cmd": ["/bin/bash"], + "Dns":null, + "Image":"base", + "Volumes":null, + "VolumesFrom":"", + "WorkingDir":"" + }, + "Size": 6824592 + } + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /images/base/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "b750fe79269d", + "Created": 1364102658, + "CreatedBy": "/bin/bash" + }, + { + "Id": "27cf78414709", + "Created": 1364068391, + "CreatedBy": "" + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Pushing..."} {"status":"Pushing", "progress":"1/? (n/a)"} + {"error":"Invalid..."} ... + + > The `X-Registry-Auth` header can be used to + > include a base64-encoded AuthConfig object. 
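For illustration only: the header value is the base64-encoded AuthConfig
JSON shown in the `/auth` example. The struct below simply mirrors the
field names from that example, and standard base64 is assumed (the docs
say only "base64-encoded").

    package main

    import (
        "encoding/base64"
        "encoding/json"
        "fmt"
    )

    // AuthConfig mirrors the JSON body shown for POST /auth.
    type AuthConfig struct {
        Username      string `json:"username"`
        Password      string `json:"password"`
        Email         string `json:"email"`
        ServerAddress string `json:"serveraddress"`
    }

    func main() {
        auth := AuthConfig{
            Username:      "hannibal",
            Password:      "xxxx",
            Email:         "hannibal@a-team.com",
            ServerAddress: "https://index.docker.io/v1/",
        }
        buf, err := json.Marshal(auth)
        if err != nil {
            panic(err)
        }
        // The header value is the base64 encoding of the JSON object.
        fmt.Println("X-Registry-Auth:", base64.StdEncoding.EncodeToString(buf))
    }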
Status Codes:

- **200** – no error
- **404** – no such image
- **500** – server error

### Tag an image into a repository

`POST /images/(name)/tag`

Tag the image `name` into a repository

**Example request**:

    POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1

**Example response**:

    HTTP/1.1 201 Created

Query Parameters:

- **repo** – The repository to tag in
- **force** – 1/True/true or 0/False/false, default false
- **tag** - The new tag name

Status Codes:

- **201** – no error
- **400** – bad parameter
- **404** – no such image
- **409** – conflict
- **500** – server error

### Remove an image

`DELETE /images/(name)`

Remove the image `name` from the filesystem

**Example request**:

    DELETE /images/test HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-type: application/json

    [
        {"Untagged": "3e2f21a89f"},
        {"Deleted": "3e2f21a89f"},
        {"Deleted": "53b4f83ac9"}
    ]

Status Codes:

- **200** – no error
- **404** – no such image
- **409** – conflict
- **500** – server error

### Search images

`GET /images/search`

Search for an image on [Docker Hub](https://hub.docker.com)

**Example request**:

    GET /images/search?term=sshd HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    [
        {
            "Name":"cespare/sshd",
            "Description":""
        },
        {
            "Name":"johnfuller/sshd",
            "Description":""
        },
        {
            "Name":"dhrp/mongodb-sshd",
            "Description":""
        }
    ]

Query Parameters:

- **term** – term to search

Status Codes:

- **200** – no error
- **500** – server error

## 2.3 Misc

### Build an image from Dockerfile via stdin

`POST /build`

Build an image from Dockerfile via stdin

**Example request**:

    POST /build HTTP/1.1

    {{ TAR STREAM }}

**Example response**:

    HTTP/1.1 200 OK

    {{ STREAM }}

    The stream must be a tar archive compressed with one of the
    following algorithms: identity (no compression), gzip, bzip2, xz.
    The archive must include a file called Dockerfile at its root. It
    may include any number of other files, which will be accessible in
    the build context (See the ADD build command).

    The Content-type header should be set to "application/tar".
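A minimal client sketch that streams a pre-built context tarball to this
endpoint might look like the following. The socket path, the dummy
`docker` host, and the `context.tar` file name are assumptions for
illustration, not part of the API.

    package main

    import (
        "fmt"
        "net"
        "net/http"
        "os"
    )

    func main() {
        // "context.tar" is a placeholder: a tar archive with a
        // Dockerfile at its root, as described above.
        ctx, err := os.Open("context.tar")
        if err != nil {
            panic(err)
        }
        defer ctx.Close()

        client := &http.Client{
            Transport: &http.Transport{
                Dial: func(network, addr string) (net.Conn, error) {
                    return net.Dial("unix", "/var/run/docker.sock")
                },
            },
        }

        // t=myrepo tags the resulting image on success.
        resp, err := client.Post("http://docker/build?t=myrepo", "application/tar", ctx)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        fmt.Println(resp.Status)
    }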
+ +Query Parameters: + +- **t** – repository name (and optionally a tag) to be applied to + the resulting image in case of success +- **remote** – build source URI (git or HTTPS/HTTP) +- **q** – suppress verbose build output +- **nocache** – do not use the cache when building the image + +Status Codes: + +- **200** – no error +- **500** – server error + +### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /auth HTTP/1.1 + Content-Type: application/json + + { + "username":" hannibal", + "password: "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + +Status Codes: + +- **200** – no error +- **204** – no error +- **500** – server error + +### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers":11, + "Images":16, + "Debug":false, + "NFd": 11, + "NGoroutines":21, + "MemoryLimit":true, + "SwapLimit":false, + "IPv4Forwarding":true + } + +Status Codes: + +- **200** – no error +- **500** – server error + +### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version":"0.2.2", + "GitCommit":"5a2a5cc+CHANGES", + "GoVersion":"go1.0.3" + } + +Status Codes: + +- **200** – no error +- **500** – server error + +### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Cmd": ["cat", "/world"], + "ExposedPorts":{"22/tcp":{}} + } + +**Example response**: + + HTTP/1.1 201 OK + Content-Type: application/vnd.docker.raw-stream + + {"Id": "596069db4bf5"} + +Query Parameters: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **m** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") + +Status Codes: + +- **201** – no error +- **404** – no such container +- **500** – server error + +### Monitor Docker's events + +`GET /events` + +Get events from docker, either in real time via streaming, or via +polling (using since). + +Docker containers will report the following events: + + create, destroy, die, export, kill, pause, restart, start, stop, unpause + +and Docker images will report: + + untag, delete + +**Example request**: + + GET /events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "create", "id": "dfdf82bd3881","from": "base:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "base:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "base:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "base:latest", "time":1374067970} + +Query Parameters: + +- **since** – timestamp used for polling + +Status Codes: + +- **200** – no error +- **500** – server error + +# 3. 
Going further + +## 3.1 Inside `docker run` + +Here are the steps of `docker run` : + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it + - Then retry to create the container + +- Start the container + +- If you are not in detached mode: + - Attach to the container, using logs=1 (to have stdout and + stderr from the container's start) and stream=1 + +- If in detached mode or only stdin is attached: + - Display the container's id + +## 3.2 Hijacking + +In this version of the API, /attach, uses hijacking to transport stdin, +stdout and stderr on the same socket. This might change in the future. + +## 3.3 CORS Requests + +To enable cross origin requests to the remote api add the flag +"--api-enable-cors" when running docker in daemon mode. + + $ docker -d -H="192.168.1.9:2375" --api-enable-cors diff --git a/docs/reference/api/docker_remote_api_v1.7.md b/docs/reference/api/docker_remote_api_v1.7.md new file mode 100644 index 00000000..e3206e85 --- /dev/null +++ b/docs/reference/api/docker_remote_api_v1.7.md @@ -0,0 +1,1249 @@ + + +# Docker Remote API v1.7 + +# 1. Brief introduction + + - The Remote API has replaced rcli + - The daemon listens on `unix:///var/run/docker.sock` but you can bind + Docker to another host/port or a Unix socket. + - The API tends to be REST, but for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout, stdin` + and `stderr` + +# 2. Endpoints + +## 2.1 Containers + +### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Image": "base:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Image": "base:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Image": "base:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Image": "base:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +Query Parameters: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created containers, include non-running ones. +- **since** – Show only containers created since Id, include non-running ones. +- **before** – Show only containers created before Id, include non-running ones. 
+- **size** – 1/True/true or 0/False/false, Show the containers sizes + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "date" + ], + "Dns":null, + "Image":"base", + "Volumes":{ + "/tmp": {} + }, + "VolumesFrom":"", + "WorkingDir":"", + "ExposedPorts":{ + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806" + "Warnings":[] + } + +Json Parameters: + +- **config** – the container's configuration + +Status Codes: + +- **201** – no error +- **404** – no such container +- **406** – impossible to attach (container not running) +- **500** – server error + +### Inspect a container + +`GET /containers/(id)/json` + +Return low-level information on the container `id` + + +**Example request**: + + GET /containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Created": "2013-05-07T14:51:42.041847+02:00", + "Path": "date", + "Args": [], + "Config": { + "Hostname": "4fa6e0f0c678", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Dns": null, + "Image": "base", + "Volumes": {}, + "VolumesFrom": "", + "WorkingDir": "" + }, + "State": { + "Running": false, + "Pid": 0, + "ExitCode": 0, + "StartedAt": "2013-05-07T14:51:42.087658+02:01360", + "Ghost": false + }, + "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "NetworkSettings": { + "IpAddress": "", + "IpPrefixLen": 0, + "Gateway": "", + "Bridge": "", + "PortMapping": null + }, + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", + "ResolvConfPath": "/etc/resolv.conf", + "Volumes": {} + } + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### List processes running inside a container + +`GET /containers/(id)/top` + +List processes running inside the container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles": [ + "USER", + "PID", + "%CPU", + "%MEM", + "VSZ", + "RSS", + "TTY", + "STAT", + "START", + "TIME", + "COMMAND" + ], + "Processes": [ + ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], + ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] + ] + } + +Query Parameters: + +- **ps_args** – ps arguments to use (e.g., aux) + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Inspect changes on a container's filesystem + +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", 
+ "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Export a container + +`GET /containers/(id)/export` + +Export the contents of container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Start a container + +`POST /containers/(id)/start` + +Start the container `id` + +**Example request**: + + POST /containers/(id)/start HTTP/1.1 + Content-Type: application/json + + { + "Binds":["/tmp:/tmp"], + "LxcConf":[{"Key":"lxc.utsname","Value":"docker"}], + "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] }, + "Privileged":false, + "PublishAllPorts":false + } + + Binds need to reference Volumes that were defined during container + creation. + +**Example response**: + + HTTP/1.1 204 No Content + Content-Type: text/plain + +Json Parameters: + +- **hostConfig** – the container's host configuration (optional) + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Stop a container + +`POST /containers/(id)/stop` + +Stop the container `id` + +**Example request**: + + POST /containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 OK + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Restart a container + +`POST /containers/(id)/restart` + +Restart the container `id` + +**Example request**: + + POST /containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Kill a container + +`POST /containers/(id)/kill` + +Kill the container `id` + +**Example request**: + + POST /containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Attach to a container + +`POST /containers/(id)/attach` + +Attach to the container `id` + +**Example request**: + + POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Defaul + false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. 
Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + + **Stream details**: + + When using the TTY setting is enabled in + [`POST /containers/create` + ](/reference/api/docker_remote_api_v1.7/#create-a-container), + the stream is the raw data from the process PTY and client's stdin. + When the TTY is disabled, then the stream is multiplexed to separate + stdout and stderr. + + The format is a **Header** and a **Payload** (frame). + + **HEADER** + + The header will contain the information on which stream write the + stream (stdout or stderr). It also contain the size of the + associated frame encoded on the last 4 bytes (uint32). + + It is encoded on the first 8 bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + + `STREAM_TYPE` can be: + +- 0: stdin (will be written on stdout) +- 1: stdout +- 2: stderr + + `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of + the uint32 size encoded as big endian. + + **PAYLOAD** + + The payload is the raw stream. + + **IMPLEMENTATION** + + The simplest way to implement the Attach protocol is the following: + + 1. Read 8 bytes + 2. chose stdout or stderr depending on the first byte + 3. Extract the frame size from the last 4 bytes + 4. Read the extracted size and output it on the correct output + 5. Goto 1) + +### Attach to a container (websocket) + +`GET /containers/(id)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Wait a container + +`POST /containers/(id)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Remove a container + +`DELETE /containers/(id)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. 
Default false + +Status Codes: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Copy files or folders from a container + +`POST /containers/(id)/copy` + +Copy files or folders of container `id` + +**Example request**: + + POST /containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +## 2.2 Images + +### List Images + +`GET /images/json` + +**Example request**: + + GET /images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275 + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135 + } + ] + +### Create an image + +`POST /images/create` + +Create an image, either by pull it from the registry or by importing i + +**Example request**: + + POST /images/create?fromImage=base HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Pulling..."} + {"status":"Pulling", "progress":"1/? (n/a)"} + {"error":"Invalid..."} + ... + + When using this endpoint to pull an image from the registry, the + `X-Registry-Auth` header can be used to include + a base64-encoded AuthConfig object. + +Query Parameters: + +- **fromImage** – name of the image to pull +- **fromSrc** – source to import, - means stdin +- **repo** – repository +- **tag** – tag +- **registry** – the registry to pull from + +Request Headers: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +Status Codes: + +- **200** – no error +- **500** – server error + +### Insert a file in an image + +`POST /images/(name)/insert` + +Insert a file from `url` in the image `name` at `path` + +**Example request**: + + POST /images/test/insert?path=/usr&url=myurl HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Inserting..."} + {"status":"Inserting", "progress":"1/? (n/a)"} + {"error":"Invalid..."} + ... 
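Both this endpoint and `/images/create` answer with a stream of
concatenated JSON objects rather than a single document. A minimal
sketch of a consumer follows, using the `status`/`progress`/`error` keys
from the examples above; the input string is a stand-in for the HTTP
response body.

    package main

    import (
        "encoding/json"
        "fmt"
        "io"
        "strings"
    )

    // statusLine holds the keys seen in the streamed examples above.
    type statusLine struct {
        Status   string `json:"status"`
        Progress string `json:"progress"`
        Error    string `json:"error"`
    }

    func main() {
        // Stand-in for the response body of POST /images/create.
        body := strings.NewReader(
            `{"status":"Pulling..."}{"status":"Pulling","progress":"1/? (n/a)"}{"error":"Invalid..."}`)

        dec := json.NewDecoder(body) // decodes back-to-back JSON objects
        for {
            var line statusLine
            if err := dec.Decode(&line); err == io.EOF {
                break
            } else if err != nil {
                panic(err)
            }
            if line.Error != "" {
                fmt.Println("error:", line.Error)
                continue
            }
            fmt.Println(line.Status, line.Progress)
        }
    }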
+ +Query Parameters: + +- **url** – The url from where the file is taken +- **path** – The path where the file is stored + +Status Codes: + +- **200** – no error +- **500** – server error + +### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /images/base/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "parent":"27cf784147099545", + "created":"2013-03-23T22:24:18.818426-07:00", + "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "container_config": + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":false, + "AttachStderr":false, + "PortSpecs":null, + "Tty":true, + "OpenStdin":true, + "StdinOnce":false, + "Env":null, + "Cmd": ["/bin/bash"], + "Dns":null, + "Image":"base", + "Volumes":null, + "VolumesFrom":"", + "WorkingDir":"" + }, + "Size": 6824592 + } + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /images/base/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "b750fe79269d", + "Created": 1364102658, + "CreatedBy": "/bin/bash" + }, + { + "Id": "27cf78414709", + "Created": 1364068391, + "CreatedBy": "" + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Pushing..."} + {"status":"Pushing", "progress":"1/? (n/a)"} + {"error":"Invalid..."} + ... + + Request Headers: + +   + +- **X-Registry-Auth** – include a base64-encoded AuthConfig + object. + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 OK + +Query Parameters: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +Status Codes: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +Status Codes: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. 
+ +**Example request**: + + GET /images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_trusted": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_trusted": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_trusted": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +Query Parameters: + +- **term** – term to search + +Status Codes: + +- **200** – no error +- **500** – server error + +## 2.3 Misc + +### Build an image from Dockerfile via stdin + +`POST /build` + +Build an image from Dockerfile via stdin + +**Example request**: + + POST /build HTTP/1.1 + + {{ TAR STREAM }} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {{ STREAM }} + + The stream must be a tar archive compressed with one of the + following algorithms: identity (no compression), gzip, bzip2, xz. + + The archive must include a file called `Dockerfile` + at its root. It may include any number of other files, + which will be accessible in the build context (See the [*ADD build + command*](/builder/#dockerbuilder)). + +Query Parameters: + +- **t** – repository name (and optionally a tag) to be applied to + the resulting image in case of success +- **remote** – build source URI (git or HTTPS/HTTP) +- **q** – suppress verbose build output +- **nocache** – do not use the cache when building the image + + Request Headers: + +   + +- **Content-type** – should be set to + `"application/tar"`. + +Status Codes: + +- **200** – no error +- **500** – server error + +### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /auth HTTP/1.1 + Content-Type: application/json + + { + "username":" hannibal", + "password: "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + +Status Codes: + +- **200** – no error +- **204** – no error +- **500** – server error + +### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers":11, + "Images":16, + "Debug":false, + "NFd": 11, + "NGoroutines":21, + "MemoryLimit":true, + "SwapLimit":false, + "IPv4Forwarding":true + } + +Status Codes: + +- **200** – no error +- **500** – server error + +### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version":"0.2.2", + "GitCommit":"5a2a5cc+CHANGES", + "GoVersion":"go1.0.3" + } + +Status Codes: + +- **200** – no error +- **500** – server error + +### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 OK + Content-Type: application/vnd.docker.raw-stream + + {"Id": "596069db4bf5"} + +Query Parameters: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **m** – commit message +- **author** – author (e.g., "John Hannibal Smith + 
<[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")
- **run** – config automatically applied when the image is run.
  (ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]})

Status Codes:

- **201** – no error
- **404** – no such container
- **500** – server error

### Monitor Docker's events

`GET /events`

Get events from docker, either in real time via streaming, or via
polling (using since).

Docker containers will report the following events:

    create, destroy, die, export, kill, pause, restart, start, stop, unpause

and Docker images will report:

    untag, delete

**Example request**:

    GET /events?since=1374067924

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {"status": "create", "id": "dfdf82bd3881","from": "base:latest", "time":1374067924}
    {"status": "start", "id": "dfdf82bd3881","from": "base:latest", "time":1374067924}
    {"status": "stop", "id": "dfdf82bd3881","from": "base:latest", "time":1374067966}
    {"status": "destroy", "id": "dfdf82bd3881","from": "base:latest", "time":1374067970}

Query Parameters:

- **since** – timestamp used for polling

Status Codes:

- **200** – no error
- **500** – server error

### Get a tarball containing all images and tags in a repository

`GET /images/(name)/get`

Get a tarball containing all images and metadata for the repository
specified by `name`.

**Example request**

    GET /images/ubuntu/get

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/x-tar

    Binary data stream

Status Codes:

- **200** – no error
- **500** – server error

### Load a tarball with a set of images and tags into docker

`POST /images/load`

Load a set of images and tags into the docker repository.

**Example request**

    POST /images/load

    Tarball in body

**Example response**:

    HTTP/1.1 200 OK

Status Codes:

- **200** – no error
- **500** – server error

# 3. Going further

## 3.1 Inside `docker run`

Here are the steps of `docker run`:

- Create the container

- If the status code is 404, it means the image doesn't exist:
    - Try to pull it
    - Then retry to create the container

- Start the container

- If you are not in detached mode:
    - Attach to the container, using logs=1 (to have stdout and
      stderr from the container's start) and stream=1

- If in detached mode or only stdin is attached:
    - Display the container's id

## 3.2 Hijacking

In this version of the API, `/attach` uses hijacking to transport stdin,
stdout and stderr on the same socket. This might change in the future.

## 3.3 CORS Requests

To enable cross-origin requests to the remote API, add the flag
`--api-enable-cors` when running Docker in daemon mode.

    $ docker -d -H="192.168.1.9:2375" --api-enable-cors

diff --git a/docs/reference/api/docker_remote_api_v1.8.md b/docs/reference/api/docker_remote_api_v1.8.md
new file mode 100644
index 00000000..a1a231d5
--- /dev/null
+++ b/docs/reference/api/docker_remote_api_v1.8.md
@@ -0,0 +1,1325 @@

# Docker Remote API v1.8

# 1. Brief introduction

 - The Remote API has replaced rcli
 - The daemon listens on `unix:///var/run/docker.sock` but you can bind
   Docker to another host/port or a Unix socket.
 - The API tends to be REST, but for some complex commands, like `attach`
   or `pull`, the HTTP connection is hijacked to transport `stdout, stdin`
   and `stderr`

# 2. Endpoints
## 2.1 Containers

### List containers

`GET /containers/json`

List containers

**Example request**:

    GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    [
        {
            "Id": "8dfafdbc3a40",
            "Image": "base:latest",
            "Command": "echo 1",
            "Created": 1367854155,
            "Status": "Exit 0",
            "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}],
            "SizeRw": 12288,
            "SizeRootFs": 0
        },
        {
            "Id": "9cd87474be90",
            "Image": "base:latest",
            "Command": "echo 222222",
            "Created": 1367854155,
            "Status": "Exit 0",
            "Ports": [],
            "SizeRw": 12288,
            "SizeRootFs": 0
        },
        {
            "Id": "3176a2479c92",
            "Image": "base:latest",
            "Command": "echo 3333333333333333",
            "Created": 1367854154,
            "Status": "Exit 0",
            "Ports": [],
            "SizeRw": 12288,
            "SizeRootFs": 0
        },
        {
            "Id": "4cb07b47f9fb",
            "Image": "base:latest",
            "Command": "echo 444444444444444444444444444444444",
            "Created": 1367854152,
            "Status": "Exit 0",
            "Ports": [],
            "SizeRw": 12288,
            "SizeRootFs": 0
        }
    ]

Query Parameters:

- **all** – 1/True/true or 0/False/false, Show all containers.
  Only running containers are shown by default (i.e., this defaults to false)
- **limit** – Show `limit` last created containers, include non-running ones.
- **since** – Show only containers created since Id, include non-running ones.
- **before** – Show only containers created before Id, include non-running ones.
- **size** – 1/True/true or 0/False/false, Show the containers sizes

Status Codes:

- **200** – no error
- **400** – bad parameter
- **500** – server error

### Create a container

`POST /containers/create`

Create a container

**Example request**:

    POST /containers/create HTTP/1.1
    Content-Type: application/json

    {
        "Hostname":"",
        "User":"",
        "Memory":0,
        "MemorySwap":0,
        "CpuShares":0,
        "AttachStdin":false,
        "AttachStdout":true,
        "AttachStderr":true,
        "PortSpecs":null,
        "Tty":false,
        "OpenStdin":false,
        "StdinOnce":false,
        "Env":null,
        "Cmd":[
            "date"
        ],
        "Dns":null,
        "Image":"base",
        "Volumes":{
            "/tmp": {}
        },
        "VolumesFrom":"",
        "WorkingDir":"",
        "ExposedPorts":{
            "22/tcp": {}
        }
    }

**Example response**:

    HTTP/1.1 201 Created
    Content-Type: application/json

    {
        "Id":"e90e34656806",
        "Warnings":[]
    }

Json Parameters:

- **Hostname** – Container host name
- **User** – Username or UID
- **Memory** – Memory Limit in bytes
- **CpuShares** – CPU shares (relative weight)
- **AttachStdin** – 1/True/true or 0/False/false, attach to
  standard input. Default false
- **AttachStdout** – 1/True/true or 0/False/false, attach to
  standard output. Default false
- **AttachStderr** – 1/True/true or 0/False/false, attach to
  standard error. Default false
- **Tty** – 1/True/true or 0/False/false, allocate a pseudo-tty.
  Default false
- **OpenStdin** – 1/True/true or 0/False/false, keep stdin open
  even if not attached. Default false

Query Parameters:

- **name** – Assign the specified name to the container. Must
  match `/?[a-zA-Z0-9_-]+` (see the sketch below).
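As referenced above, a minimal sketch of creating a named container.
The socket transport, the dummy `docker` host, the name `web1`, and the
config values are illustrative assumptions.

    package main

    import (
        "fmt"
        "net"
        "net/http"
        "strings"
    )

    func main() {
        client := &http.Client{
            Transport: &http.Transport{
                Dial: func(network, addr string) (net.Conn, error) {
                    return net.Dial("unix", "/var/run/docker.sock")
                },
            },
        }

        // A minimal config; "web1" must match /?[a-zA-Z0-9_-]+ per the
        // name parameter described above.
        body := strings.NewReader(`{"Image": "base", "Cmd": ["date"]}`)
        resp, err := client.Post("http://docker/containers/create?name=web1",
            "application/json", body)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        fmt.Println(resp.Status) // "201 Created" on success
    }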
+ +Status Codes: + +- **201** – no error +- **404** – no such container +- **406** – impossible to attach (container not running) +- **500** – server error + +### Inspect a container + +`GET /containers/(id)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Created": "2013-05-07T14:51:42.041847+02:00", + "Path": "date", + "Args": [], + "Config": { + "Hostname": "4fa6e0f0c678", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Dns": null, + "Image": "base", + "Volumes": {}, + "VolumesFrom": "", + "WorkingDir": "" + }, + "State": { + "Running": false, + "Pid": 0, + "ExitCode": 0, + "StartedAt": "2013-05-07T14:51:42.087658+02:01360", + "Ghost": false + }, + "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "NetworkSettings": { + "IpAddress": "", + "IpPrefixLen": 0, + "Gateway": "", + "Bridge": "", + "PortMapping": null + }, + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", + "ResolvConfPath": "/etc/resolv.conf", + "Volumes": {}, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "LxcConf": [], + "Privileged": false, + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "49153" + } + ] + }, + "Links": null, + "PublishAllPorts": false + } + } + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### List processes running inside a container + +`GET /containers/(id)/top` + +List processes running inside the container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles": [ + "USER", + "PID", + "%CPU", + "%MEM", + "VSZ", + "RSS", + "TTY", + "STAT", + "START", + "TIME", + "COMMAND" + ], + "Processes": [ + ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], + ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] + ] + } + +Query Parameters: + +- **ps_args** – ps arguments to use (e.g., aux) + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Inspect changes on a container's filesystem + +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Export a container + +`GET /containers/(id)/export` + +Export the contents of container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Start a container + +`POST /containers/(id)/start` + +Start the container `id` + +**Example 
request**: + + POST /containers/(id)/start HTTP/1.1 + Content-Type: application/json + + { + "Binds":["/tmp:/tmp"], + "LxcConf":[{"Key":"lxc.utsname","Value":"docker"}], + "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts":false, + "Privileged":false + } + +**Example response**: + + HTTP/1.1 204 No Content + Content-Type: text/plain + +Json Parameters: + +   + +- **Binds** – Create a bind mount to a directory or file with + [host-path]:[container-path]:[rw|ro]. If a directory + "container-path" is missing, then docker creates a new volume. +- **LxcConf** – Map of custom lxc options +- **PortBindings** – Expose ports from the container, optionally + publishing them via the HostPort flag +- **PublishAllPorts** – 1/True/true or 0/False/false, publish all + exposed ports to the host interfaces. Default false +- **Privileged** – 1/True/true or 0/False/false, give extended + privileges to this container. Default false + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Stop a container + +`POST /containers/(id)/stop` + +Stop the container `id` + +**Example request**: + + POST /containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 OK + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Restart a container + +`POST /containers/(id)/restart` + +Restart the container `id` + +**Example request**: + + POST /containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Kill a container + +`POST /containers/(id)/kill` + +Kill the container `id` + +**Example request**: + + POST /containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Attach to a container + +`POST /containers/(id)/attach` + +Attach to the container `id` + +**Example request**: + + POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Defaul + false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + + **Stream details**: + + When using the TTY setting is enabled in + [`POST /containers/create` + ](/reference/api/docker_remote_api_v1.9/#create-a-container "POST /containers/create"), + the stream is the raw data from the process PTY and client's stdin. + When the TTY is disabled, then the stream is multiplexed to separate + stdout and stderr. + + The format is a **Header** and a **Payload** (frame). 
### Attach to a container (websocket)

`GET /containers/(id)/attach/ws`

Attach to the container `id` via websocket

Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455)

**Example request**

    GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1

**Example response**

    {{ STREAM }}

Query Parameters:

- **logs** – 1/True/true or 0/False/false, return logs. Default false
- **stream** – 1/True/true or 0/False/false, return stream.
  Default false
- **stdin** – 1/True/true or 0/False/false, if stream=true, attach
  to stdin. Default false
- **stdout** – 1/True/true or 0/False/false, if logs=true, return
  stdout log, if stream=true, attach to stdout. Default false
- **stderr** – 1/True/true or 0/False/false, if logs=true, return
  stderr log, if stream=true, attach to stderr. Default false

Status Codes:

- **200** – no error
- **400** – bad parameter
- **404** – no such container
- **500** – server error

### Wait a container

`POST /containers/(id)/wait`

Block until container `id` stops, then return the exit code

**Example request**:

    POST /containers/16253994b7c4/wait HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {"StatusCode": 0}

Status Codes:

- **200** – no error
- **404** – no such container
- **500** – server error

### Remove a container

`DELETE /containers/(id)`

Remove the container `id` from the filesystem

**Example request**:

    DELETE /containers/16253994b7c4?v=1 HTTP/1.1

**Example response**:

    HTTP/1.1 204 No Content

Query Parameters:

- **v** – 1/True/true or 0/False/false, Remove the volumes
  associated with the container. Default false

Status Codes:

- **204** – no error
- **400** – bad parameter
- **404** – no such container
- **500** – server error
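The two endpoints above combine naturally into a cleanup routine: wait for
the container to exit, then remove it. A hedged Go sketch follows, talking
to the daemon's default Unix socket; the container ID is the hypothetical
one from the examples, the `http://docker` host is a placeholder that the
custom dialer ignores, and error handling is kept minimal.

    package main

    import (
        "context"
        "encoding/json"
        "fmt"
        "net"
        "net/http"
    )

    func main() {
        // The daemon listens on unix:///var/run/docker.sock by default,
        // so dial the socket instead of a TCP host.
        client := &http.Client{Transport: &http.Transport{
            DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
                return net.Dial("unix", "/var/run/docker.sock")
            },
        }}

        id := "16253994b7c4" // hypothetical container ID from the examples

        // POST /containers/(id)/wait blocks until the container stops.
        resp, err := client.Post("http://docker/containers/"+id+"/wait",
            "application/json", nil)
        if err != nil {
            panic(err)
        }
        var result struct{ StatusCode int }
        if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
            panic(err)
        }
        resp.Body.Close()
        fmt.Println("exit code:", result.StatusCode)

        // DELETE /containers/(id)?v=1 removes the container and its volumes.
        req, err := http.NewRequest("DELETE", "http://docker/containers/"+id+"?v=1", nil)
        if err != nil {
            panic(err)
        }
        del, err := client.Do(req)
        if err != nil {
            panic(err)
        }
        del.Body.Close()
        fmt.Println("removed:", del.Status)
    }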
### Copy files or folders from a container

`POST /containers/(id)/copy`

Copy files or folders of container `id`

**Example request**:

    POST /containers/4fa6e0f0c678/copy HTTP/1.1
    Content-Type: application/json

    {
      "Resource": "test.txt"
    }

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/octet-stream

    {{ TAR STREAM }}

Status Codes:

- **200** – no error
- **404** – no such container
- **500** – server error

## 2.2 Images

### List Images

`GET /images/json`

**Example request**:

    GET /images/json?all=0 HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    [
      {
        "RepoTags": [
          "ubuntu:12.04",
          "ubuntu:precise",
          "ubuntu:latest"
        ],
        "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
        "Created": 1365714795,
        "Size": 131506275,
        "VirtualSize": 131506275
      },
      {
        "RepoTags": [
          "ubuntu:12.10",
          "ubuntu:quantal"
        ],
        "ParentId": "27cf784147099545",
        "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
        "Created": 1364102658,
        "Size": 24653,
        "VirtualSize": 180116135
      }
    ]

### Create an image

`POST /images/create`

Create an image, either by pulling it from the registry or by importing it

**Example request**:

    POST /images/create?fromImage=base HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {"status": "Pulling..."}
    {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}}
    {"error": "Invalid..."}
    ...

When using this endpoint to pull an image from the registry, the
`X-Registry-Auth` header can be used to include
a base64-encoded AuthConfig object.

Query Parameters:

- **fromImage** – name of the image to pull
- **fromSrc** – source to import, - means stdin
- **repo** – repository
- **tag** – tag
- **registry** – the registry to pull from

Request Headers:

- **X-Registry-Auth** – base64-encoded AuthConfig object

Status Codes:

- **200** – no error
- **500** – server error
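As the example response shows, this endpoint streams one JSON object per
progress update rather than a single document, so a client can hang a
`json.Decoder` off the response body until EOF. A minimal Go sketch,
assuming the daemon's default Unix socket (`http://docker` is a dummy host
the custom dialer ignores, and the struct only covers the keys shown above):

    package main

    import (
        "context"
        "encoding/json"
        "fmt"
        "io"
        "net"
        "net/http"
    )

    // pullStatus mirrors the keys used in the example stream above.
    type pullStatus struct {
        Status   string `json:"status"`
        Progress string `json:"progress"`
        Error    string `json:"error"`
    }

    func main() {
        client := &http.Client{Transport: &http.Transport{
            // The daemon listens on unix:///var/run/docker.sock by default.
            DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
                return net.Dial("unix", "/var/run/docker.sock")
            },
        }}

        resp, err := client.Post("http://docker/images/create?fromImage=base",
            "text/plain", nil)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        // The body is a stream of JSON objects; decode until EOF.
        dec := json.NewDecoder(resp.Body)
        for {
            var s pullStatus
            if err := dec.Decode(&s); err == io.EOF {
                break
            } else if err != nil {
                panic(err)
            }
            if s.Error != "" {
                fmt.Println("error:", s.Error)
                break
            }
            fmt.Println(s.Status, s.Progress)
        }
    }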
### Insert a file in an image

`POST /images/(name)/insert`

Insert a file from `url` in the image `name` at `path`

**Example request**:

    POST /images/test/insert?path=/usr&url=myurl HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {"status":"Inserting..."}
    {"status":"Inserting", "progress":"1/? (n/a)", "progressDetail":{"current":1}}
    {"error":"Invalid..."}
    ...

Query Parameters:

- **url** – The url from where the file is taken
- **path** – The path where the file is stored

Status Codes:

- **200** – no error
- **500** – server error

### Inspect an image

`GET /images/(name)/json`

Return low-level information on the image `name`

**Example request**:

    GET /images/base/json HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
      "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
      "parent":"27cf784147099545",
      "created":"2013-03-23T22:24:18.818426-07:00",
      "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0",
      "container_config":
      {
        "Hostname":"",
        "User":"",
        "Memory":0,
        "MemorySwap":0,
        "AttachStdin":false,
        "AttachStdout":false,
        "AttachStderr":false,
        "PortSpecs":null,
        "Tty":true,
        "OpenStdin":true,
        "StdinOnce":false,
        "Env":null,
        "Cmd": ["/bin/bash"],
        "Dns":null,
        "Image":"base",
        "Volumes":null,
        "VolumesFrom":"",
        "WorkingDir":""
      },
      "Size": 6824592
    }

Status Codes:

- **200** – no error
- **404** – no such image
- **500** – server error

### Get the history of an image

`GET /images/(name)/history`

Return the history of the image `name`

**Example request**:

    GET /images/base/history HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    [
      {
        "Id": "b750fe79269d",
        "Created": 1364102658,
        "CreatedBy": "/bin/bash"
      },
      {
        "Id": "27cf78414709",
        "Created": 1364068391,
        "CreatedBy": ""
      }
    ]

Status Codes:

- **200** – no error
- **404** – no such image
- **500** – server error

### Push an image on the registry

`POST /images/(name)/push`

Push the image `name` on the registry

**Example request**:

    POST /images/test/push HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {"status": "Pushing..."}
    {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}
    {"error": "Invalid..."}
    ...

Request Headers:

- **X-Registry-Auth** – include a base64-encoded AuthConfig
  object.

Status Codes:

- **200** – no error
- **404** – no such image
- **500** – server error

### Tag an image into a repository

`POST /images/(name)/tag`

Tag the image `name` into a repository

**Example request**:

    POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1

**Example response**:

    HTTP/1.1 201 Created

Query Parameters:

- **repo** – The repository to tag in
- **force** – 1/True/true or 0/False/false, default false
- **tag** – The new tag name

Status Codes:

- **201** – no error
- **400** – bad parameter
- **404** – no such image
- **409** – conflict
- **500** – server error

### Remove an image

`DELETE /images/(name)`

Remove the image `name` from the filesystem

**Example request**:

    DELETE /images/test HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-type: application/json

    [
      {"Untagged": "3e2f21a89f"},
      {"Deleted": "3e2f21a89f"},
      {"Deleted": "53b4f83ac9"}
    ]

Status Codes:

- **200** – no error
- **404** – no such image
- **409** – conflict
- **500** – server error

### Search images

`GET /images/search`

Search for an image on [Docker Hub](https://hub.docker.com).

> **Note**:
> The response keys have changed from API v1.6 to reflect the JSON
> sent by the registry server to the docker daemon's request.
+ +**Example request**: + + GET /images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_trusted": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_trusted": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_trusted": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +Query Parameters: + +- **term** – term to search + +Status Codes: + +- **200** – no error +- **500** – server error + +## 2.3 Misc + +### Build an image from Dockerfile via stdin + +`POST /build` + +Build an image from Dockerfile via stdin + +**Example request**: + + POST /build HTTP/1.1 + + {{ TAR STREAM }} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + + The stream must be a tar archive compressed with one of the + following algorithms: identity (no compression), gzip, bzip2, xz. + + The archive must include a file called `Dockerfile` + at its root. It may include any number of other files, + which will be accessible in the build context (See the [*ADD build + command*](/reference/builder/#dockerbuilder)). + +Query Parameters: + +- **t** – repository name (and optionally a tag) to be applied to + the resulting image in case of success +- **remote** – build source URI (git or HTTPS/HTTP) +- **q** – suppress verbose build output +- **nocache** – do not use the cache when building the image + + Request Headers: + +   + +- **Content-type** – should be set to + `"application/tar"`. 
- **X-Registry-Auth** – base64-encoded AuthConfig object

Status Codes:

- **200** – no error
- **500** – server error

### Check auth configuration

`POST /auth`

Get the default username and email

**Example request**:

    POST /auth HTTP/1.1
    Content-Type: application/json

    {
      "username": "hannibal",
      "password": "xxxx",
      "email": "hannibal@a-team.com",
      "serveraddress": "https://index.docker.io/v1/"
    }

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: text/plain

Status Codes:

- **200** – no error
- **204** – no error
- **500** – server error

### Display system-wide information

`GET /info`

Display system-wide information

**Example request**:

    GET /info HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
      "Containers":11,
      "Images":16,
      "Debug":false,
      "NFd": 11,
      "NGoroutines":21,
      "MemoryLimit":true,
      "SwapLimit":false,
      "IPv4Forwarding":true
    }

Status Codes:

- **200** – no error
- **500** – server error

### Show the docker version information

`GET /version`

Show the docker version information

**Example request**:

    GET /version HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
      "Version":"0.2.2",
      "GitCommit":"5a2a5cc+CHANGES",
      "GoVersion":"go1.0.3"
    }

Status Codes:

- **200** – no error
- **500** – server error

### Create a new image from a container's changes

`POST /commit`

Create a new image from a container's changes

**Example request**:

    POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1

**Example response**:

    HTTP/1.1 201 Created
    Content-Type: application/vnd.docker.raw-stream

    {"Id": "596069db4bf5"}

Query Parameters:

- **container** – source container
- **repo** – repository
- **tag** – tag
- **m** – commit message
- **author** – author (e.g., "John Hannibal Smith
  <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")
- **run** – config automatically applied when the image is run.
  (ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]})

Status Codes:

- **201** – no error
- **404** – no such container
- **500** – server error

### Monitor Docker's events

`GET /events`

Get events from docker, either in real time via streaming,
or via polling (using since).

Docker containers will report the following events:

    create, destroy, die, export, kill, pause, restart, start, stop, unpause

and Docker images will report:

    untag, delete

**Example request**:

    GET /events?since=1374067924

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {"status": "create", "id": "dfdf82bd3881","from": "base:latest", "time":1374067924}
    {"status": "start", "id": "dfdf82bd3881","from": "base:latest", "time":1374067924}
    {"status": "stop", "id": "dfdf82bd3881","from": "base:latest", "time":1374067966}
    {"status": "destroy", "id": "dfdf82bd3881","from": "base:latest", "time":1374067970}

Query Parameters:

- **since** – timestamp used for polling

Status Codes:

- **200** – no error
- **500** – server error

### Get a tarball containing all images and tags in a repository

`GET /images/(name)/get`

Get a tarball containing all images and metadata for the repository
specified by `name`.
See the [image tarball format](#image-tarball-format) for more details.
+ +**Example request** + + GET /images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +Status Codes: + +- **200** – no error +- **500** – server error + +### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into the docker repository. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /images/load + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +Status Codes: + +- **200** – no error +- **500** – server error + +### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing three files: + +1. `VERSION`: currently `1.0` - the file format version +2. `json`: detailed layer information, similar to `docker inspect layer_id` +3. `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file will contain `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, there will also be a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +# 3. Going further + +## 3.1 Inside `docker run` + +Here are the steps of `docker run`: + + - Create the container + + - If the status code is 404, it means the image doesn't exist: + - Try to pull it + - Then retry to create the container + + - Start the container + + - If you are not in detached mode: + - Attach to the container, using logs=1 (to have stdout and + stderr from the container's start) and stream=1 + + - If in detached mode or only stdin is attached: + - Display the container's id + +## 3.2 Hijacking + +In this version of the API, /attach, uses hijacking to transport stdin, +stdout and stderr on the same socket. This might change in the future. + +## 3.3 CORS Requests + +To enable cross origin requests to the remote api add the flag +"--api-enable-cors" when running docker in daemon mode. + + $ docker -d -H="192.168.1.9:2375" --api-enable-cors diff --git a/docs/reference/api/docker_remote_api_v1.9.md b/docs/reference/api/docker_remote_api_v1.9.md new file mode 100644 index 00000000..56dbfaae --- /dev/null +++ b/docs/reference/api/docker_remote_api_v1.9.md @@ -0,0 +1,1358 @@ + + +# Docker Remote API v1.9 + +# 1. Brief introduction + + - The Remote API has replaced rcli + - The daemon listens on `unix:///var/run/docker.sock` but you can bind + Docker to another host/port or a Unix socket. + - The API tends to be REST, but for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout, stdin` + and `stderr` + +# 2. Endpoints + +## 2.1 Containers + +### List containers + +`GET /containers/json` + +List containers. 
**Example request**:

    GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    [
      {
        "Id": "8dfafdbc3a40",
        "Image": "base:latest",
        "Command": "echo 1",
        "Created": 1367854155,
        "Status": "Exit 0",
        "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}],
        "SizeRw": 12288,
        "SizeRootFs": 0
      },
      {
        "Id": "9cd87474be90",
        "Image": "base:latest",
        "Command": "echo 222222",
        "Created": 1367854155,
        "Status": "Exit 0",
        "Ports": [],
        "SizeRw": 12288,
        "SizeRootFs": 0
      },
      {
        "Id": "3176a2479c92",
        "Image": "base:latest",
        "Command": "echo 3333333333333333",
        "Created": 1367854154,
        "Status": "Exit 0",
        "Ports": [],
        "SizeRw": 12288,
        "SizeRootFs": 0
      },
      {
        "Id": "4cb07b47f9fb",
        "Image": "base:latest",
        "Command": "echo 444444444444444444444444444444444",
        "Created": 1367854152,
        "Status": "Exit 0",
        "Ports": [],
        "SizeRw": 12288,
        "SizeRootFs": 0
      }
    ]

Query Parameters:

- **all** – 1/True/true or 0/False/false, Show all containers.
  Only running containers are shown by default (i.e., this defaults to false)
- **limit** – Show `limit` last created containers, include non-running ones.
- **since** – Show only containers created since Id, include non-running ones.
- **before** – Show only containers created before Id, include non-running ones.
- **size** – 1/True/true or 0/False/false, Show the containers sizes

Status Codes:

- **200** – no error
- **400** – bad parameter
- **500** – server error

### Create a container

`POST /containers/create`

Create a container

**Example request**:

    POST /containers/create HTTP/1.1
    Content-Type: application/json

    {
      "Hostname":"",
      "User":"",
      "Memory":0,
      "MemorySwap":0,
      "CpuShares":0,
      "AttachStdin":false,
      "AttachStdout":true,
      "AttachStderr":true,
      "PortSpecs":null,
      "Tty":false,
      "OpenStdin":false,
      "StdinOnce":false,
      "Env":null,
      "Cmd":[
        "date"
      ],
      "Dns":null,
      "Image":"base",
      "Volumes":{
        "/tmp": {}
      },
      "VolumesFrom":"",
      "WorkingDir":"",
      "ExposedPorts":{
        "22/tcp": {}
      }
    }

**Example response**:

    HTTP/1.1 201 Created
    Content-Type: application/json

    {
      "Id":"e90e34656806",
      "Warnings":[]
    }

Json Parameters:

- **Hostname** – Container host name
- **User** – Username or UID
- **Memory** – Memory Limit in bytes
- **CpuShares** – CPU shares (relative weight)
- **AttachStdin** – 1/True/true or 0/False/false, attach to
  standard input. Default false
- **AttachStdout** – 1/True/true or 0/False/false, attach to
  standard output. Default false
- **AttachStderr** – 1/True/true or 0/False/false, attach to
  standard error. Default false
- **Tty** – 1/True/true or 0/False/false, allocate a pseudo-tty.
  Default false
- **OpenStdin** – 1/True/true or 0/False/false, keep stdin open
  even if not attached. Default false

Query Parameters:

- **name** – Assign the specified name to the container. Must
Status Codes:

- **201** – no error
- **404** – no such container
- **406** – impossible to attach (container not running)
- **500** – server error

### Inspect a container

`GET /containers/(id)/json`

Return low-level information on the container `id`

**Example request**:

    GET /containers/4fa6e0f0c678/json HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
      "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2",
      "Created": "2013-05-07T14:51:42.041847+02:00",
      "Path": "date",
      "Args": [],
      "Config": {
        "Hostname": "4fa6e0f0c678",
        "User": "",
        "Memory": 0,
        "MemorySwap": 0,
        "AttachStdin": false,
        "AttachStdout": true,
        "AttachStderr": true,
        "PortSpecs": null,
        "Tty": false,
        "OpenStdin": false,
        "StdinOnce": false,
        "Env": null,
        "Cmd": [
          "date"
        ],
        "Dns": null,
        "Image": "base",
        "Volumes": {},
        "VolumesFrom": "",
        "WorkingDir": ""
      },
      "State": {
        "Running": false,
        "Pid": 0,
        "ExitCode": 0,
        "StartedAt": "2013-05-07T14:51:42.087658+02:00",
        "Ghost": false
      },
      "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
      "NetworkSettings": {
        "IpAddress": "",
        "IpPrefixLen": 0,
        "Gateway": "",
        "Bridge": "",
        "PortMapping": null
      },
      "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker",
      "ResolvConfPath": "/etc/resolv.conf",
      "Volumes": {},
      "HostConfig": {
        "Binds": null,
        "ContainerIDFile": "",
        "LxcConf": [],
        "Privileged": false,
        "PortBindings": {
          "80/tcp": [
            {
              "HostIp": "0.0.0.0",
              "HostPort": "49153"
            }
          ]
        },
        "Links": null,
        "PublishAllPorts": false
      }
    }

Status Codes:

- **200** – no error
- **404** – no such container
- **500** – server error

### List processes running inside a container

`GET /containers/(id)/top`

List processes running inside the container `id`

**Example request**:

    GET /containers/4fa6e0f0c678/top HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
      "Titles": [
        "USER",
        "PID",
        "%CPU",
        "%MEM",
        "VSZ",
        "RSS",
        "TTY",
        "STAT",
        "START",
        "TIME",
        "COMMAND"
      ],
      "Processes": [
        ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"],
        ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"]
      ]
    }

Query Parameters:

- **ps_args** – ps arguments to use (e.g., aux)

Status Codes:

- **200** – no error
- **404** – no such container
- **500** – server error

### Inspect changes on a container's filesystem

`GET /containers/(id)/changes`

Inspect changes on container `id`'s filesystem

**Example request**:

    GET /containers/4fa6e0f0c678/changes HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    [
      {
        "Path": "/dev",
        "Kind": 0
      },
      {
        "Path": "/dev/kmsg",
        "Kind": 1
      },
      {
        "Path": "/test",
        "Kind": 1
      }
    ]

Status Codes:

- **200** – no error
- **404** – no such container
- **500** – server error

### Export a container

`GET /containers/(id)/export`

Export the contents of container `id`

**Example request**:

    GET /containers/4fa6e0f0c678/export HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/octet-stream

    {{ TAR STREAM }}

Status Codes:

- **200** – no error
- **404** – no such container
- **500** – server error

### Start a container

`POST /containers/(id)/start`

Start the container `id`
**Example request**:

    POST /containers/(id)/start HTTP/1.1
    Content-Type: application/json

    {
      "Binds":["/tmp:/tmp"],
      "LxcConf":[{"Key":"lxc.utsname","Value":"docker"}],
      "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] },
      "PublishAllPorts":false,
      "Privileged":false
    }

**Example response**:

    HTTP/1.1 204 No Content
    Content-Type: text/plain

Json Parameters:

- **Binds** – Create a bind mount to a directory or file with
  [host-path]:[container-path]:[rw|ro]. If a directory
  "container-path" is missing, then docker creates a new volume.
- **LxcConf** – Map of custom lxc options
- **PortBindings** – Expose ports from the container, optionally
  publishing them via the HostPort flag
- **PublishAllPorts** – 1/True/true or 0/False/false, publish all
  exposed ports to the host interfaces. Default false
- **Privileged** – 1/True/true or 0/False/false, give extended
  privileges to this container. Default false

Status Codes:

- **204** – no error
- **404** – no such container
- **500** – server error

### Stop a container

`POST /containers/(id)/stop`

Stop the container `id`

**Example request**:

    POST /containers/e90e34656806/stop?t=5 HTTP/1.1

**Example response**:

    HTTP/1.1 204 No Content

Query Parameters:

- **t** – number of seconds to wait before killing the container

Status Codes:

- **204** – no error
- **404** – no such container
- **500** – server error

### Restart a container

`POST /containers/(id)/restart`

Restart the container `id`

**Example request**:

    POST /containers/e90e34656806/restart?t=5 HTTP/1.1

**Example response**:

    HTTP/1.1 204 No Content

Query Parameters:

- **t** – number of seconds to wait before killing the container

Status Codes:

- **204** – no error
- **404** – no such container
- **500** – server error

### Kill a container

`POST /containers/(id)/kill`

Kill the container `id`

**Example request**:

    POST /containers/e90e34656806/kill HTTP/1.1

**Example response**:

    HTTP/1.1 204 No Content

Query Parameters:

- **signal** – Signal to send to the container: integer or string like "SIGINT".
  When not set, SIGKILL is assumed and the call will wait for the container to exit.

Status Codes:

- **204** – no error
- **404** – no such container
- **500** – server error
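Because this version of the API accepts a `signal` query parameter on
kill, a client can attempt a graceful shutdown before falling back to the
default SIGKILL. A hedged Go sketch of that pattern over the daemon's
default Unix socket (the container ID is the hypothetical one from the
examples, and the fixed five-second grace period is an arbitrary choice):

    package main

    import (
        "context"
        "fmt"
        "net"
        "net/http"
        "time"
    )

    func main() {
        client := &http.Client{Transport: &http.Transport{
            DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
                return net.Dial("unix", "/var/run/docker.sock")
            },
        }}

        id := "e90e34656806" // hypothetical container ID from the examples

        // Ask the process to terminate gracefully first.
        resp, err := client.Post("http://docker/containers/"+id+"/kill?signal=SIGTERM",
            "text/plain", nil)
        if err != nil {
            panic(err)
        }
        resp.Body.Close()
        fmt.Println("SIGTERM sent:", resp.Status)

        // Give it a grace period, then fall back to the default SIGKILL.
        time.Sleep(5 * time.Second)
        resp, err = client.Post("http://docker/containers/"+id+"/kill",
            "text/plain", nil)
        if err != nil {
            panic(err)
        }
        resp.Body.Close()
        fmt.Println("SIGKILL sent:", resp.Status)
    }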
### Attach to a container

`POST /containers/(id)/attach`

Attach to the container `id`

**Example request**:

    POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/vnd.docker.raw-stream

    {{ STREAM }}

Query Parameters:

- **logs** – 1/True/true or 0/False/false, return logs. Default
  false
- **stream** – 1/True/true or 0/False/false, return stream.
  Default false
- **stdin** – 1/True/true or 0/False/false, if stream=true, attach
  to stdin. Default false
- **stdout** – 1/True/true or 0/False/false, if logs=true, return
  stdout log, if stream=true, attach to stdout. Default false
- **stderr** – 1/True/true or 0/False/false, if logs=true, return
  stderr log, if stream=true, attach to stderr. Default false

Status Codes:

- **200** – no error
- **400** – bad parameter
- **404** – no such container
- **500** – server error

  **Stream details**:

  When the TTY setting is enabled in
  [`POST /containers/create`](#create-a-container), the
  stream is the raw data from the process PTY and client's stdin.
  When the TTY is disabled, the stream is multiplexed to separate
  stdout and stderr.

  The format is a **Header** and a **Payload** (frame).

  **HEADER**

  The header contains the information on which stream the payload
  belongs to (stdout or stderr). It also contains the size of the
  associated frame, encoded in the last 4 bytes (uint32).

  It is encoded on the first 8 bytes like this:

      header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}

  `STREAM_TYPE` can be:

- 0: stdin (will be written on stdout)
- 1: stdout
- 2: stderr

  `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of
  the uint32 size encoded as big endian.

  **PAYLOAD**

  The payload is the raw stream.

  **IMPLEMENTATION**

  The simplest way to implement the Attach protocol is the following:

  1. Read 8 bytes
  2. Choose stdout or stderr depending on the first byte
  3. Extract the frame size from the last 4 bytes
  4. Read the extracted size and output it on the correct output
  5. Goto 1)

### Attach to a container (websocket)

`GET /containers/(id)/attach/ws`

Attach to the container `id` via websocket

Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455)

**Example request**

    GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1

**Example response**

    {{ STREAM }}

Query Parameters:

- **logs** – 1/True/true or 0/False/false, return logs. Default false
- **stream** – 1/True/true or 0/False/false, return stream.
  Default false
- **stdin** – 1/True/true or 0/False/false, if stream=true, attach
  to stdin. Default false
- **stdout** – 1/True/true or 0/False/false, if logs=true, return
  stdout log, if stream=true, attach to stdout. Default false
- **stderr** – 1/True/true or 0/False/false, if logs=true, return
  stderr log, if stream=true, attach to stderr. Default false

Status Codes:

- **200** – no error
- **400** – bad parameter
- **404** – no such container
- **500** – server error

### Wait a container

`POST /containers/(id)/wait`

Block until container `id` stops, then return the exit code

**Example request**:

    POST /containers/16253994b7c4/wait HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {"StatusCode": 0}

Status Codes:

- **200** – no error
- **404** – no such container
- **500** – server error

### Remove a container

`DELETE /containers/(id)`

Remove the container `id` from the filesystem

**Example request**:

    DELETE /containers/16253994b7c4?v=1 HTTP/1.1

**Example response**:

    HTTP/1.1 204 No Content

Query Parameters:

- **v** – 1/True/true or 0/False/false, Remove the volumes
  associated with the container. Default false

Status Codes:

- **204** – no error
- **400** – bad parameter
- **404** – no such container
- **500** – server error
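The attach endpoint above relies on connection hijacking (see section 3.2
below): after the response headers, the same socket carries the raw
stream. A rough Go sketch of that handshake over the default Unix socket;
real code would demultiplex the frames as described under **Stream
details** instead of copying bytes verbatim, and would parse headers with
a proper HTTP reader rather than scanning for the blank line:

    package main

    import (
        "bufio"
        "fmt"
        "io"
        "net"
        "os"
    )

    func main() {
        // Dial the daemon's default Unix socket directly so we keep
        // control of the connection after the HTTP exchange.
        conn, err := net.Dial("unix", "/var/run/docker.sock")
        if err != nil {
            panic(err)
        }
        defer conn.Close()

        id := "16253994b7c4" // hypothetical container ID from the examples
        fmt.Fprintf(conn, "POST /containers/%s/attach?stream=1&stdout=1&stderr=1 HTTP/1.1\r\n"+
            "Host: docker\r\n\r\n", id)

        // Read the status line and headers; everything after the blank
        // line is the hijacked raw stream.
        r := bufio.NewReader(conn)
        for {
            line, err := r.ReadString('\n')
            if err != nil {
                panic(err)
            }
            if line == "\r\n" {
                break
            }
        }

        // Copy the raw stream to stdout (frame headers included).
        io.Copy(os.Stdout, r)
    }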
### Copy files or folders from a container

`POST /containers/(id)/copy`

Copy files or folders of container `id`

**Example request**:

    POST /containers/4fa6e0f0c678/copy HTTP/1.1
    Content-Type: application/json

    {
      "Resource": "test.txt"
    }

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/octet-stream

    {{ TAR STREAM }}

Status Codes:

- **200** – no error
- **404** – no such container
- **500** – server error

## 2.2 Images

### List images

`GET /images/json`

**Example request**:

    GET /images/json?all=0 HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    [
      {
        "RepoTags": [
          "ubuntu:12.04",
          "ubuntu:precise",
          "ubuntu:latest"
        ],
        "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
        "Created": 1365714795,
        "Size": 131506275,
        "VirtualSize": 131506275
      },
      {
        "RepoTags": [
          "ubuntu:12.10",
          "ubuntu:quantal"
        ],
        "ParentId": "27cf784147099545",
        "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
        "Created": 1364102658,
        "Size": 24653,
        "VirtualSize": 180116135
      }
    ]

### Create an image

`POST /images/create`

Create an image, either by pulling it from the registry or by importing it

**Example request**:

    POST /images/create?fromImage=base HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {"status": "Pulling..."}
    {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}}
    {"error": "Invalid..."}
    ...

When using this endpoint to pull an image from the registry, the
`X-Registry-Auth` header can be used to include
a base64-encoded AuthConfig object.

Query Parameters:

- **fromImage** – name of the image to pull
- **fromSrc** – source to import, - means stdin
- **repo** – repository
- **tag** – tag
- **registry** – the registry to pull from

Request Headers:

- **X-Registry-Auth** – base64-encoded AuthConfig object

Status Codes:

- **200** – no error
- **500** – server error
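For the plain JSON endpoints in this section, a small typed client is
enough. A hedged Go sketch that consumes `GET /images/json` from the
list-images endpoint above (default Unix socket assumed; the struct covers
only the fields shown in the example response):

    package main

    import (
        "context"
        "encoding/json"
        "fmt"
        "net"
        "net/http"
    )

    // image mirrors the fields shown in the example response above.
    type image struct {
        RepoTags    []string
        Id          string
        Created     int64
        Size        int64
        VirtualSize int64
    }

    func main() {
        client := &http.Client{Transport: &http.Transport{
            DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
                return net.Dial("unix", "/var/run/docker.sock")
            },
        }}

        resp, err := client.Get("http://docker/images/json?all=0")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        var images []image
        if err := json.NewDecoder(resp.Body).Decode(&images); err != nil {
            panic(err)
        }
        for _, img := range images {
            fmt.Println(img.Id, img.RepoTags, img.VirtualSize, "bytes")
        }
    }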
### Insert a file in an image

`POST /images/(name)/insert`

Insert a file from `url` in the image `name` at `path`

**Example request**:

    POST /images/test/insert?path=/usr&url=myurl HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {"status":"Inserting..."}
    {"status":"Inserting", "progress":"1/? (n/a)", "progressDetail":{"current":1}}
    {"error":"Invalid..."}
    ...

Query Parameters:

- **url** – The url from where the file is taken
- **path** – The path where the file is stored

Status Codes:

- **200** – no error
- **500** – server error

### Inspect an image

`GET /images/(name)/json`

Return low-level information on the image `name`

**Example request**:

    GET /images/base/json HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
      "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
      "parent":"27cf784147099545",
      "created":"2013-03-23T22:24:18.818426-07:00",
      "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0",
      "container_config":
      {
        "Hostname":"",
        "User":"",
        "Memory":0,
        "MemorySwap":0,
        "AttachStdin":false,
        "AttachStdout":false,
        "AttachStderr":false,
        "PortSpecs":null,
        "Tty":true,
        "OpenStdin":true,
        "StdinOnce":false,
        "Env":null,
        "Cmd": ["/bin/bash"],
        "Dns":null,
        "Image":"base",
        "Volumes":null,
        "VolumesFrom":"",
        "WorkingDir":""
      },
      "Size": 6824592
    }

Status Codes:

- **200** – no error
- **404** – no such image
- **500** – server error

### Get the history of an image

`GET /images/(name)/history`

Return the history of the image `name`

**Example request**:

    GET /images/base/history HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    [
      {
        "Id": "b750fe79269d",
        "Created": 1364102658,
        "CreatedBy": "/bin/bash"
      },
      {
        "Id": "27cf78414709",
        "Created": 1364068391,
        "CreatedBy": ""
      }
    ]

Status Codes:

- **200** – no error
- **404** – no such image
- **500** – server error

### Push an image on the registry

`POST /images/(name)/push`

Push the image `name` on the registry

**Example request**:

    POST /images/test/push HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {"status": "Pushing..."}
    {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}
    {"error": "Invalid..."}
    ...

Request Headers:

- **X-Registry-Auth** – include a base64-encoded AuthConfig
  object.

Status Codes:

- **200** – no error
- **404** – no such image
- **500** – server error

### Tag an image into a repository

`POST /images/(name)/tag`

Tag the image `name` into a repository

**Example request**:

    POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1

**Example response**:

    HTTP/1.1 201 Created

Query Parameters:

- **repo** – The repository to tag in
- **force** – 1/True/true or 0/False/false, default false
- **tag** – The new tag name

Status Codes:

- **201** – no error
- **400** – bad parameter
- **404** – no such image
- **409** – conflict
- **500** – server error

### Remove an image

`DELETE /images/(name)`

Remove the image `name` from the filesystem

**Example request**:

    DELETE /images/test HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-type: application/json

    [
      {"Untagged": "3e2f21a89f"},
      {"Deleted": "3e2f21a89f"},
      {"Deleted": "53b4f83ac9"}
    ]

Status Codes:

- **200** – no error
- **404** – no such image
- **409** – conflict
- **500** – server error

### Search images

`GET /images/search`

Search for an image on [Docker Hub](https://hub.docker.com).

> **Note**:
> The response keys have changed from API v1.6 to reflect the JSON
> sent by the registry server to the docker daemon's request.
+ +**Example request**: + + GET /images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_trusted": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_trusted": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_trusted": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +Query Parameters: + +- **term** – term to search + +Status Codes: + +- **200** – no error +- **500** – server error + +## 2.3 Misc + +### Build an image from Dockerfile + +`POST /build` + +Build an image from Dockerfile using a POST body. + +**Example request**: + + POST /build HTTP/1.1 + + {{ TAR STREAM }} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + + The stream must be a tar archive compressed with one of the + following algorithms: identity (no compression), gzip, bzip2, xz. + + The archive must include a file called `Dockerfile` + at its root. It may include any number of other files, + which will be accessible in the build context (See the [*ADD build + command*](/reference/builder/#add)). + +Query Parameters: + +- **t** – repository name (and optionally a tag) to be applied to + the resulting image in case of success +- **remote** – build source URI (git or HTTPS/HTTP) +- **q** – suppress verbose build output +- **nocache** – do not use the cache when building the image +- **rm** – Remove intermediate containers after a successful build + + Request Headers: + +- **Content-type** – should be set to `"application/tar"`. 
- **X-Registry-Config** – base64-encoded ConfigFile object

Status Codes:

- **200** – no error
- **500** – server error

### Check auth configuration

`POST /auth`

Get the default username and email

**Example request**:

    POST /auth HTTP/1.1
    Content-Type: application/json

    {
      "username": "hannibal",
      "password": "xxxx",
      "email": "hannibal@a-team.com",
      "serveraddress": "https://index.docker.io/v1/"
    }

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: text/plain

Status Codes:

- **200** – no error
- **204** – no error
- **500** – server error

### Display system-wide information

`GET /info`

Display system-wide information

**Example request**:

    GET /info HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
      "Containers":11,
      "Images":16,
      "Debug":false,
      "NFd": 11,
      "NGoroutines":21,
      "MemoryLimit":true,
      "SwapLimit":false,
      "IPv4Forwarding":true
    }

Status Codes:

- **200** – no error
- **500** – server error

### Show the Docker version information

`GET /version`

Show the docker version information

**Example request**:

    GET /version HTTP/1.1

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
      "Version":"0.2.2",
      "GitCommit":"5a2a5cc+CHANGES",
      "GoVersion":"go1.0.3"
    }

Status Codes:

- **200** – no error
- **500** – server error

### Create a new image from a container's changes

`POST /commit`

Create a new image from a container's changes

**Example request**:

    POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1
    Content-Type: application/json

    {
      "Hostname":"",
      "User":"",
      "Memory":0,
      "MemorySwap":0,
      "AttachStdin":false,
      "AttachStdout":true,
      "AttachStderr":true,
      "PortSpecs":null,
      "Tty":false,
      "OpenStdin":false,
      "StdinOnce":false,
      "Env":null,
      "Cmd":[
        "date"
      ],
      "Volumes":{
        "/tmp": {}
      },
      "WorkingDir":"",
      "DisableNetwork": false,
      "ExposedPorts":{
        "22/tcp": {}
      }
    }

**Example response**:

    HTTP/1.1 201 Created
    Content-Type: application/vnd.docker.raw-stream

    {"Id": "596069db4bf5"}

Json Parameters:

- **config** – the container's configuration

Query Parameters:

- **container** – source container
- **repo** – repository
- **tag** – tag
- **m** – commit message
- **author** – author (e.g., "John Hannibal Smith
  <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")

Status Codes:

- **201** – no error
- **404** – no such container
- **500** – server error

### Monitor Docker's events

`GET /events`

Get events from docker, either in real time via streaming, or via
polling (using since).

Docker containers will report the following events:

    create, destroy, die, export, kill, pause, restart, start, stop, unpause

and Docker images will report:

    untag, delete

**Example request**:

    GET /events?since=1374067924

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/json

    {"status": "create", "id": "dfdf82bd3881","from": "base:latest", "time":1374067924}
    {"status": "start", "id": "dfdf82bd3881","from": "base:latest", "time":1374067924}
    {"status": "stop", "id": "dfdf82bd3881","from": "base:latest", "time":1374067966}
    {"status": "destroy", "id": "dfdf82bd3881","from": "base:latest", "time":1374067970}

Query Parameters:

- **since** – timestamp used for polling

Status Codes:

- **200** – no error
- **500** – server error
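The events endpoint streams one JSON object per event, so a client can
hang a decoder off the response body until the connection closes. A
hedged Go sketch over the default Unix socket; the struct mirrors the
lowercase keys in the example response:

    package main

    import (
        "context"
        "encoding/json"
        "fmt"
        "io"
        "net"
        "net/http"
    )

    // event mirrors the keys shown in the example response above.
    type event struct {
        Status string `json:"status"`
        ID     string `json:"id"`
        From   string `json:"from"`
        Time   int64  `json:"time"`
    }

    func main() {
        client := &http.Client{Transport: &http.Transport{
            DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
                return net.Dial("unix", "/var/run/docker.sock")
            },
        }}

        // Poll from a fixed timestamp; omit `since` to stream live events only.
        resp, err := client.Get("http://docker/events?since=1374067924")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        dec := json.NewDecoder(resp.Body)
        for {
            var e event
            if err := dec.Decode(&e); err == io.EOF {
                break
            } else if err != nil {
                panic(err)
            }
            fmt.Printf("%d %s %s (%s)\n", e.Time, e.Status, e.ID, e.From)
        }
    }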
### Get a tarball containing all images and tags in a repository

`GET /images/(name)/get`

Get a tarball containing all images and metadata for the repository specified by `name`.

See the [image tarball format](#image-tarball-format) for more details.

**Example request**

    GET /images/ubuntu/get

**Example response**:

    HTTP/1.1 200 OK
    Content-Type: application/x-tar

    Binary data stream

Status Codes:

- **200** – no error
- **500** – server error

### Load a tarball with a set of images and tags into docker

`POST /images/load`

Load a set of images and tags into the docker repository.

See the [image tarball format](#image-tarball-format) for more details.

**Example request**

    POST /images/load

    Tarball in body

**Example response**:

    HTTP/1.1 200 OK

Status Codes:

- **200** – no error
- **500** – server error

### Image tarball format

An image tarball contains one directory per image layer (named using its long ID),
each containing three files:

1. `VERSION`: currently `1.0` - the file format version
2. `json`: detailed layer information, similar to `docker inspect layer_id`
3. `layer.tar`: A tarfile containing the filesystem changes in this layer

The `layer.tar` file will contain `aufs` style `.wh..wh.aufs` files and directories
for storing attribute changes and deletions.

If the tarball defines a repository, there will also be a `repositories` file at
the root that contains a list of repository and tag names mapped to layer IDs.

```
{"hello-world":
    {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"}
}
```

# 3. Going further

## 3.1 Inside `docker run`

Here are the steps of `docker run` (a Go sketch of this flow follows
section 3.3 below):

 - Create the container

 - If the status code is 404, it means the image doesn't exist:
    - Try to pull it
    - Then retry to create the container

 - Start the container

 - If you are not in detached mode:
    - Attach to the container, using logs=1 (to have stdout and
      stderr from the container's start) and stream=1

 - If in detached mode or only stdin is attached:
    - Display the container's id

## 3.2 Hijacking

In this version of the API, `/attach` uses hijacking to transport stdin,
stdout and stderr on the same socket. This might change in the future.

## 3.3 CORS requests

To enable cross origin requests to the remote API, add the flag
`--api-enable-cors` when running docker in daemon mode.

    $ docker -d -H="192.168.1.9:2375" --api-enable-cors
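As referenced in section 3.1, here is a hedged Go sketch of the create,
pull on 404, retry, then start flow. It assumes the daemon's default Unix
socket; `containerConfig` is a hypothetical minimal create body, and
attach/detach handling is omitted for brevity:

    package main

    import (
        "context"
        "encoding/json"
        "fmt"
        "io"
        "net"
        "net/http"
        "strings"
    )

    // containerConfig is a hypothetical minimal create body; the full
    // field set is shown under "Create a container" above.
    const containerConfig = `{"Image": "base", "Cmd": ["date"]}`

    var client = &http.Client{Transport: &http.Transport{
        DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
            return net.Dial("unix", "/var/run/docker.sock")
        },
    }}

    // create POSTs /containers/create and returns the status code and ID.
    func create() (int, string) {
        resp, err := client.Post("http://docker/containers/create",
            "application/json", strings.NewReader(containerConfig))
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        var out struct{ Id string }
        json.NewDecoder(resp.Body).Decode(&out) // no ID on non-201 replies
        return resp.StatusCode, out.Id
    }

    func main() {
        // Create the container.
        code, id := create()
        if code == 404 {
            // The image doesn't exist: try to pull it...
            resp, err := client.Post("http://docker/images/create?fromImage=base",
                "text/plain", nil)
            if err != nil {
                panic(err)
            }
            io.Copy(io.Discard, resp.Body) // drain the progress stream
            resp.Body.Close()
            // ...then retry to create the container.
            code, id = create()
        }
        if code != 201 {
            panic(fmt.Sprintf("create failed with status %d", code))
        }

        // Start the container.
        resp, err := client.Post("http://docker/containers/"+id+"/start",
            "application/json", strings.NewReader("{}"))
        if err != nil {
            panic(err)
        }
        resp.Body.Close()
        fmt.Println("started", id)
    }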
diff --git a/docs/reference/api/images/event_state.gliffy b/docs/reference/api/images/event_state.gliffy
new file mode 100644
index 00000000..fd4cc5e4
--- /dev/null
+++ b/docs/reference/api/images/event_state.gliffy
@@ -0,0 +1 @@
+[single-line Gliffy JSON diagram source, mangled in extraction: a container lifecycle state diagram for the events API, with transitions labeled docker create, docker run, docker start, docker rm, docker pause, and docker unpause]

Yes

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":521.0,"y":209.0,"rotation":0.0,"id":287,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":63,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":195,"py":0.5,"px":0.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":209,"py":0.5,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-11.0,-19.0],[-97.23401871576777,-19.0],[-97.23401871576777,186.0],[-117.46803743153555,186.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[]},{"x":988.0,"y":232.0,"rotation":0.0,"id":282,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":62,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":201,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[39.5,18.0],[-150.0,18.0],[-150.0,68.0],[-250.0,68.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[]},{"x":664.0,"y":493.0,"rotation":0.0,"id":276,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":61,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":207,"py":0.5,"px":0.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":236,"py":0.7071067811865475,"px":0.9999999999999998}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[8.5,42.23401871576766],[-20.25,42.23401871576766],[-20.25,-44.7157287525381],[-49.0,-44.7157287525381]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":678.0,"y":344.0,"rotation":0.0,"id":273,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":59,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":236,"py":0.29289321881345237,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":91.17113025781374,"endArrowRotation":176.63803454243802,"interpolationType":"quadratic","cornerRadius":null,"controlPath":[[2.0,-4.0],[2.0,87.7157287525381],[-63.0,87.7157287525381]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":275,"width":57.0,"height":40.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"typ
e":"fixed","lineTValue":0.5,"linePerpValue":0.0,"cardinalityType":null,"html":"

container 

process

exited

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":566.0,"y":431.0,"rotation":0.0,"id":272,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":58,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":236,"py":0.5,"px":0.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":217,"py":0.5,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-26.0,9.0],[-36.867009357883944,9.0],[-36.867009357883944,39.0],[-47.73401871576789,39.0]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":785.0,"y":119.0,"rotation":0.0,"id":270,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":57,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":199,"py":0.5,"px":0.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":209,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[5.0,1.0],[-416.46803743153555,1.0],[-416.46803743153555,241.0]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":829.0,"y":172.0,"rotation":0.0,"id":269,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":56,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":248,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":199,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-1.5,-2.0],[-1.5,-32.0]],"lockSegments":{},"ortho":false}},"linkMap":[]},{"x":661.0,"y":189.0,"rotation":0.0,"id":267,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":55,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":195,"py":0.5,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[7.0,2.284271247461902],[-76.0,1.0]],"lockSegments":{},"ortho":false}},"linkMap":[]},{"x":946.0,"y":319.0,"rotation":0.0,"id":263,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":53,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":197,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":233,"py":0.5,"px":0.0}}},
"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.5,1.0],[81.5,1.0]],"lockSegments":{},"ortho":false}},"linkMap":[]},{"x":708.0,"y":286.0,"rotation":0.0,"id":256,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":51,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":211,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":254,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.5,-2.0],[-0.5,-76.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":258,"width":64.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.3108108108108108,"linePerpValue":null,"cardinalityType":null,"html":"

docker kill

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":710.0,"y":359.0,"rotation":0.0,"id":245,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":43,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":211,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":207,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-2.5,-5.0],[0.0,156.23401871576766]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":247,"width":83.0,"height":27.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 killed by

out-of-memory

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":761.0,"y":318.0,"rotation":0.0,"id":238,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":41,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":211,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":197,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-18.5,1.0],[111.5,2.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":240,"width":85.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.4363456059259962,"linePerpValue":null,"cardinalityType":null,"html":"

docker restart

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":608.0,"y":319.0,"rotation":0.0,"id":232,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":36,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":191,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":211,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[7.0,1.0],[64.5,0.0]],"lockSegments":{},"ortho":false}},"linkMap":[]},{"x":333.53196256846445,"y":360.0,"rotation":0.0,"id":209,"width":70.0,"height":70.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.connector","order":21,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#e6b8af","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5555555555555554,"y":0.0,"rotation":0.0,"id":210,"width":66.88888888888889,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

stopped

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":540.0,"y":300.0,"rotation":0.0,"id":191,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":192,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

start

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":510.0,"y":170.0,"rotation":0.0,"id":195,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":7,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":196,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

kill

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":872.5,"y":300.0,"rotation":0.0,"id":197,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":9,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":198,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

die

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":790.0,"y":100.0,"rotation":0.0,"id":199,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":11,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":200,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

stop

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":790.0,"y":450.0,"rotation":0.0,"id":205,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":17,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":206,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

unpause

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":672.5,"y":515.2340187157677,"rotation":0.0,"id":207,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":19,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":208,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

OOM

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":672.5,"y":284.0,"rotation":0.0,"id":211,"width":70.0,"height":70.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.connector","order":23,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#b6d7a8","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5555555555555556,"y":0.0,"rotation":0.0,"id":212,"width":66.88888888888889,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

running

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":403.5319625684644,"y":420.0,"rotation":0.0,"id":227,"width":130.46803743153555,"height":116.23401871576777,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":33,"lockAspectRatio":false,"lockShape":false,"children":[{"x":-6.765981284232225,"y":76.0,"rotation":45.0,"id":223,"width":80.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":32,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Restart 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[]},{"x":57.234018715767775,"y":75.0,"rotation":315.0,"id":219,"width":80.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Policy

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[]},{"x":14.734018715767775,"y":0.0,"rotation":0.0,"id":217,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.decision","order":29,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.diamond.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":218,"width":96.0,"height":27.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Should restart?

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]}]},{"x":1027.5,"y":375.0,"rotation":0.0,"id":213,"width":70.0,"height":70.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.connector","order":25,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#fce5cd","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5555555555555556,"y":0.0,"rotation":0.0,"id":214,"width":66.88888888888889,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

paused

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":872.5,"y":390.0,"rotation":0.0,"id":203,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":204,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

pause

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":540.0,"y":420.0,"rotation":0.0,"id":236,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":39,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":237,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

die

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":790.0,"y":170.0,"rotation":0.0,"id":248,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":45,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":249,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

die

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":670.0,"y":170.0,"rotation":0.0,"id":254,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":49,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":255,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

die

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":740.0,"y":323.0,"rotation":0.0,"id":250,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":47,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":248,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-10.0,-33.0],[87.5,-113.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":253,"width":73.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

docker stop

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":1027.5,"y":300.0,"rotation":0.0,"id":233,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":234,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

start

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":1027.5,"y":230.0,"rotation":0.0,"id":201,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":13,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":202,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

restart

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":1066.5,"y":298.0,"rotation":0.0,"id":264,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":54,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":233,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":201,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-1.5,2.0],[-1.5,-28.0]],"lockSegments":{},"ortho":false}},"linkMap":[]},{"x":132.5,"y":300.0,"rotation":0.0,"id":228,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":34,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":229,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

create

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":130.0,"y":230.0,"rotation":0.0,"id":188,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":190,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

create

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":263.53196256846445,"y":284.0,"rotation":0.0,"id":335,"width":70.0,"height":70.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.connector","order":81,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#a4c2f4","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5555555555555554,"y":0.0,"rotation":0.0,"id":336,"width":66.88888888888889,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

created

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":60.0,"y":415.0,"rotation":0.0,"id":215,"width":70.0,"height":70.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.connector","order":27,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#b7b7b7","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5555555555555556,"y":0.0,"rotation":0.0,"id":216,"width":66.88888888888889,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

deleted

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":170.0,"y":430.0,"rotation":0.0,"id":193,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":5,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":194,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

destroy

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]}],"shapeStyles":{"com.gliffy.shape.uml.uml_v2.state_machine":{"fill":"#e2e2e2","stroke":"#000000","strokeWidth":2},"com.gliffy.shape.flowchart.flowchart_v1.default":{"fill":"#a4c2f4","stroke":"#333333","strokeWidth":2}},"lineStyles":{"global":{"endArrow":1,"orthoMode":2}},"textStyles":{"global":{"bold":true,"color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.images"]},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/docs/reference/api/images/event_state.png b/docs/reference/api/images/event_state.png new file mode 100644 index 00000000..28d09ba1 Binary files /dev/null and b/docs/reference/api/images/event_state.png differ diff --git a/docs/reference/api/registry_api.md b/docs/reference/api/registry_api.md new file mode 100644 index 00000000..9da7cae6 --- /dev/null +++ b/docs/reference/api/registry_api.md @@ -0,0 +1,598 @@ + + +# Docker Registry API v1 + +## Introduction + + - This is the REST API for the Docker Registry 1.0 + - It stores the images and the graph for a set of repositories + - It does not have user accounts data + - It has no notion of user accounts or authorization + - It delegates authentication and authorization to the Index Auth + service using tokens + - It supports different storage backends (S3, cloud files, local FS) + - It doesn't have a local database + - The registry is open source: [Docker Registry](https://github.com/docker/docker-registry) + + We expect that there will be multiple registries out there. To help to +grasp the context, here are some examples of registries: + + - **sponsor registry**: such a registry is provided by a third-party + hosting infrastructure as a convenience for their customers and the + Docker community as a whole. Its costs are supported by the third + party, but the management and operation of the registry are + supported by Docker. It features read/write access, and delegates + authentication and authorization to the Index. + - **mirror registry**: such a registry is provided by a third-party + hosting infrastructure but is targeted at their customers only. Some + mechanism (unspecified to date) ensures that public images are + pulled from a sponsor registry to the mirror registry, to make sure + that the customers of the third-party provider can `docker pull` + those images locally. + - **vendor registry**: such a registry is provided by a software + vendor, who wants to distribute Docker images. It would be operated + and managed by the vendor. Only users authorized by the vendor would + be able to get write access. Some images would be public (accessible + for anyone), others private (accessible only for authorized users). + Authentication and authorization would be delegated to the Index. + The goal of vendor registries is to let someone do `docker pull + basho/riak1.3` and automatically push from the vendor registry + (instead of a sponsor registry); i.e., get all the convenience of a + sponsor registry, while retaining control on the asset distribution. + - **private registry**: such a registry is located behind a firewall, + or protected by an additional security layer (HTTP authorization, + SSL client-side certificates, IP address authorization...). The + registry is operated by a private entity, outside of Docker's + control. 
It can optionally delegate additional authorization to the
+  Index, but it is not mandatory.
+
+> **Note**:
+> Mirror registries and private registries which do not use the Index
+> don't even need to run the registry code. They can be implemented by any
+> kind of transport implementing HTTP GET and PUT. Read-only registries
+> can be powered by a simple static HTTPS server.
+
+> **Note**:
+> The latter implies that while HTTP is the protocol of choice for a registry,
+> multiple schemes are possible (and in some cases, trivial):
+>
+> - HTTP with GET (and PUT for read-write registries);
+> - local mount point;
+> - remote Docker addressed through SSH.
+
+The SSH scheme would only require two new commands in Docker, e.g.,
+`registryget` and `registryput`, wrapping access to the local filesystem
+(and optionally doing consistency checks). Authentication and authorization
+are then delegated to SSH (e.g., with public keys).
+
+> **Note**:
+> Private registry servers that expose an HTTP endpoint need to be secured with
+> TLS (preferably TLSv1.2, but at least TLSv1.0). Make sure to put the CA
+> certificate at /etc/docker/certs.d/my.registry.com:5000/ca.crt on the Docker
+> host, so that the daemon can securely access the private registry.
+> Support for SSLv3 and lower is not available due to security issues.
+
+The default namespace for a private repository is `library`.
+
+# Endpoints
+
+## Images
+
+### Get image layer
+
+`GET /v1/images/(image_id)/layer`
+
+Get the image layer for a given `image_id`.
+
+**Example Request**:
+
+    GET /v1/images/088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c/layer HTTP/1.1
+    Host: registry-1.docker.io
+    Accept: application/json
+    Content-Type: application/json
+    Authorization: Token signature=123abc,repository="foo/bar",access=read
+
+Parameters:
+
+- **image_id** – the id of the layer you want to get
+
+**Example Response**:
+
+    HTTP/1.1 200
+    Vary: Accept
+    X-Docker-Registry-Version: 0.6.0
+    Cookie: (Cookie provided by the Registry)
+
+    {layer binary data stream}
+
+Status Codes:
+
+- **200** – OK
+- **401** – Requires authorization
+- **404** – Image not found
+
+### Put image layer
+
+`PUT /v1/images/(image_id)/layer`
+
+Put the image layer for a given `image_id`.
+
+**Example Request**:
+
+    PUT /v1/images/088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c/layer HTTP/1.1
+    Host: registry-1.docker.io
+    Transfer-Encoding: chunked
+    Authorization: Token signature=123abc,repository="foo/bar",access=write
+
+    {layer binary data stream}
+
+Parameters:
+
+- **image_id** – the id of the layer you want to upload
+
+**Example Response**:
+
+    HTTP/1.1 200
+    Vary: Accept
+    Content-Type: application/json
+    X-Docker-Registry-Version: 0.6.0
+
+    ""
+
+Status Codes:
+
+- **200** – OK
+- **401** – Requires authorization
+- **404** – Image not found
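+As an aside, it may help to see these two layer endpoints from the
+client's side. The sketch below is illustrative only, not part of the API
+definition: it assumes Python with the third-party `requests` library, and
+the host and token values are placeholders taken from the examples above.
+
+    # Illustrative client sketch for the layer endpoints (not normative).
+    import requests
+
+    REGISTRY = "https://registry-1.docker.io"
+    READ_TOKEN = 'signature=123abc,repository="foo/bar",access=read'
+
+    def get_layer(image_id, dest_path):
+        # The response body is a raw binary stream, so stream it to disk.
+        resp = requests.get(
+            "%s/v1/images/%s/layer" % (REGISTRY, image_id),
+            headers={"Authorization": "Token " + READ_TOKEN},
+            stream=True,
+        )
+        resp.raise_for_status()  # 401/404 surface as exceptions
+        with open(dest_path, "wb") as f:
+            for chunk in resp.iter_content(chunk_size=65536):
+                f.write(chunk)
+
+    def put_layer(image_id, src_path, write_token):
+        # The request body is the raw layer stream (see the PUT example).
+        with open(src_path, "rb") as f:
+            resp = requests.put(
+                "%s/v1/images/%s/layer" % (REGISTRY, image_id),
+                headers={"Authorization": "Token " + write_token},
+                data=f,
+            )
+        resp.raise_for_status()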
+## Image
+
+### Put image JSON
+
+`PUT /v1/images/(image_id)/json`
+
+Put the image JSON (the image's metadata) for a given `image_id`.
+
+**Example Request**:
+
+    PUT /v1/images/088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c/json HTTP/1.1
+    Host: registry-1.docker.io
+    Accept: application/json
+    Content-Type: application/json
+    Cookie: (Cookie provided by the Registry)
+
+    {
+        id: "088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c",
+        parent: "aeee6396d62273d180a49c96c62e45438d87c7da4a5cf5d2be6bee4e21bc226f",
+        created: "2013-04-30T17:46:10.843673+03:00",
+        container: "8305672a76cc5e3d168f97221106ced35a76ec7ddbb03209b0f0d96bf74f6ef7",
+        container_config: {
+            Hostname: "host-test",
+            User: "",
+            Memory: 0,
+            MemorySwap: 0,
+            AttachStdin: false,
+            AttachStdout: false,
+            AttachStderr: false,
+            Tty: false,
+            OpenStdin: false,
+            StdinOnce: false,
+            Env: null,
+            Cmd: [
+                "/bin/bash",
+                "-c",
+                "apt-get -q -yy -f install libevent-dev"
+            ],
+            Dns: null,
+            Image: "imagename/blah",
+            Volumes: { },
+            VolumesFrom: ""
+        },
+        docker_version: "0.1.7"
+    }
+
+Parameters:
+
+- **image_id** – the id of the image whose JSON you are uploading
+
+**Example Response**:
+
+    HTTP/1.1 200
+    Vary: Accept
+    Content-Type: application/json
+    X-Docker-Registry-Version: 0.6.0
+
+    ""
+
+Status Codes:
+
+- **200** – OK
+- **401** – Requires authorization
+
+### Get image JSON
+
+`GET /v1/images/(image_id)/json`
+
+Get the image JSON (the image's metadata) for a given `image_id`.
+
+**Example Request**:
+
+    GET /v1/images/088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c/json HTTP/1.1
+    Host: registry-1.docker.io
+    Accept: application/json
+    Content-Type: application/json
+    Cookie: (Cookie provided by the Registry)
+
+Parameters:
+
+- **image_id** – the id of the image you want to get
+
+**Example Response**:
+
+    HTTP/1.1 200
+    Vary: Accept
+    Content-Type: application/json
+    X-Docker-Registry-Version: 0.6.0
+    X-Docker-Size: 456789
+    X-Docker-Checksum: b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087
+
+    {
+        id: "088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c",
+        parent: "aeee6396d62273d180a49c96c62e45438d87c7da4a5cf5d2be6bee4e21bc226f",
+        created: "2013-04-30T17:46:10.843673+03:00",
+        container: "8305672a76cc5e3d168f97221106ced35a76ec7ddbb03209b0f0d96bf74f6ef7",
+        container_config: {
+            Hostname: "host-test",
+            User: "",
+            Memory: 0,
+            MemorySwap: 0,
+            AttachStdin: false,
+            AttachStdout: false,
+            AttachStderr: false,
+            Tty: false,
+            OpenStdin: false,
+            StdinOnce: false,
+            Env: null,
+            Cmd: [
+                "/bin/bash",
+                "-c",
+                "apt-get -q -yy -f install libevent-dev"
+            ],
+            Dns: null,
+            Image: "imagename/blah",
+            Volumes: { },
+            VolumesFrom: ""
+        },
+        docker_version: "0.1.7"
+    }
+
+Status Codes:
+
+- **200** – OK
+- **401** – Requires authorization
+- **404** – Image not found
+
+## Ancestry
+
+### Get image ancestry
+
+`GET /v1/images/(image_id)/ancestry`
+
+Get the ancestry for an image given an `image_id`.
+
+**Example Request**:
+
+    GET /v1/images/088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c/ancestry HTTP/1.1
+    Host: registry-1.docker.io
+    Accept: application/json
+    Content-Type: application/json
+    Cookie: (Cookie provided by the Registry)
+
+Parameters:
+
+- **image_id** – the id of the image whose ancestry you want to get
+
+**Example Response**:
+
+    HTTP/1.1 200
+    Vary: Accept
+    Content-Type: application/json
+    X-Docker-Registry-Version: 0.6.0
+
+    ["088b4502f51920fbd9b7c503e87c7a2c05aa3adc3d35e79c031fa126b403200f",
+     "aeee63968d87c7da4a5cf5d2be6bee4e21bc226fd62273d180a49c96c62e4543",
+     "bfa4c5326bc764280b0863b46a4b20d940bc1897ef9c1dfec060604bdc383280",
+     "6ab5893c6927c15a15665191f2c6cf751f5056d8b95ceee32e43c5e8a3648544"]
+
+Status Codes:
+
+- **200** – OK
+- **401** – Requires authorization
+- **404** – Image not found
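+Because the ancestry list is what lets a client reassemble an image from
+its layers, a short client-side sketch may help. As before, this is
+illustrative Python using the `requests` library with authentication
+details omitted; it is not part of the API definition.
+
+    # Illustrative sketch: walk an image's ancestry (not normative).
+    import requests
+
+    REGISTRY = "https://registry-1.docker.io"
+
+    def get_ancestry(image_id):
+        resp = requests.get(
+            "%s/v1/images/%s/ancestry" % (REGISTRY, image_id),
+            headers={"Accept": "application/json"},
+        )
+        resp.raise_for_status()
+        # A list of image ids: the image itself, then its chain of parents.
+        return resp.json()
+
+    # A client would then fetch /v1/images/<id>/json and /v1/images/<id>/layer
+    # for each id in the list to reconstruct the full image.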
+## Tags
+
+### List repository tags
+
+`GET /v1/repositories/(namespace)/(repository)/tags`
+
+Get all of the tags for the given repo.
+
+**Example Request**:
+
+    GET /v1/repositories/reynholm/help-system-server/tags HTTP/1.1
+    Host: registry-1.docker.io
+    Accept: application/json
+    Content-Type: application/json
+    Cookie: (Cookie provided by the Registry)
+
+Parameters:
+
+- **namespace** – namespace for the repo
+- **repository** – name for the repo
+
+**Example Response**:
+
+    HTTP/1.1 200
+    Vary: Accept
+    Content-Type: application/json
+    X-Docker-Registry-Version: 0.6.0
+
+    {
+        "latest": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f",
+        "0.1.1": "b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"
+    }
+
+Status Codes:
+
+- **200** – OK
+- **401** – Requires authorization
+- **404** – Repository not found
+
+### Get image id for a particular tag
+
+`GET /v1/repositories/(namespace)/(repository)/tags/(tag*)`
+
+Get a tag for the given repo.
+
+**Example Request**:
+
+    GET /v1/repositories/reynholm/help-system-server/tags/latest HTTP/1.1
+    Host: registry-1.docker.io
+    Accept: application/json
+    Content-Type: application/json
+    Cookie: (Cookie provided by the Registry)
+
+Parameters:
+
+- **namespace** – namespace for the repo
+- **repository** – name for the repo
+- **tag** – name of tag you want to get
+
+**Example Response**:
+
+    HTTP/1.1 200
+    Vary: Accept
+    Content-Type: application/json
+    X-Docker-Registry-Version: 0.6.0
+
+    "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f"
+
+Status Codes:
+
+- **200** – OK
+- **401** – Requires authorization
+- **404** – Tag not found
+
+### Delete a repository tag
+
+`DELETE /v1/repositories/(namespace)/(repository)/tags/(tag*)`
+
+Delete the tag for the repo.
+
+**Example Request**:
+
+    DELETE /v1/repositories/reynholm/help-system-server/tags/latest HTTP/1.1
+    Host: registry-1.docker.io
+    Accept: application/json
+    Content-Type: application/json
+    Cookie: (Cookie provided by the Registry)
+
+Parameters:
+
+- **namespace** – namespace for the repo
+- **repository** – name for the repo
+- **tag** – name of tag you want to delete
+
+**Example Response**:
+
+    HTTP/1.1 200
+    Vary: Accept
+    Content-Type: application/json
+    X-Docker-Registry-Version: 0.6.0
+
+    ""
+
+Status Codes:
+
+- **200** – OK
+- **401** – Requires authorization
+- **404** – Tag not found
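+The tag endpoints map naturally onto a small client. The sketch below is
+illustrative Python using the `requests` library, with placeholder cookie
+handling; it lists a repository's tags and then sets one, using the
+endpoint described in the next section.
+
+    # Illustrative sketch for the tag endpoints (not normative).
+    import json
+    import requests
+
+    BASE = "https://registry-1.docker.io/v1/repositories"
+
+    def list_tags(namespace, repository, cookies=None):
+        resp = requests.get(
+            "%s/%s/%s/tags" % (BASE, namespace, repository),
+            headers={"Accept": "application/json"},
+            cookies=cookies,  # cookie provided by the registry, if any
+        )
+        resp.raise_for_status()
+        return resp.json()  # e.g. {"latest": "9e89cc6f0bc3...", "0.1.1": "..."}
+
+    def set_tag(namespace, repository, tag, image_id, cookies=None):
+        # The request body is a JSON-encoded string holding the image id,
+        # exactly as in the wire-level example in the next section.
+        resp = requests.put(
+            "%s/%s/%s/tags/%s" % (BASE, namespace, repository, tag),
+            data=json.dumps(image_id),
+            headers={"Content-Type": "application/json"},
+            cookies=cookies,
+        )
+        resp.raise_for_status()  # 400 invalid data, 401 auth, 404 not found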
+### Set a tag for a specified image id
+
+`PUT /v1/repositories/(namespace)/(repository)/tags/(tag*)`
+
+Put a tag for the given repo.
+
+**Example Request**:
+
+    PUT /v1/repositories/reynholm/help-system-server/tags/latest HTTP/1.1
+    Host: registry-1.docker.io
+    Accept: application/json
+    Content-Type: application/json
+    Cookie: (Cookie provided by the Registry)
+
+    "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f"
+
+Parameters:
+
+- **namespace** – namespace for the repo
+- **repository** – name for the repo
+- **tag** – name of tag you want to add
+
+**Example Response**:
+
+    HTTP/1.1 200
+    Vary: Accept
+    Content-Type: application/json
+    X-Docker-Registry-Version: 0.6.0
+
+    ""
+
+Status Codes:
+
+- **200** – OK
+- **400** – Invalid data
+- **401** – Requires authorization
+- **404** – Image not found
+
+## Repositories
+
+### Delete a repository
+
+`DELETE /v1/repositories/(namespace)/(repository)/`
+
+Delete a repository.
+
+**Example Request**:
+
+    DELETE /v1/repositories/reynholm/help-system-server/ HTTP/1.1
+    Host: registry-1.docker.io
+    Accept: application/json
+    Content-Type: application/json
+    Cookie: (Cookie provided by the Registry)
+
+    ""
+
+Parameters:
+
+- **namespace** – namespace for the repo
+- **repository** – name for the repo
+
+**Example Response**:
+
+    HTTP/1.1 200
+    Vary: Accept
+    Content-Type: application/json
+    X-Docker-Registry-Version: 0.6.0
+
+    ""
+
+Status Codes:
+
+- **200** – OK
+- **401** – Requires authorization
+- **404** – Repository not found
+
+## Search
+
+If you need to search the index, this is the endpoint you would use.
+
+`GET /v1/search`
+
+Search the Index given a search term. It accepts
+[GET](http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html#sec9.3)
+requests only.
+
+**Example request**:
+
+    GET /v1/search?q=search_term&page=1&n=25 HTTP/1.1
+    Host: index.docker.io
+    Accept: application/json
+
+Query Parameters:
+
+- **q** – what you want to search for
+- **n** – number of results you want returned per page (default: 25, min: 1, max: 100)
+- **page** – page number of results
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Vary: Accept
+    Content-Type: application/json
+
+    {"num_pages": 1,
+      "num_results": 3,
+      "results" : [
+         {"name": "ubuntu", "description": "An ubuntu image..."},
+         {"name": "centos", "description": "A centos image..."},
+         {"name": "fedora", "description": "A fedora image..."}
+       ],
+      "page_size": 25,
+      "query":"search_term",
+      "page": 1
+     }
+
+Response Items:
+
+- **num_pages** – Total number of pages returned by query
+- **num_results** – Total number of results returned by query
+- **results** – List of results for the current page
+- **page_size** – How many results are returned per page
+- **query** – Your search term
+- **page** – Current page number
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+## Status
+
+### Status check for registry
+
+`GET /v1/_ping`
+
+Check the status of the registry. This endpoint is also used to
+determine if the registry supports SSL.
+
+**Example Request**:
+
+    GET /v1/_ping HTTP/1.1
+    Host: registry-1.docker.io
+    Accept: application/json
+    Content-Type: application/json
+
+    ""
+
+**Example Response**:
+
+    HTTP/1.1 200
+    Vary: Accept
+    Content-Type: application/json
+    X-Docker-Registry-Version: 0.6.0
+
+    ""
+
+Status Codes:
+
+- **200** – OK
+
+## Authorization
+
+This is where we describe the authorization process, including the
+tokens and cookies.
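+To close out this reference, the sketch below exercises the two endpoints
+shown above without authorization headers: the status ping and the index
+search. It is illustrative Python using the `requests` library; the hosts
+are the ones used in the examples and are not otherwise special.
+
+    # Illustrative sketch: ping the registry, then search the index.
+    import requests
+
+    def ping(registry="https://registry-1.docker.io"):
+        resp = requests.get(registry + "/v1/_ping")
+        resp.raise_for_status()
+        # Registry responses carry a version header, as shown above.
+        return resp.headers.get("X-Docker-Registry-Version")
+
+    def search(term, page=1, per_page=25, index="https://index.docker.io"):
+        resp = requests.get(
+            index + "/v1/search",
+            params={"q": term, "page": page, "n": per_page},
+            headers={"Accept": "application/json"},
+        )
+        resp.raise_for_status()
+        return resp.json()["results"]  # [{"name": ..., "description": ...}, ...]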
diff --git a/docs/reference/api/registry_api_client_libraries.md b/docs/reference/api/registry_api_client_libraries.md new file mode 100644 index 00000000..f5d5b3e5 --- /dev/null +++ b/docs/reference/api/registry_api_client_libraries.md @@ -0,0 +1,49 @@

# Docker Registry v1 API client libraries

These libraries have not been tested by the Docker maintainers for
compatibility. Please file issues with the library owners. If you find
more library implementations, please submit a PR with an update to this page
or open an issue in the [Docker](https://github.com/docker/docker/issues)
project and we will add the libraries here.

| Language/Framework           | Name                     | Repository                                            | Status |
|------------------------------|--------------------------|-------------------------------------------------------|--------|
| JavaScript (AngularJS) WebUI | docker-registry-frontend | https://github.com/kwk/docker-registry-frontend      | Active |
| Go                           | docker-reg-client        | https://github.com/CenturyLinkLabs/docker-reg-client | Active |
diff --git a/docs/reference/api/remote_api_client_libraries.md b/docs/reference/api/remote_api_client_libraries.md new file mode 100644 index 00000000..b917abf0 --- /dev/null +++ b/docs/reference/api/remote_api_client_libraries.md @@ -0,0 +1,206 @@ + + +# Docker Remote API client libraries + +These libraries have not been tested by the Docker maintainers for +compatibility. Please file issues with the library owners. If you find +more library implementations, please list them in Docker doc bugs and we +will add the libraries here. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Language/FrameworkNameRepositoryStatus
C#Docker.DotNethttps://github.com/ahmetalpbalkan/Docker.DotNetActive
C++lasote/docker_clienthttp://www.biicode.com/lasote/docker_client (Biicode C++ dependency manager)Active
Erlangerldockerhttps://github.com/proger/erldockerActive
Dartbwu_dockerhttps://github.com/bwu-dart/bwu_dockerActive
Gogo-dockerclienthttps://github.com/fsouza/go-dockerclientActive
Godockerclienthttps://github.com/samalba/dockerclientActive
Gradlegradle-docker-pluginhttps://github.com/gesellix/gradle-docker-pluginActive
Groovydocker-clienthttps://github.com/gesellix/docker-clientActive
Haskelldocker-hshttps://github.com/denibertovic/docker-hsActive
Javadocker-javahttps://github.com/docker-java/docker-javaActive
Javadocker-clienthttps://github.com/spotify/docker-clientActive
Javajclouds-dockerhttps://github.com/jclouds/jclouds-labs/tree/master/dockerActive
JavaScript (NodeJS)dockerodehttps://github.com/apocas/dockerode + Install via NPM: npm install dockerodeActive
JavaScript (NodeJS)docker.iohttps://github.com/appersonlabs/docker.io + Install via NPM: npm install docker.ioActive
JavaScriptdocker-jshttps://github.com/dgoujard/docker-jsOutdated
JavaScript (Angular) WebUIdocker-cphttps://github.com/13W/docker-cpActive
JavaScript (Angular) WebUIdockeruihttps://github.com/crosbymichael/dockeruiActive
JavaScript (Angular) WebUIdockeryhttps://github.com/lexandro/dockeryActive
PerlNet::Dockerhttps://metacpan.org/pod/Net::DockerActive
PerlEixo::Dockerhttps://github.com/alambike/eixo-dockerActive
PHPAlvinehttp://pear.alvine.io/ (alpha)Active
PHPDocker-PHPhttp://stage1.github.io/docker-php/Active
Pythondocker-pyhttps://github.com/docker/docker-pyActive
Rubydocker-apihttps://github.com/swipely/docker-apiActive
Rubydocker-clienthttps://github.com/geku/docker-clientOutdated
Rustdocker-rusthttps://github.com/abh1nav/docker-rustActive
Scalatugboathttps://github.com/softprops/tugboatActive
Scalareactive-dockerhttps://github.com/almoehi/reactive-dockerActive
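All of these libraries wrap the same HTTP API that the `docker` client itself
uses, so a raw request is a handy sanity check when a binding misbehaves. A
minimal sketch with `curl` - this assumes the daemon is listening on the
default Unix socket and that your `curl` has the `--unix-socket` option
(curl 7.40 or later):

    $ curl --unix-socket /var/run/docker.sock http://localhost/version
    {"Version":"1.8.3","ApiVersion":"1.20",...}

The response shown is abbreviated and illustrative; the exact fields depend on
your daemon version.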
diff --git a/docs/reference/builder.md b/docs/reference/builder.md new file mode 100644 index 00000000..073fc6d5 --- /dev/null +++ b/docs/reference/builder.md @@ -0,0 +1,1045 @@

# Dockerfile reference

**Docker can build images automatically** by reading the instructions
from a `Dockerfile`. A `Dockerfile` is a text document that contains all
the commands you would normally execute manually in order to build a
Docker image. By calling `docker build` from your terminal, you can have
Docker build your image step by step, executing the instructions
successively.

This page discusses the specifics of all the instructions you can use in your
`Dockerfile`. To further help you write a clear, readable, maintainable
`Dockerfile`, we've also written a [`Dockerfile` Best Practices
guide](/articles/dockerfile_best-practices). Lastly, you can test your
Dockerfile knowledge with the [Dockerfile tutorial](/userguide/level1).

## Usage

To [*build*](/reference/commandline/cli/#build) an image from a source repository,
create a description file called `Dockerfile` at the root of your repository.
This file describes the steps to assemble the image.

Then call `docker build` with the path of your source repository as the argument
(for example, `.`):

    $ docker build .

The path to the source repository defines where to find the *context* of
the build. The build is run by the Docker daemon, not by the CLI, so the
whole context must be transferred to the daemon. The Docker CLI reports
"Sending build context to Docker daemon" when the context is sent to the daemon.

> **Warning**
> Avoid using your root directory, `/`, as the root of the source repository. The
> `docker build` command will use whatever directory contains the Dockerfile as the build
> context (including all of its subdirectories). The build context will be sent to the
> Docker daemon before building the image, which means if you use `/` as the source
> repository, the entire contents of your hard drive will get sent to the daemon (and
> thus to the machine running the daemon). You probably don't want that.

In most cases, it's best to put each Dockerfile in an empty directory. Then,
only add the files needed for building the Dockerfile to the directory. To
increase the build's performance, you can exclude files and directories by
adding a `.dockerignore` file to the directory. For information about how to
create one, see the [`.dockerignore` file](#dockerignore-file) section on this
page.

You can specify a repository and tag at which to save the new image if
the build succeeds:

    $ docker build -t shykes/myapp .

The Docker daemon will run your steps one-by-one, committing the result
to a new image if necessary, before finally outputting the ID of your
new image. The Docker daemon will automatically clean up the context you
sent.

Note that each instruction is run independently, and causes a new image
to be created - so `RUN cd /tmp` will not have any effect on the next
instructions.

Whenever possible, Docker will re-use the intermediate images,
accelerating `docker build` significantly (indicated by `Using cache` -
see the [`Dockerfile` Best Practices
guide](/articles/dockerfile_best-practices/#build-cache) for more information):

    $ docker build -t SvenDowideit/ambassador .
    Uploading context 10.24 kB
    Uploading context
    Step 1 : FROM docker-ut
     ---> cbba202fe96b
    Step 2 : MAINTAINER SvenDowideit@home.org.au
     ---> Using cache
     ---> 51182097be13
    Step 3 : CMD env | grep _TCP= | sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' | sh && top
     ---> Using cache
     ---> 1a5ffc17324d
    Successfully built 1a5ffc17324d

When you're done with your build, you're ready to look into [*Pushing a
repository to its registry*](/userguide/dockerrepos/#contributing-to-docker-hub).

## Format

Here is the format of the `Dockerfile`:

    # Comment
    INSTRUCTION arguments

Instructions are not case-sensitive; however, convention is for them to
be UPPERCASE in order to distinguish them from arguments more easily.

Docker runs the instructions in a `Dockerfile` in order. **The
first instruction must be \`FROM\`** in order to specify the [*Base
Image*](/terms/image/#base-image) from which you are building.

Docker will treat lines that *begin* with `#` as a
comment. A `#` marker anywhere else in the line will
be treated as an argument. This allows statements like:

    # Comment
    RUN echo 'we are running some # of cool things'

Here is the set of instructions you can use in a `Dockerfile` for building
images.

### Environment replacement

Environment variables (declared with [the `ENV` statement](#env)) can also be
used in certain instructions as variables to be interpreted by the
`Dockerfile`. Escapes are also handled for including variable-like syntax
into a statement literally.

Environment variables are notated in the `Dockerfile` either with
`$variable_name` or `${variable_name}`. They are treated equivalently and the
brace syntax is typically used to address issues with variable names with no
whitespace, like `${foo}_bar`.

The `${variable_name}` syntax also supports a few of the standard `bash`
modifiers as specified below:

* `${variable:-word}` indicates that if `variable` is set then the result
  will be that value. If `variable` is not set then `word` will be the result.
* `${variable:+word}` indicates that if `variable` is set then `word` will be
  the result, otherwise the result is the empty string.

In all cases, `word` can be any string, including additional environment
variables.

Escaping is possible by adding a `\` before the variable: `\$foo` or `\${foo}`,
for example, will translate to `$foo` and `${foo}` literals respectively.

Example (parsed representation is displayed after the `#`):

    FROM busybox
    ENV foo /bar
    WORKDIR ${foo}    # WORKDIR /bar
    ADD . $foo        # ADD . /bar
    COPY \$foo /quux  # COPY $foo /quux

The instructions that handle environment variables in the `Dockerfile` are:

* `ENV`
* `ADD`
* `COPY`
* `WORKDIR`
* `EXPOSE`
* `VOLUME`
* `USER`

`ONBUILD` instructions are **NOT** supported for environment replacement, even
for the instructions listed above.

Environment variable substitution will use the same value for each variable
throughout the entire command. In other words, in this example:

    ENV abc=hello
    ENV abc=bye def=$abc
    ENV ghi=$abc

will result in `def` having a value of `hello`, not `bye`. However,
`ghi` will have a value of `bye` because it is not part of the same command
that set `abc` to `bye`.
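To make the two modifiers described above concrete, here is a small sketch
(the image and variable names are illustrative; the parsed result is shown
after each `#`, following the convention above):

    FROM busybox
    ENV user someuser
    # user is set, so ${user:-guest} resolves to its value
    WORKDIR /home/${user:-guest}    # WORKDIR /home/someuser
    # group is NOT set, so ${group:-staff} falls back to the default word
    WORKDIR /data/${group:-staff}   # WORKDIR /data/staff
    # user is set, so ${user:+admin} substitutes the alternate word
    ENV role ${user:+admin}         # ENV role admin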
### .dockerignore file

If a file named `.dockerignore` exists in the root of `PATH`, then Docker
interprets it as a newline-separated list of exclusion patterns. Docker excludes
files or directories relative to `PATH` that match these exclusion patterns. If
there are any `.dockerignore` files in `PATH` subdirectories, Docker treats
them as normal files.

Filepaths in `.dockerignore` are absolute with the current directory as the
root. Wildcards are allowed but the search is not recursive. Globbing (file name
expansion) is done using Go's
[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules.

You can specify exceptions to exclusion rules. To do this, simply prefix a
pattern with an `!` (exclamation mark) in the same way you would in a
`.gitignore` file. Currently there is no support for regular expressions.
Formats like `[^temp*]` are ignored.

The following is an example `.dockerignore` file:

```
*/temp*
*/*/temp*
temp?
*.md
!LICENSE.md
```

This file causes the following build behavior:

| Rule          | Behavior                                                                                                                                                                      |
|---------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `*/temp*`     | Exclude all files with names starting with `temp` in any subdirectory below the root directory. For example, a file named `/somedir/temporary.txt` is ignored.                |
| `*/*/temp*`   | Exclude files starting with name `temp` from any subdirectory that is two levels below the root directory. For example, the file `/somedir/subdir/temporary.txt` is ignored.  |
| `temp?`       | Exclude the files that match the pattern in the root directory. For example, the files `tempa` and `tempb` in the root directory are ignored.                                 |
| `*.md`        | Exclude all markdown files in the root directory.                                                                                                                             |
| `!LICENSE.md` | The exception to the markdown exclusion: the file `LICENSE.md` is included in the build.                                                                                      |

The placement of `!` exception rules influences the matching algorithm; the
last line of the `.dockerignore` that matches a particular file determines
whether it is included or excluded. In the above example, the `LICENSE.md` file
matches both the `*.md` and `!LICENSE.md` rules. If you reverse the lines in the
example:

```
*/temp*
*/*/temp*
temp?
!LICENSE.md
*.md
```

The build would exclude `LICENSE.md` because the last `*.md` rule adds all
Markdown files in the root directory back onto the ignore list. The
`!LICENSE.md` rule has no effect because the subsequent `*.md` rule overrides
it.

You can even use the `.dockerignore` file to ignore the `Dockerfile` and
`.dockerignore` files. This is useful if you are copying files from the root of
the build context into your new container but do not want to include the
`Dockerfile` or `.dockerignore` files (e.g. `ADD . /someDir/`).


## FROM

    FROM <image>

Or

    FROM <image>:<tag>

Or

    FROM <image>@<digest>

The `FROM` instruction sets the [*Base Image*](/terms/image/#base-image)
for subsequent instructions. As such, a valid `Dockerfile` must have `FROM` as
its first instruction. The image can be any valid image – it is especially easy
to start by **pulling an image** from the [*Public Repositories*](
/userguide/dockerrepos).

`FROM` must be the first non-comment instruction in the `Dockerfile`.

`FROM` can appear multiple times within a single `Dockerfile` in order to create
multiple images. Simply make a note of the last image ID output by the commit
before each new `FROM` command.

The `tag` or `digest` values are optional. If you omit either of them, the builder
assumes `latest` by default. The builder returns an error if it cannot match
the `tag` value.
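A concrete instance of each of the three forms, using an illustrative image
name and tag (the digest is left as a placeholder rather than a real value):

    FROM ubuntu
    FROM ubuntu:14.04
    FROM ubuntu@<digest>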
## MAINTAINER

    MAINTAINER <name>

The `MAINTAINER` instruction allows you to set the *Author* field of the
generated images.

## RUN

RUN has two forms:

- `RUN <command>` (the command is run in a shell - `/bin/sh -c` - *shell* form)
- `RUN ["executable", "param1", "param2"]` (*exec* form)

The `RUN` instruction will execute any commands in a new layer on top of the
current image and commit the results. The resulting committed image will be
used for the next step in the `Dockerfile`.

Layering `RUN` instructions and generating commits conforms to the core
concepts of Docker where commits are cheap and containers can be created from
any point in an image's history, much like source control.

The *exec* form makes it possible to avoid shell string munging, and to `RUN`
commands using a base image that does not contain `/bin/sh`.

> **Note**:
> To use a different shell, other than '/bin/sh', use the *exec* form
> passing in the desired shell. For example,
> `RUN ["/bin/bash", "-c", "echo hello"]`

> **Note**:
> The *exec* form is parsed as a JSON array, which means that
> you must use double-quotes (") around words not single-quotes (').

> **Note**:
> Unlike the *shell* form, the *exec* form does not invoke a command shell.
> This means that normal shell processing does not happen. For example,
> `RUN [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`.
> If you want shell processing then either use the *shell* form or execute
> a shell directly, for example: `RUN [ "sh", "-c", "echo $HOME" ]`.

The cache for `RUN` instructions isn't invalidated automatically during
the next build. The cache for an instruction like
`RUN apt-get dist-upgrade -y` will be reused during the next build. The
cache for `RUN` instructions can be invalidated by using the `--no-cache`
flag, for example `docker build --no-cache`.

See the [`Dockerfile` Best Practices
guide](/articles/dockerfile_best-practices/#build-cache) for more information.

The cache for `RUN` instructions can be invalidated by `ADD` instructions. See
[below](#add) for details.

### Known issues (RUN)

- [Issue 783](https://github.com/docker/docker/issues/783) is about file
  permissions problems that can occur when using the AUFS file system. You
  might notice it during an attempt to `rm` a file, for example.

  For systems that have a recent aufs version (i.e., the `dirperm1` mount option can
  be set), docker will attempt to fix the issue automatically by mounting
  the layers with the `dirperm1` option. More details on the `dirperm1` option can be
  found at the [`aufs` man page](http://aufs.sourceforge.net/aufs3/man.html).

  If your system doesn't have support for `dirperm1`, the issue describes a workaround.

## CMD

The `CMD` instruction has three forms:

- `CMD ["executable","param1","param2"]` (*exec* form, this is the preferred form)
- `CMD ["param1","param2"]` (as *default parameters to ENTRYPOINT*)
- `CMD command param1 param2` (*shell* form)

There can only be one `CMD` instruction in a `Dockerfile`. If you list more than one `CMD`
then only the last `CMD` will take effect.

**The main purpose of a `CMD` is to provide defaults for an executing
container.** These defaults can include an executable, or they can omit
the executable, in which case you must specify an `ENTRYPOINT`
instruction as well.
> **Note**:
> If `CMD` is used to provide default arguments for the `ENTRYPOINT`
> instruction, both the `CMD` and `ENTRYPOINT` instructions should be specified
> with the JSON array format.

> **Note**:
> The *exec* form is parsed as a JSON array, which means that
> you must use double-quotes (") around words not single-quotes (').

> **Note**:
> Unlike the *shell* form, the *exec* form does not invoke a command shell.
> This means that normal shell processing does not happen. For example,
> `CMD [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`.
> If you want shell processing then either use the *shell* form or execute
> a shell directly, for example: `CMD [ "sh", "-c", "echo $HOME" ]`.

When used in the shell or exec formats, the `CMD` instruction sets the command
to be executed when running the image.

If you use the *shell* form of the `CMD`, then the `<command>` will execute in
`/bin/sh -c`:

    FROM ubuntu
    CMD echo "This is a test." | wc -

If you want to **run your** `<command>` **without a shell** then you must
express the command as a JSON array and give the full path to the executable.
**This array form is the preferred format of `CMD`.** Any additional parameters
must be individually expressed as strings in the array:

    FROM ubuntu
    CMD ["/usr/bin/wc","--help"]

If you would like your container to run the same executable every time, then
you should consider using `ENTRYPOINT` in combination with `CMD`. See
[*ENTRYPOINT*](#entrypoint).

If the user specifies arguments to `docker run` then they will override the
default specified in `CMD`.

> **Note**:
> Don't confuse `RUN` with `CMD`. `RUN` actually runs a command and commits
> the result; `CMD` does not execute anything at build time, but specifies
> the intended command for the image.

## LABEL

    LABEL <key>=<value> <key>=<value> <key>=<value> ...

The `LABEL` instruction adds metadata to an image. A `LABEL` is a
key-value pair. To include spaces within a `LABEL` value, use quotes and
backslashes as you would in command-line parsing.

    LABEL "com.example.vendor"="ACME Incorporated"

An image can have more than one label. To specify multiple labels, separate each
key-value pair with whitespace.

    LABEL com.example.label-with-value="foo"
    LABEL version="1.0"
    LABEL description="This text illustrates \
    that label-values can span multiple lines."

Docker recommends combining labels in a single `LABEL` instruction where
possible. Each `LABEL` instruction produces a new layer, which can result in an
inefficient image if you use many labels; the four `LABEL` instructions shown
above produce four image layers. The following example sets three labels in a
single instruction, and therefore a single layer.

    LABEL multi.label1="value1" multi.label2="value2" other="value3"

Labels are additive, and include `LABEL`s inherited from `FROM` images. As the system
encounters and then applies a new label, new `key`s override any previous labels
with identical keys.

To view an image's labels, use the `docker inspect` command.

    "Labels": {
        "com.example.vendor": "ACME Incorporated",
        "com.example.label-with-value": "foo",
        "version": "1.0",
        "description": "This text illustrates that label-values can span multiple lines.",
        "multi.label1": "value1",
        "multi.label2": "value2",
        "other": "value3"
    },

## EXPOSE

    EXPOSE <port> [<port>...]

The `EXPOSE` instruction informs Docker that the container will listen on the
specified network ports at runtime.
Docker uses this information to interconnect
containers using links (see the [Docker User
Guide](/userguide/dockerlinks)) and to determine which ports to expose to the
host when [using the -P flag](/reference/run/#expose-incoming-ports).

> **Note**:
> `EXPOSE` doesn't define which ports can be exposed to the host or make ports
> accessible from the host by default. To expose ports to the host, at runtime,
> [use the `-p` flag](/userguide/dockerlinks) or
> [the -P flag](/reference/run/#expose-incoming-ports).

## ENV

    ENV <key> <value>
    ENV <key>=<value> ...

The `ENV` instruction sets the environment variable `<key>` to the value
`<value>`. This value will be in the environment of all "descendent" `Dockerfile`
commands and can be [replaced inline](#environment-replacement) in many as well.

The `ENV` instruction has two forms. The first form, `ENV <key> <value>`,
will set a single variable to a value. The entire string after the first
space will be treated as the `<value>` - including characters such as
spaces and quotes.

The second form, `ENV <key>=<value> ...`, allows for multiple variables to
be set at one time. Notice that the second form uses the equals sign (=)
in the syntax, while the first form does not. Like command line parsing,
quotes and backslashes can be used to include spaces within values.

For example:

    ENV myName="John Doe" myDog=Rex\ The\ Dog \
        myCat=fluffy

and

    ENV myName John Doe
    ENV myDog Rex The Dog
    ENV myCat fluffy

will yield the same net results in the final container, but the first form
does it all in one layer.

The environment variables set using `ENV` will persist when a container is run
from the resulting image. You can view the values using `docker inspect`, and
change them using `docker run --env <key>=<value>`.

> **Note**:
> Environment persistence can cause unexpected effects. For example,
> setting `ENV DEBIAN_FRONTEND noninteractive` may confuse apt-get
> users on a Debian-based image. To set a value for a single command, use
> `RUN <key>=<value> <command>`.

## ADD

ADD has two forms:

- `ADD <src>... <dest>`
- `ADD ["<src>",... "<dest>"]` (this form is required for paths containing
  whitespace)

The `ADD` instruction copies new files, directories or remote file URLs from `<src>`
and adds them to the filesystem of the container at the path `<dest>`.

Multiple `<src>` resources may be specified but if they are files or
directories then they must be relative to the source directory that is
being built (the context of the build).

Each `<src>` may contain wildcards and matching will be done using Go's
[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules.
For most command line uses this should act as expected, for example:

    ADD hom* /mydir/        # adds all files starting with "hom"
    ADD hom?.txt /mydir/    # ? is replaced with any single character

The `<dest>` is an absolute path, or a path relative to `WORKDIR`, into which
the source will be copied inside the destination container.

    ADD test aDir/          # adds "test" to `WORKDIR`/aDir/

All new files and directories are created with a UID and GID of 0.

In the case where `<src>` is a remote file URL, the destination will
have permissions of 600. If the remote file being retrieved has an HTTP
`Last-Modified` header, the timestamp from that header will be used
to set the `mtime` on the destination file. However, like any other file
processed during an `ADD`, `mtime` will not be included in the determination
of whether or not the file has changed and the cache should be updated.
> **Note**:
> If you build by passing a `Dockerfile` through STDIN (`docker
> build - < somefile`), there is no build context, so the `Dockerfile`
> can only contain a URL based `ADD` instruction. You can also pass a
> compressed archive through STDIN (`docker build - < archive.tar.gz`);
> the `Dockerfile` at the root of the archive and the rest of the
> archive will get used as the context of the build.

> **Note**:
> If your URL files are protected using authentication, you
> will need to use `RUN wget`, `RUN curl` or use another tool from
> within the container as the `ADD` instruction does not support
> authentication.

> **Note**:
> The first encountered `ADD` instruction will invalidate the cache for all
> following instructions from the Dockerfile if the contents of `<src>` have
> changed. This includes invalidating the cache for `RUN` instructions.
> See the [`Dockerfile` Best Practices
> guide](/articles/dockerfile_best-practices/#build-cache) for more information.


The copy obeys the following rules:

- The `<src>` path must be inside the *context* of the build;
  you cannot `ADD ../something /something`, because the first step of a
  `docker build` is to send the context directory (and subdirectories) to the
  docker daemon.

- If `<src>` is a URL and `<dest>` does not end with a trailing slash, then a
  file is downloaded from the URL and copied to `<dest>`.

- If `<src>` is a URL and `<dest>` does end with a trailing slash, then the
  filename is inferred from the URL and the file is downloaded to
  `<dest>/<filename>`. For instance, `ADD http://example.com/foobar /` would
  create the file `/foobar`. The URL must have a nontrivial path so that an
  appropriate filename can be discovered in this case (`http://example.com`
  will not work).

- If `<src>` is a directory, the entire contents of the directory are copied,
  including filesystem metadata.

> **Note**:
> The directory itself is not copied, just its contents.

- If `<src>` is a *local* tar archive in a recognized compression format
  (identity, gzip, bzip2 or xz) then it is unpacked as a directory. Resources
  from *remote* URLs are **not** decompressed. When a directory is copied or
  unpacked, it has the same behavior as `tar -x`: the result is the union of:

    1. Whatever existed at the destination path and
    2. The contents of the source tree, with conflicts resolved in favor
       of "2." on a file-by-file basis.

- If `<src>` is any other kind of file, it is copied individually along with
  its metadata. In this case, if `<dest>` ends with a trailing slash `/`, it
  will be considered a directory and the contents of `<src>` will be written
  at `<dest>/base(<src>)`.

- If multiple `<src>` resources are specified, either directly or due to the
  use of a wildcard, then `<dest>` must be a directory, and it must end with
  a slash `/`.

- If `<dest>` does not end with a trailing slash, it will be considered a
  regular file and the contents of `<src>` will be written at `<dest>`.

- If `<dest>` doesn't exist, it is created along with all missing directories
  in its path.

## COPY

COPY has two forms:

- `COPY <src>... <dest>`
- `COPY ["<src>",... "<dest>"]` (this form is required for paths containing
  whitespace)

The `COPY` instruction copies new files or directories from `<src>`
and adds them to the filesystem of the container at the path `<dest>`.

Multiple `<src>` resources may be specified but they must be relative
to the source directory that is being built (the context of the build).
Each `<src>` may contain wildcards and matching will be done using Go's
[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules.
For most command line uses this should act as expected, for example:

    COPY hom* /mydir/        # adds all files starting with "hom"
    COPY hom?.txt /mydir/    # ? is replaced with any single character

The `<dest>` is an absolute path, or a path relative to `WORKDIR`, into which
the source will be copied inside the destination container.

    COPY test aDir/          # adds "test" to `WORKDIR`/aDir/

All new files and directories are created with a UID and GID of 0.

> **Note**:
> If you build using STDIN (`docker build - < somefile`), there is no
> build context, so `COPY` can't be used.

The copy obeys the following rules:

- The `<src>` path must be inside the *context* of the build;
  you cannot `COPY ../something /something`, because the first step of a
  `docker build` is to send the context directory (and subdirectories) to the
  docker daemon.

- If `<src>` is a directory, the entire contents of the directory are copied,
  including filesystem metadata.

> **Note**:
> The directory itself is not copied, just its contents.

- If `<src>` is any other kind of file, it is copied individually along with
  its metadata. In this case, if `<dest>` ends with a trailing slash `/`, it
  will be considered a directory and the contents of `<src>` will be written
  at `<dest>/base(<src>)`.

- If multiple `<src>` resources are specified, either directly or due to the
  use of a wildcard, then `<dest>` must be a directory, and it must end with
  a slash `/`.

- If `<dest>` does not end with a trailing slash, it will be considered a
  regular file and the contents of `<src>` will be written at `<dest>`.

- If `<dest>` doesn't exist, it is created along with all missing directories
  in its path.

## ENTRYPOINT

ENTRYPOINT has two forms:

- `ENTRYPOINT ["executable", "param1", "param2"]`
  (the preferred *exec* form)
- `ENTRYPOINT command param1 param2`
  (*shell* form)

An `ENTRYPOINT` allows you to configure a container that will run as an executable.

For example, the following will start nginx with its default content, listening
on port 80:

    docker run -i -t --rm -p 80:80 nginx

Command line arguments to `docker run <image>` will be appended after all
elements in an *exec* form `ENTRYPOINT`, and will override all elements specified
using `CMD`.
This allows arguments to be passed to the entry point, i.e., `docker run <image> -d`
will pass the `-d` argument to the entry point.
You can override the `ENTRYPOINT` instruction using the `docker run --entrypoint`
flag.

The *shell* form prevents any `CMD` or `run` command line arguments from being
used, but has the disadvantage that your `ENTRYPOINT` will be started as a
subcommand of `/bin/sh -c`, which does not pass signals.
This means that the executable will not be the container's `PID 1` - and
will _not_ receive Unix signals - so your executable will not receive a
`SIGTERM` from `docker stop <container>`.

Only the last `ENTRYPOINT` instruction in the `Dockerfile` will have an effect.

### Exec form ENTRYPOINT example

You can use the *exec* form of `ENTRYPOINT` to set fairly stable default commands
and arguments and then use either form of `CMD` to set additional defaults that
are more likely to be changed.
    FROM ubuntu
    ENTRYPOINT ["top", "-b"]
    CMD ["-c"]

When you run the container, you can see that `top` is the only process:

    $ docker run -it --rm --name test top -H
    top - 08:25:00 up  7:27,  0 users,  load average: 0.00, 0.01, 0.05
    Threads:   1 total,   1 running,   0 sleeping,   0 stopped,   0 zombie
    %Cpu(s):  0.1 us,  0.1 sy,  0.0 ni, 99.7 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
    KiB Mem:   2056668 total,  1616832 used,   439836 free,    99352 buffers
    KiB Swap:  1441840 total,        0 used,  1441840 free.  1324440 cached Mem

      PID USER      PR  NI    VIRT    RES    SHR S  %CPU %MEM     TIME+ COMMAND
        1 root      20   0   19744   2336   2080 R   0.0  0.1   0:00.04 top

To examine the result further, you can use `docker exec`:

    $ docker exec -it test ps aux
    USER       PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
    root         1  2.6  0.1  19752  2352 ?        Ss+  08:24   0:00 top -b -H
    root         7  0.0  0.1  15572  2164 ?        R+   08:25   0:00 ps aux

And you can gracefully request `top` to shut down using `docker stop test`.

The following `Dockerfile` shows using the `ENTRYPOINT` to run Apache in the
foreground (i.e., as `PID 1`):

```
FROM debian:stable
RUN apt-get update && apt-get install -y --force-yes apache2
EXPOSE 80 443
VOLUME ["/var/www", "/var/log/apache2", "/etc/apache2"]
ENTRYPOINT ["/usr/sbin/apache2ctl", "-D", "FOREGROUND"]
```

If you need to write a starter script for a single executable, you can ensure that
the final executable receives the Unix signals by using `exec` and `gosu`
commands:

```bash
#!/bin/bash
set -e

if [ "$1" = 'postgres' ]; then
    chown -R postgres "$PGDATA"

    if [ -z "$(ls -A "$PGDATA")" ]; then
        gosu postgres initdb
    fi

    exec gosu postgres "$@"
fi

exec "$@"
```

Lastly, if you need to do some extra cleanup (or communicate with other containers)
on shutdown, or are co-ordinating more than one executable, you may need to ensure
that the `ENTRYPOINT` script receives the Unix signals, passes them on, and then
does some more work:

```
#!/bin/sh
# Note: I've written this using sh so it works in the busybox container too

# USE the trap if you need to also do manual cleanup after the service is stopped,
# or need to start multiple services in the one container
trap "echo TRAPed signal" HUP INT QUIT KILL TERM

# start service in background here
/usr/sbin/apachectl start

echo "[hit enter key to exit] or run 'docker stop <container>'"
read

# stop service and clean up here
echo "stopping apache"
/usr/sbin/apachectl stop

echo "exited $0"
```

If you run this image with `docker run -it --rm -p 80:80 --name test apache`,
you can then examine the container's processes with `docker exec`, or `docker top`,
and then ask the script to stop Apache:

```bash
$ docker exec -it test ps aux
USER       PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
root         1  0.1  0.0   4448   692 ?        Ss+  00:42   0:00 /bin/sh /run.sh 123 cmd cmd2
root        19  0.0  0.2  71304  4440 ?        Ss   00:42   0:00 /usr/sbin/apache2 -k start
www-data    20  0.2  0.2 360468  6004 ?        Sl   00:42   0:00 /usr/sbin/apache2 -k start
www-data    21  0.2  0.2 360468  6000 ?        Sl   00:42   0:00 /usr/sbin/apache2 -k start
root        81  0.0  0.1  15572  2140 ?        R+   00:44   0:00 ps aux
$ docker top test
PID                 USER                COMMAND
10035               root                {run.sh} /bin/sh /run.sh 123 cmd cmd2
10054               root                /usr/sbin/apache2 -k start
10055               33                  /usr/sbin/apache2 -k start
10056               33                  /usr/sbin/apache2 -k start
$ /usr/bin/time docker stop test
test
real	0m 0.27s
user	0m 0.03s
sys	0m 0.03s
```

> **Note:** you can override the `ENTRYPOINT` setting using `--entrypoint`,
> but this can only set the binary to *exec* (no `sh -c` will be used).

> **Note**:
> The *exec* form is parsed as a JSON array, which means that
> you must use double-quotes (") around words not single-quotes (').

> **Note**:
> Unlike the *shell* form, the *exec* form does not invoke a command shell.
> This means that normal shell processing does not happen. For example,
> `ENTRYPOINT [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`.
> If you want shell processing then either use the *shell* form or execute
> a shell directly, for example: `ENTRYPOINT [ "sh", "-c", "echo $HOME" ]`.
> Variables that are defined in the `Dockerfile` using `ENV` will be substituted by
> the `Dockerfile` parser.

### Shell form ENTRYPOINT example

You can specify a plain string for the `ENTRYPOINT` and it will execute in `/bin/sh -c`.
This form will use shell processing to substitute shell environment variables,
and will ignore any `CMD` or `docker run` command line arguments.
To ensure that `docker stop` will signal any long running `ENTRYPOINT` executable
correctly, you need to remember to start it with `exec`:

    FROM ubuntu
    ENTRYPOINT exec top -b

When you run this image, you'll see the single `PID 1` process:

    $ docker run -it --rm --name test top
    Mem: 1704520K used, 352148K free, 0K shrd, 0K buff, 140368121167873K cached
    CPU:   5% usr   0% sys   0% nic  94% idle   0% io   0% irq   0% sirq
    Load average: 0.08 0.03 0.05 2/98 6
      PID  PPID USER     STAT   VSZ %VSZ %CPU COMMAND
        1     0 root     R     3164   0%   0% top -b

Which will exit cleanly on `docker stop`:

    $ /usr/bin/time docker stop test
    test
    real	0m 0.20s
    user	0m 0.02s
    sys	0m 0.04s

If you forget to add `exec` to the beginning of your `ENTRYPOINT`:

    FROM ubuntu
    ENTRYPOINT top -b
    CMD --ignored-param1

You can then run it (giving it a name for the next step):

    $ docker run -it --name test top --ignored-param2
    Mem: 1704184K used, 352484K free, 0K shrd, 0K buff, 140621524238337K cached
    CPU:   9% usr   2% sys   0% nic  88% idle   0% io   0% irq   0% sirq
    Load average: 0.01 0.02 0.05 2/101 7
      PID  PPID USER     STAT   VSZ %VSZ %CPU COMMAND
        1     0 root     S     3168   0%   0% /bin/sh -c top -b cmd cmd2
        7     1 root     R     3164   0%   0% top -b

You can see from the output of `top` that the specified `ENTRYPOINT` is not `PID 1`.

If you then run `docker stop test`, the container will not exit cleanly - the
`stop` command will be forced to send a `SIGKILL` after the timeout:

    $ docker exec -it test ps aux
    PID   USER     COMMAND
        1 root     /bin/sh -c top -b cmd cmd2
        7 root     top -b
        8 root     ps aux
    $ /usr/bin/time docker stop test
    test
    real	0m 10.19s
    user	0m 0.04s
    sys	0m 0.03s

## VOLUME

    VOLUME ["/data"]

The `VOLUME` instruction creates a mount point with the specified name
and marks it as holding externally mounted volumes from native host or other
containers. The value can be a JSON array, `VOLUME ["/var/log/"]`, or a plain
string with multiple arguments, such as `VOLUME /var/log` or `VOLUME /var/log /var/db`.
For more information/examples and mounting instructions via the
Docker client, refer to the
[*Share Directories via Volumes*](/userguide/dockervolumes/#volume)
documentation.

The `docker run` command initializes the newly created volume with any data
that exists at the specified location within the base image. For example,
consider the following Dockerfile snippet:

    FROM ubuntu
    RUN mkdir /myvol
    RUN echo "hello world" > /myvol/greeting
    VOLUME /myvol

This Dockerfile results in an image that causes `docker run` to
create a new mount point at `/myvol` and copy the `greeting` file
into the newly created volume.

> **Note**:
> The list is parsed as a JSON array, which means that
> you must use double-quotes (") around words not single-quotes (').

## USER

    USER daemon

The `USER` instruction sets the user name or UID to use when running the image
and for any `RUN`, `CMD` and `ENTRYPOINT` instructions that follow it in the
`Dockerfile`.

## WORKDIR

    WORKDIR /path/to/workdir

The `WORKDIR` instruction sets the working directory for any `RUN`, `CMD`,
`ENTRYPOINT`, `COPY` and `ADD` instructions that follow it in the `Dockerfile`.

It can be used multiple times in the same `Dockerfile`. If a relative path
is provided, it will be relative to the path of the previous `WORKDIR`
instruction. For example:

    WORKDIR /a
    WORKDIR b
    WORKDIR c
    RUN pwd

The output of the final `pwd` command in this `Dockerfile` would be
`/a/b/c`.

The `WORKDIR` instruction can resolve environment variables previously set using
`ENV`. You can only use environment variables explicitly set in the `Dockerfile`.
For example:

    ENV DIRPATH /path
    WORKDIR $DIRPATH/$DIRNAME

The output of the final `pwd` command in this `Dockerfile` would be
`/path/$DIRNAME`.

## ONBUILD

    ONBUILD [INSTRUCTION]

The `ONBUILD` instruction adds to the image a *trigger* instruction to
be executed at a later time, when the image is used as the base for
another build. The trigger will be executed in the context of the
downstream build, as if it had been inserted immediately after the
`FROM` instruction in the downstream `Dockerfile`.

Any build instruction can be registered as a trigger.

This is useful if you are building an image which will be used as a base
to build other images, for example an application build environment or a
daemon which may be customized with user-specific configuration.

For example, if your image is a reusable Python application builder, it
will require application source code to be added in a particular
directory, and it might require a build script to be called *after*
that. You can't just call `ADD` and `RUN` now, because you don't yet
have access to the application source code, and it will be different for
each application build. You could simply provide application developers
with a boilerplate `Dockerfile` to copy-paste into their application, but
that is inefficient, error-prone and difficult to update because it
mixes with application-specific code.

The solution is to use `ONBUILD` to register advance instructions to
run later, during the next build stage.

Here's how it works:

1. When it encounters an `ONBUILD` instruction, the builder adds a
   trigger to the metadata of the image being built. The instruction
   does not otherwise affect the current build.
2. At the end of the build, a list of all triggers is stored in the
   image manifest, under the key `OnBuild`. They can be inspected with
   the `docker inspect` command.
3. Later the image may be used as a base for a new build, using the
   `FROM` instruction. As part of processing the `FROM` instruction,
   the downstream builder looks for `ONBUILD` triggers, and executes
   them in the same order they were registered. If any of the triggers
   fail, the `FROM` instruction is aborted which in turn causes the
   build to fail. If all triggers succeed, the `FROM` instruction
   completes and the build continues as usual.
4. Triggers are cleared from the final image after being executed. In
   other words they are not inherited by "grand-children" builds.

For example you might add something like this:

    [...]
    ONBUILD ADD . /app/src
    ONBUILD RUN /usr/local/bin/python-build --dir /app/src
    [...]

> **Warning**: Chaining `ONBUILD` instructions using `ONBUILD ONBUILD` isn't allowed.

> **Warning**: The `ONBUILD` instruction may not trigger `FROM` or `MAINTAINER` instructions.

## Dockerfile examples

    # Nginx
    #
    # VERSION               0.0.1

    FROM      ubuntu
    MAINTAINER Victor Vieux <victor@docker.com>

    LABEL Description="This image is used to start the foobar executable" Vendor="ACME Products" Version="1.0"
    RUN apt-get update && apt-get install -y inotify-tools nginx apache2 openssh-server

    # Firefox over VNC
    #
    # VERSION               0.3

    FROM ubuntu

    # Install vnc, xvfb in order to create a 'fake' display and firefox
    RUN apt-get update && apt-get install -y x11vnc xvfb firefox
    RUN mkdir ~/.vnc
    # Setup a password
    RUN x11vnc -storepasswd 1234 ~/.vnc/passwd
    # Autostart firefox (might not be the best way, but it does the trick)
    RUN bash -c 'echo "firefox" >> /.bashrc'

    EXPOSE 5900
    CMD    ["x11vnc", "-forever", "-usepw", "-create"]

    # Multiple images example
    #
    # VERSION               0.1

    FROM ubuntu
    RUN echo foo > bar
    # Will output something like ===> 907ad6c2736f

    FROM ubuntu
    RUN echo moo > oink
    # Will output something like ===> 695d7793cbe4

    # You'll now have two images, 907ad6c2736f with /bar, and 695d7793cbe4 with
    # /oink.

diff --git a/docs/reference/commandline/attach.md b/docs/reference/commandline/attach.md new file mode 100644 index 00000000..89cc3f25 --- /dev/null +++ b/docs/reference/commandline/attach.md @@ -0,0 +1,91 @@

# attach

    Usage: docker attach [OPTIONS] CONTAINER

    Attach to a running container

      --no-stdin=false    Do not attach STDIN
      --sig-proxy=true    Proxy all received signals to the process

The `docker attach` command allows you to attach to a running container using
the container's ID or name, either to view its ongoing output or to control it
interactively. You can attach to the same contained process multiple times
simultaneously, screen sharing style, or quickly view the progress of your
daemonized process.

You can detach from the container and leave it running with `CTRL-p CTRL-q`
(for a quiet exit) or with `CTRL-c` if `--sig-proxy` is false.

If `--sig-proxy` is true (the default), `CTRL-c` sends a `SIGINT` to the
container.

> **Note:**
> A process running as PID 1 inside a container is treated specially by
> Linux: it ignores any signal with the default action. So, the process
> will not terminate on `SIGINT` or `SIGTERM` unless it is coded to do
> so.

It is forbidden to redirect the standard input of a `docker attach` command
while attaching to a tty-enabled container (i.e.: launched with `-t`).
+ +#### Examples + + $ docker run -d --name topdemo ubuntu /usr/bin/top -b + $ docker attach topdemo + top - 02:05:52 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 + Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + Cpu(s): 0.1%us, 0.2%sy, 0.0%ni, 99.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st + Mem: 373572k total, 355560k used, 18012k free, 27872k buffers + Swap: 786428k total, 0k used, 786428k free, 221740k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17200 1116 912 R 0 0.3 0:00.03 top + + top - 02:05:55 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 + Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + Cpu(s): 0.0%us, 0.2%sy, 0.0%ni, 99.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st + Mem: 373572k total, 355244k used, 18328k free, 27872k buffers + Swap: 786428k total, 0k used, 786428k free, 221776k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top + + + top - 02:05:58 up 3:06, 0 users, load average: 0.01, 0.02, 0.05 + Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + Cpu(s): 0.2%us, 0.3%sy, 0.0%ni, 99.5%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st + Mem: 373572k total, 355780k used, 17792k free, 27880k buffers + Swap: 786428k total, 0k used, 786428k free, 221776k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top + ^C$ + $ echo $? + 0 + $ docker ps -a | grep topdemo + 7998ac8581f9 ubuntu:14.04 "/usr/bin/top -b" 38 seconds ago Exited (0) 21 seconds ago topdemo + +And in this second example, you can see the exit code returned by the `bash` +process is returned by the `docker attach` command to its caller too: + + $ docker run --name test -d -it debian + 275c44472aebd77c926d4527885bb09f2f6db21d878c75f0a1c212c03d3bcfab + $ docker attach test + $$ exit 13 + exit + $ echo $? + 13 + $ docker ps -a | grep test + 275c44472aeb debian:7 "/bin/bash" 26 seconds ago Exited (13) 17 seconds ago test + diff --git a/docs/reference/commandline/build.md b/docs/reference/commandline/build.md new file mode 100644 index 00000000..037003d9 --- /dev/null +++ b/docs/reference/commandline/build.md @@ -0,0 +1,253 @@ + + +# build + + Usage: docker build [OPTIONS] PATH | URL | - + + Build a new image from the source code at PATH + + -f, --file="" Name of the Dockerfile (Default is 'PATH/Dockerfile') + --force-rm=false Always remove intermediate containers + --no-cache=false Do not use cache when building the image + --pull=false Always attempt to pull a newer version of the image + -q, --quiet=false Suppress the verbose output generated by the containers + --rm=true Remove intermediate containers after a successful build + -t, --tag="" Repository name (and optionally a tag) for the image + -m, --memory="" Memory limit for all build containers + --memory-swap="" Total memory (memory + swap), `-1` to disable swap + -c, --cpu-shares CPU Shares (relative weight) + --cpuset-mems="" MEMs in which to allow execution, e.g. `0-3`, `0,1` + --cpuset-cpus="" CPUs in which to allow execution, e.g. `0-3`, `0,1` + --cgroup-parent="" Optional parent cgroup for the container + --ulimit=[] Ulimit options + +Builds Docker images from a Dockerfile and a "context". A build's context is +the files located in the specified `PATH` or `URL`. The build process can refer +to any of the files in the context. For example, your build can use an +[*ADD*](/reference/builder/#add) instruction to reference a file in the +context. 
The `URL` parameter can specify the location of a Git repository; the repository
acts as the build context. The system recursively clones the repository and its
submodules using a `git clone --depth 1 --recursive` command. This command runs
in a temporary directory on your local host. After the command succeeds, the
directory is sent to the Docker daemon as the context. Local clones give you the
ability to access private repositories using local user credentials, VPNs, and
so forth.

Git URLs accept context configuration in their fragment section, separated by a
colon `:`. The first part represents the reference that Git will check out;
this can be either a branch, a tag, or a commit SHA. The second part represents
a subdirectory inside the repository that will be used as the build context.

For example, run this command to use a directory called `docker` in the branch
`container`:

    $ docker build https://github.com/docker/rootfs.git#container:docker

The following table represents all the valid suffixes with their build
contexts:

Build Syntax Suffix | Commit Used | Build Context Used
--------------------|-------------|-------------------
`myrepo.git` | `refs/heads/master` | `/`
`myrepo.git#mytag` | `refs/tags/mytag` | `/`
`myrepo.git#mybranch` | `refs/heads/mybranch` | `/`
`myrepo.git#abcdef` | `sha1 = abcdef` | `/`
`myrepo.git#:myfolder` | `refs/heads/master` | `/myfolder`
`myrepo.git#master:myfolder` | `refs/heads/master` | `/myfolder`
`myrepo.git#mytag:myfolder` | `refs/tags/mytag` | `/myfolder`
`myrepo.git#mybranch:myfolder` | `refs/heads/mybranch` | `/myfolder`
`myrepo.git#abcdef:myfolder` | `sha1 = abcdef` | `/myfolder`

Instead of specifying a context, you can pass a single Dockerfile in the `URL`
or pipe the file in via `STDIN`. To pipe a Dockerfile from `STDIN`:

    docker build - < Dockerfile

If you use STDIN or specify a `URL`, the system places the contents into a file
called `Dockerfile`, and any `-f`, `--file` option is ignored. In this
scenario, there is no context.

By default the `docker build` command will look for a `Dockerfile` at the root
of the build context. The `-f`, `--file`, option lets you specify the path to
an alternative file to use instead. This is useful in cases where the same set
of files are used for multiple builds. The path must be to a file within the
build context. If a relative path is specified then it must be relative to
the current directory.

In most cases, it's best to put each Dockerfile in an empty directory. Then,
add to that directory only the files needed for building the Dockerfile. To
increase the build's performance, you can exclude files and directories by
adding a `.dockerignore` file to that directory as well. For information on
creating one, see the [.dockerignore file](/reference/builder#dockerignore-file).

If the Docker client loses connection to the daemon, the build is canceled.
This happens if you interrupt the Docker client with `ctrl-c` or if the Docker
client is killed for any reason.

> **Note:**
> Currently only the "run" phase of the build can be canceled (until pull
> cancellation is implemented).

## Return code

On a successful build, a return code of `0` will be returned. When the
build fails, a non-zero failure code will be returned.

There should be informational output of the reason for failure output to
`STDERR`:

    $ docker build -t fail .
    Sending build context to Docker daemon 2.048 kB
    Sending build context to Docker daemon
    Step 0 : FROM busybox
     ---> 4986bf8c1536
    Step 1 : RUN exit 13
     ---> Running in e26670ec7a0a
    INFO[0000] The command [/bin/sh -c exit 13] returned a non-zero code: 13
    $ echo $?
    1

See also:

[*Dockerfile Reference*](/reference/builder).

## Examples

    $ docker build .
    Uploading context 10240 bytes
    Step 1 : FROM busybox
    Pulling repository busybox
     ---> e9aa60c60128
    Step 2 : RUN ls -lh /
     ---> Running in 9c9e81692ae9
    total 24
    drwxr-xr-x    2 root     root        4.0K Mar 12  2013 bin
    drwxr-xr-x    5 root     root        4.0K Oct 19 00:19 dev
    drwxr-xr-x    2 root     root        4.0K Oct 19 00:19 etc
    drwxr-xr-x    2 root     root        4.0K Nov 15 23:34 lib
    lrwxrwxrwx    1 root     root           3 Mar 12  2013 lib64 -> lib
    dr-xr-xr-x  116 root     root           0 Nov 15 23:34 proc
    lrwxrwxrwx    1 root     root           3 Mar 12  2013 sbin -> bin
    dr-xr-xr-x   13 root     root           0 Nov 15 23:34 sys
    drwxr-xr-x    2 root     root        4.0K Mar 12  2013 tmp
    drwxr-xr-x    2 root     root        4.0K Nov 15 23:34 usr
     ---> b35f4035db3f
    Step 3 : CMD echo Hello world
     ---> Running in 02071fceb21b
     ---> f52f38b7823e
    Successfully built f52f38b7823e
    Removing intermediate container 9c9e81692ae9
    Removing intermediate container 02071fceb21b

This example specifies that the `PATH` is `.`, and so all the files in the
local directory get `tar`d and sent to the Docker daemon. The `PATH` specifies
where to find the files for the "context" of the build on the Docker daemon.
Remember that the daemon could be running on a remote machine and that no
parsing of the Dockerfile happens at the client side (where you're running
`docker build`). That means that *all* the files at `PATH` get sent, not just
the ones listed to [*ADD*](/reference/builder/#add) in the Dockerfile.

The transfer of context from the local machine to the Docker daemon is what the
`docker` client means when you see the "Sending build context" message.

If you wish to keep the intermediate containers after the build is complete,
you must use `--rm=false`. This does not affect the build cache.

    $ docker build .
    Uploading context 18.829 MB
    Uploading context
    Step 0 : FROM busybox
     ---> 769b9341d937
    Step 1 : CMD echo Hello world
     ---> Using cache
     ---> 99cc1ad10469
    Successfully built 99cc1ad10469
    $ echo ".git" > .dockerignore
    $ docker build .
    Uploading context  6.76 MB
    Uploading context
    Step 0 : FROM busybox
     ---> 769b9341d937
    Step 1 : CMD echo Hello world
     ---> Using cache
     ---> 99cc1ad10469
    Successfully built 99cc1ad10469

This example shows the use of the `.dockerignore` file to exclude the `.git`
directory from the context. Its effect can be seen in the changed size of the
uploaded context. The builder reference contains detailed information on
[creating a .dockerignore file](../../builder/#dockerignore-file).

    $ docker build -t vieux/apache:2.0 .

This will build like the previous example, but it will then tag the resulting
image. The repository name will be `vieux/apache` and the tag will be `2.0`.

    $ docker build - < Dockerfile

This will read a Dockerfile from `STDIN` without context. Due to the lack of a
context, no contents of any local directory will be sent to the Docker daemon.
Since there is no context, a Dockerfile `ADD` only works if it refers to a
remote URL.

    $ docker build - < context.tar.gz

This will build an image for a compressed context read from `STDIN`.
Supported +formats are: bzip2, gzip and xz. + + $ docker build github.com/creack/docker-firefox + +This will clone the GitHub repository and use the cloned repository as context. +The Dockerfile at the root of the repository is used as Dockerfile. Note that +you can specify an arbitrary Git repository by using the `git://` or `git@` +schema. + + $ docker build -f Dockerfile.debug . + +This will use a file called `Dockerfile.debug` for the build instructions +instead of `Dockerfile`. + + $ docker build -f dockerfiles/Dockerfile.debug -t myapp_debug . + $ docker build -f dockerfiles/Dockerfile.prod -t myapp_prod . + +The above commands will build the current build context (as specified by the +`.`) twice, once using a debug version of a `Dockerfile` and once using a +production version. + + $ cd /home/me/myapp/some/dir/really/deep + $ docker build -f /home/me/myapp/dockerfiles/debug /home/me/myapp + $ docker build -f ../../../../dockerfiles/debug /home/me/myapp + +These two `docker build` commands do the exact same thing. They both use the +contents of the `debug` file instead of looking for a `Dockerfile` and will use +`/home/me/myapp` as the root of the build context. Note that `debug` is in the +directory structure of the build context, regardless of how you refer to it on +the command line. + +> **Note:** +> `docker build` will return a `no such file or directory` error if the +> file or directory does not exist in the uploaded context. This may +> happen if there is no context, or if you specify a file that is +> elsewhere on the Host system. The context is limited to the current +> directory (and its children) for security reasons, and to ensure +> repeatable builds on remote Docker hosts. This is also the reason why +> `ADD ../file` will not work. + +When `docker build` is run with the `--cgroup-parent` option the containers +used in the build will be run with the [corresponding `docker run` +flag](/reference/run/#specifying-custom-cgroups). + +Using the `--ulimit` option with `docker build` will cause each build step's +container to be started using those [`--ulimit` +flag values](/reference/run/#setting-ulimits-in-a-container). diff --git a/docs/reference/commandline/cli.md b/docs/reference/commandline/cli.md new file mode 100644 index 00000000..68d82fc2 --- /dev/null +++ b/docs/reference/commandline/cli.md @@ -0,0 +1,169 @@ + + +# Using the command line + +To list available commands, either run `docker` with no parameters +or execute `docker help`: + + $ docker + Usage: docker [OPTIONS] COMMAND [arg...] + docker daemon [ --help | ... ] + docker [ --help | -v | --version ] + + -H, --host=[]: The socket(s) to bind to in daemon mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. + + A self-sufficient runtime for Linux containers. + + ... + +Depending on your Docker system configuration, you may be required to preface +each `docker` command with `sudo`. To avoid having to use `sudo` with the +`docker` command, your system administrator can create a Unix group called +`docker` and add users to it. + +For more information about installing Docker or `sudo` configuration, refer to +the [installation](/installation) instructions for your operating system. + +## Environment variables + +For easy reference, the following list of environment variables are supported +by the `docker` command line: + +* `DOCKER_CONFIG` The location of your client configuration files. +* `DOCKER_CERT_PATH` The location of your authentication keys. 
+* `DOCKER_DRIVER` The graph driver to use.
+* `DOCKER_HOST` Daemon socket to connect to.
+* `DOCKER_NOWARN_KERNEL_VERSION` Prevent warnings that your Linux kernel is
+  unsuitable for Docker.
+* `DOCKER_RAMDISK` If set, this disables `pivot_root`.
+* `DOCKER_TLS_VERIFY` When set, Docker uses TLS and verifies the remote.
+* `DOCKER_CONTENT_TRUST` When set, Docker uses notary to sign and verify images.
+  Equates to `--disable-content-trust=false` for build, create, pull, push, run.
+* `DOCKER_TMPDIR` Location for temporary Docker files.
+
+Because Docker is developed using 'Go', you can also use any environment
+variables used by the 'Go' runtime. In particular, you may find these useful:
+
+* `HTTP_PROXY`
+* `HTTPS_PROXY`
+* `NO_PROXY`
+
+These Go environment variables are case-insensitive. See the
+[Go specification](http://golang.org/pkg/net/http/) for details on these
+variables.
+
+## Configuration files
+
+By default, the Docker command line stores its configuration files in a
+directory called `.docker` within your `HOME` directory. However, you can
+specify a different location via the `DOCKER_CONFIG` environment variable
+or the `--config` command line option. If both are specified, then the
+`--config` option overrides the `DOCKER_CONFIG` environment variable.
+For example:
+
+    docker --config ~/testconfigs/ ps
+
+This instructs Docker to use the configuration files in your `~/testconfigs/`
+directory when running the `ps` command.
+
+Docker manages most of the files in the configuration directory
+and you should not modify them. However, you *can modify* the
+`config.json` file to control certain aspects of how the `docker`
+command behaves.
+
+Currently, you can modify the `docker` command behavior using environment
+variables or command-line options. You can also use options within
+`config.json` to modify some of the same behavior. When using these
+mechanisms, you must keep in mind the order of precedence among them. Command
+line options override environment variables and environment variables override
+properties you specify in a `config.json` file.
+
+The `config.json` file stores a JSON encoding of several properties:
+
+The property `HttpHeaders` specifies a set of headers to include in all messages
+sent from the Docker client to the daemon. Docker does not try to interpret or
+understand these headers; it simply puts them into the messages. Docker does
+not allow these headers to change any headers it sets for itself.
+
+The property `psFormat` specifies the default format for `docker ps` output.
+When the `--format` flag is not provided with the `docker ps` command,
+Docker's client uses this property. If this property is not set, the client
+falls back to the default table format. For a list of supported formatting
+directives, see the
+[**Formatting** section in the `docker ps` documentation](../ps).
+
+Following is a sample `config.json` file:
+
+    {
+      "HttpHeaders": {
+        "MyHeader": "MyValue"
+      },
+      "psFormat": "table {{.ID}}\\t{{.Image}}\\t{{.Command}}\\t{{.Labels}}"
+    }
+
+## Help
+
+To list the help on any command just execute the command, followed by the
+`--help` option.
+
+    $ docker run --help
+
+    Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...]
+
+    Run a command in a new container
+
+      -a, --attach=[]            Attach to STDIN, STDOUT or STDERR
+      -c, --cpu-shares=0         CPU shares (relative weight)
+    ...
+
+## Option types
+
+Single character command line options can be combined, so rather than
+typing `docker run -i -t --name test busybox sh`,
+you can write `docker run -it --name test busybox sh`.
+
+### Boolean
+
+Boolean options take the form `-d=false`. The value you see in the help text is
+the default value which is set if you do **not** specify that flag. If you
+specify a Boolean flag without a value, this will set the flag to `true`,
+irrespective of the default value.
+
+For example, running `docker run -d` will set the value to `true`, so your
+container **will** run in "detached" mode, in the background.
+
+Options which default to `true` (e.g., `docker build --rm=true`) can only be
+set to the non-default value by explicitly setting them to `false`:
+
+    $ docker build --rm=false .
+
+### Multi
+
+You can specify options like `-a=[]` multiple times in a single command line,
+for example in these commands:
+
+    $ docker run -a stdin -a stdout -i -t ubuntu /bin/bash
+    $ docker run -a stdin -a stdout -a stderr ubuntu /bin/ls
+
+Sometimes, multiple options call for a more complex value string, as with `-v`:
+
+    $ docker run -v /host:/container example/mysql
+
+> **Note:**
+> Do not use the `-t` and `-a stderr` options together due to
+> limitations in the `pty` implementation. All `stderr` in `pty` mode
+> simply goes to `stdout`.
+
+### Strings and Integers
+
+Options like `--name=""` expect a string, and options like `-c=0` expect an
+integer; both kinds can only be specified once.
diff --git a/docs/reference/commandline/commit.md b/docs/reference/commandline/commit.md
new file mode 100644
index 00000000..cec7de67
--- /dev/null
+++ b/docs/reference/commandline/commit.md
@@ -0,0 +1,64 @@
+
+# commit
+
+    Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]]
+
+    Create a new image from a container's changes
+
+      -a, --author=""     Author (e.g., "John Hannibal Smith <hannibal@a-team.com>")
+      -c, --change=[]     Apply specified Dockerfile instructions while committing the image
+      -m, --message=""    Commit message
+      -p, --pause=true    Pause container during commit
+
+It can be useful to commit a container's file changes or settings into a new
+image. This allows you to debug a container by running an interactive shell, or
+to export a working dataset to another server. Generally, it is better to use
+Dockerfiles to manage your images in a documented and maintainable way.
+
+The commit operation will not include any data contained in
+volumes mounted inside the container.
+
+By default, the container being committed and its processes will be paused
+while the image is committed. This reduces the likelihood of encountering data
+corruption during the process of creating the commit. If this behavior is
+undesired, set the `--pause` option to false.
+
+The `--change` option will apply `Dockerfile` instructions to the image that is
+created.
+Supported `Dockerfile` instructions:
+`CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`LABEL`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR`
+
+## Commit a container
+
+    $ docker ps
+    ID                  IMAGE               COMMAND             CREATED             STATUS              PORTS
+    c3f279d17e0a        ubuntu:12.04        /bin/bash           7 days ago          Up 25 hours
+    197387f1b436        ubuntu:12.04        /bin/bash           7 days ago          Up 25 hours
+    $ docker commit c3f279d17e0a  SvenDowideit/testimage:version3
+    f5283438590d
+    $ docker images
+    REPOSITORY                TAG                 ID                  CREATED             VIRTUAL SIZE
+    SvenDowideit/testimage    version3            f5283438590d        16 seconds ago      335.7 MB
+
+## Commit a container with new configurations
+
+    $ docker ps
+    ID                  IMAGE               COMMAND             CREATED             STATUS              PORTS
+    c3f279d17e0a        ubuntu:12.04        /bin/bash           7 days ago          Up 25 hours
+    197387f1b436        ubuntu:12.04        /bin/bash           7 days ago          Up 25 hours
+    $ docker inspect -f "{{ .Config.Env }}" c3f279d17e0a
+    [HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin]
+    $ docker commit --change "ENV DEBUG true" c3f279d17e0a  SvenDowideit/testimage:version3
+    f5283438590d
+    $ docker inspect -f "{{ .Config.Env }}" f5283438590d
+    [HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin DEBUG=true]
diff --git a/docs/reference/commandline/cp.md b/docs/reference/commandline/cp.md
new file mode 100644
index 00000000..45c4b539
--- /dev/null
+++ b/docs/reference/commandline/cp.md
@@ -0,0 +1,91 @@
+
+# cp
+
+Copy files/folders between a container and the local filesystem.
+
+    Usage: docker cp [options] CONTAINER:PATH LOCALPATH|-
+           docker cp [options] LOCALPATH|- CONTAINER:PATH
+
+      --help    Print usage statement
+
+In the first synopsis form, the `docker cp` utility copies the contents of
+`PATH` from the filesystem of `CONTAINER` to the `LOCALPATH` (or stream as
+a tar archive to `STDOUT` if `-` is specified).
+
+In the second synopsis form, the contents of `LOCALPATH` (or a tar archive
+streamed from `STDIN` if `-` is specified) are copied from the local machine to
+`PATH` in the filesystem of `CONTAINER`.
+
+You can copy to or from either a running or stopped container. The `PATH` can
+be a file or directory. The `docker cp` command assumes all `CONTAINER:PATH`
+values are relative to the `/` (root) directory of the container. This means
+supplying the initial forward slash is optional; the command sees
+`compassionate_darwin:/tmp/foo/myfile.txt` and
+`compassionate_darwin:tmp/foo/myfile.txt` as identical. If a `LOCALPATH` value
+is not absolute, it is considered relative to the current working directory.
+
+Behavior is similar to the common Unix utility `cp -a` in that directories are
+copied recursively with permissions preserved if possible. Ownership is set to
+the user and primary group on the receiving end of the transfer. For example,
+files copied to a container will be created with `UID:GID` of the root user.
+Files copied to the local machine will be created with the `UID:GID` of the
+user which invoked the `docker cp` command.
+
+Assuming a path separator of `/`, a first argument of `SRC_PATH` and second
+argument of `DST_PATH`, the behavior is as follows:
+
+- `SRC_PATH` specifies a file
+    - `DST_PATH` does not exist
+        - the file is saved to a file created at `DST_PATH`
+    - `DST_PATH` does not exist and ends with `/`
+        - Error condition: the destination directory must exist.
+    - `DST_PATH` exists and is a file
+        - the destination is overwritten with the contents of the source file
+    - `DST_PATH` exists and is a directory
+        - the file is copied into this directory using the basename from
+          `SRC_PATH`
+- `SRC_PATH` specifies a directory
+    - `DST_PATH` does not exist
+        - `DST_PATH` is created as a directory and the *contents* of the source
+          directory are copied into this directory
+    - `DST_PATH` exists and is a file
+        - Error condition: cannot copy a directory to a file
+    - `DST_PATH` exists and is a directory
+        - `SRC_PATH` does not end with `/.`
+            - the source directory is copied into this directory
+        - `SRC_PATH` does end with `/.`
+            - the *content* of the source directory is copied into this
+              directory
+
+The command requires `SRC_PATH` and `DST_PATH` to exist according to the above
+rules. If `SRC_PATH` is local and is a symbolic link, the symbolic link, not
+the target, is copied.
+
+A colon (`:`) is used as a delimiter between `CONTAINER` and `PATH`, but `:`
+could also be in a valid `LOCALPATH`, like `file:name.txt`. This ambiguity is
+resolved by requiring a `LOCALPATH` with a `:` to be made explicit with a
+relative or absolute path, for example:
+
+    `/path/to/file:name.txt` or `./file:name.txt`
+
+It is not possible to copy certain system files such as resources under
+`/proc`, `/sys`, `/dev`, and mounts created by the user in the container.
+
+Using `-` as the first argument in place of a `LOCALPATH` will stream the
+contents of `STDIN` as a tar archive which will be extracted to the `PATH` in
+the filesystem of the destination container. In this case, `PATH` must specify
+a directory.
+
+Using `-` as the second argument in place of a `LOCALPATH` will stream the
+contents of the resource from the source container as a tar archive to
+`STDOUT`.
diff --git a/docs/reference/commandline/create.md b/docs/reference/commandline/create.md
new file mode 100644
index 00000000..8c093f18
--- /dev/null
+++ b/docs/reference/commandline/create.md
@@ -0,0 +1,120 @@
+
+# create
+
+Creates a new container.
+
+    Usage: docker create [OPTIONS] IMAGE [COMMAND] [ARG...]
+ + Create a new container + + -a, --attach=[] Attach to STDIN, STDOUT or STDERR + --add-host=[] Add a custom host-to-IP mapping (host:ip) + --blkio-weight=0 Block IO weight (relative weight) + -c, --cpu-shares=0 CPU shares (relative weight) + --cap-add=[] Add Linux capabilities + --cap-drop=[] Drop Linux capabilities + --cgroup-parent="" Optional parent cgroup for the container + --cidfile="" Write the container ID to the file + --cpu-period=0 Limit CPU CFS (Completely Fair Scheduler) period + --cpu-quota=0 Limit CPU CFS (Completely Fair Scheduler) quota + --cpuset-cpus="" CPUs in which to allow execution (0-3, 0,1) + --cpuset-mems="" Memory nodes (MEMs) in which to allow execution (0-3, 0,1) + --device=[] Add a host device to the container + --dns=[] Set custom DNS servers + --dns-search=[] Set custom DNS search domains + -e, --env=[] Set environment variables + --entrypoint="" Overwrite the default ENTRYPOINT of the image + --env-file=[] Read in a file of environment variables + --expose=[] Expose a port or a range of ports + -h, --hostname="" Container host name + --help=false Print usage + -i, --interactive=false Keep STDIN open even if not attached + --ipc="" IPC namespace to use + -l, --label=[] Set metadata on the container (e.g., --label=com.example.key=value) + --label-file=[] Read in a line delimited file of labels + --link=[] Add link to another container + --log-driver="" Logging driver for container + --log-opt=[] Log driver specific options + --lxc-conf=[] Add custom lxc options + -m, --memory="" Memory limit + --mac-address="" Container MAC address (e.g. 92:d0:c6:0a:29:33) + --memory-swap="" Total memory (memory + swap), '-1' to disable swap + --memory-swappiness="" Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. + --name="" Assign a name to the container + --net="bridge" Set the Network mode for the container + --oom-kill-disable=false Whether to disable OOM Killer for the container or not + -P, --publish-all=false Publish all exposed ports to random ports + -p, --publish=[] Publish a container's port(s) to the host + --pid="" PID namespace to use + --privileged=false Give extended privileges to this container + --read-only=false Mount the container's root filesystem as read only + --restart="no" Restart policy (no, on-failure[:max-retry], always) + --security-opt=[] Security options + -t, --tty=false Allocate a pseudo-TTY + --disable-content-trust=true Skip image verification + -u, --user="" Username or UID + --ulimit=[] Ulimit options + --uts="" UTS namespace to use + -v, --volume=[] Bind mount a volume + --volumes-from=[] Mount volumes from the specified container(s) + -w, --workdir="" Working directory inside the container + +The `docker create` command creates a writeable container layer over the +specified image and prepares it for running the specified command. The +container ID is then printed to `STDOUT`. This is similar to `docker run -d` +except the container is never started. You can then use the +`docker start ` command to start the container at any point. + +This is useful when you want to set up a container configuration ahead of time +so that it is ready to start when you need it. The initial status of the +new container is `created`. + +Please see the [run command](/reference/commandline/run) section and the [Docker run reference]( +/reference/run/) for more details. 
+ +## Examples + + $ docker create -t -i fedora bash + 6d8af538ec541dd581ebc2a24153a28329acb5268abe5ef868c1f1a261221752 + $ docker start -a -i 6d8af538ec5 + bash-4.2# + +As of v1.4.0 container volumes are initialized during the `docker create` phase +(i.e., `docker run` too). For example, this allows you to `create` the `data` +volume container, and then use it from another container: + + $ docker create -v /data --name data ubuntu + 240633dfbb98128fa77473d3d9018f6123b99c454b3251427ae190a7d951ad57 + $ docker run --rm --volumes-from data ubuntu ls -la /data + total 8 + drwxr-xr-x 2 root root 4096 Dec 5 04:10 . + drwxr-xr-x 48 root root 4096 Dec 5 04:11 .. + +Similarly, `create` a host directory bind mounted volume container, which can +then be used from the subsequent container: + + $ docker create -v /home/docker:/docker --name docker ubuntu + 9aa88c08f319cd1e4515c3c46b0de7cc9aa75e878357b1e96f91e2c773029f03 + $ docker run --rm --volumes-from docker ubuntu ls -la /docker + total 20 + drwxr-sr-x 5 1000 staff 180 Dec 5 04:00 . + drwxr-xr-x 48 root root 4096 Dec 5 04:13 .. + -rw-rw-r-- 1 1000 staff 3833 Dec 5 04:01 .ash_history + -rw-r--r-- 1 1000 staff 446 Nov 28 11:51 .ashrc + -rw-r--r-- 1 1000 staff 25 Dec 5 04:00 .gitconfig + drwxr-sr-x 3 1000 staff 60 Dec 1 03:28 .local + -rw-r--r-- 1 1000 staff 920 Nov 28 11:51 .profile + drwx--S--- 2 1000 staff 460 Dec 5 00:51 .ssh + drwxr-xr-x 32 1000 staff 1140 Dec 5 04:01 docker + + diff --git a/docs/reference/commandline/daemon.md b/docs/reference/commandline/daemon.md new file mode 100644 index 00000000..3e68df16 --- /dev/null +++ b/docs/reference/commandline/daemon.md @@ -0,0 +1,499 @@ + + +# daemon + + Usage: docker daemon [OPTIONS] + + A self-sufficient runtime for linux containers. + + Options: + --api-cors-header="" Set CORS headers in the remote API + -b, --bridge="" Attach containers to a network bridge + --bip="" Specify network bridge IP + -D, --debug=false Enable debug mode + --default-gateway="" Container default gateway IPv4 address + --default-gateway-v6="" Container default gateway IPv6 address + --dns=[] DNS server to use + --dns-search=[] DNS search domains to use + --default-ulimit=[] Set default ulimit settings for containers + -e, --exec-driver="native" Exec driver to use + --exec-opt=[] Set exec driver options + --exec-root="/var/run/docker" Root of the Docker execdriver + --fixed-cidr="" IPv4 subnet for fixed IPs + --fixed-cidr-v6="" IPv6 subnet for fixed IPs + -G, --group="docker" Group for the unix socket + -g, --graph="/var/lib/docker" Root of the Docker runtime + -H, --host=[] Daemon socket(s) to connect to + --help=false Print usage + --icc=true Enable inter-container communication + --insecure-registry=[] Enable insecure registry communication + --ip=0.0.0.0 Default IP when binding container ports + --ip-forward=true Enable net.ipv4.ip_forward + --ip-masq=true Enable IP masquerading + --iptables=true Enable addition of iptables rules + --ipv6=false Enable IPv6 networking + -l, --log-level="info" Set the logging level + --label=[] Set key=value labels to the daemon + --log-driver="json-file" Default driver for container logs + --log-opt=[] Log driver specific options + --mtu=0 Set the containers network MTU + --disable-legacy-registry=false Do not contact legacy registries + -p, --pidfile="/var/run/docker.pid" Path to use for daemon PID file + --registry-mirror=[] Preferred Docker registry mirror + -s, --storage-driver="" Storage driver to use + --selinux-enabled=false Enable selinux support + --storage-opt=[] Set storage 
driver options + --tls=false Use TLS; implied by --tlsverify + --tlscacert="~/.docker/ca.pem" Trust certs signed only by this CA + --tlscert="~/.docker/cert.pem" Path to TLS certificate file + --tlskey="~/.docker/key.pem" Path to TLS key file + --tlsverify=false Use TLS and verify the remote + --userland-proxy=true Use userland proxy for loopback traffic + +Options with [] may be specified multiple times. + +The Docker daemon is the persistent process that manages containers. Docker +uses the same binary for both the daemon and client. To run the daemon you +type `docker daemon`. + +To run the daemon with debug output, use `docker daemon -D`. + +## Daemon socket option + +The Docker daemon can listen for [Docker Remote API](/reference/api/docker_remote_api/) +requests via three different types of Socket: `unix`, `tcp`, and `fd`. + +By default, a `unix` domain socket (or IPC socket) is created at +`/var/run/docker.sock`, requiring either `root` permission, or `docker` group +membership. + +If you need to access the Docker daemon remotely, you need to enable the `tcp` +Socket. Beware that the default setup provides un-encrypted and +un-authenticated direct access to the Docker daemon - and should be secured +either using the [built in HTTPS encrypted socket](/articles/https/), or by +putting a secure web proxy in front of it. You can listen on port `2375` on all +network interfaces with `-H tcp://0.0.0.0:2375`, or on a particular network +interface using its IP address: `-H tcp://192.168.59.103:2375`. It is +conventional to use port `2375` for un-encrypted, and port `2376` for encrypted +communication with the daemon. + +> **Note:** +> If you're using an HTTPS encrypted socket, keep in mind that only +> TLS1.0 and greater are supported. Protocols SSLv3 and under are not +> supported anymore for security reasons. + +On Systemd based systems, you can communicate with the daemon via +[Systemd socket activation](http://0pointer.de/blog/projects/socket-activation.html), +use `docker daemon -H fd://`. Using `fd://` will work perfectly for most setups but +you can also specify individual sockets: `docker daemon -H fd://3`. If the +specified socket activated files aren't found, then Docker will exit. You can +find examples of using Systemd socket activation with Docker and Systemd in the +[Docker source tree](https://github.com/docker/docker/tree/master/contrib/init/systemd/). + +You can configure the Docker daemon to listen to multiple sockets at the same +time using multiple `-H` options: + + # listen using the default unix socket, and on 2 specific IP addresses on this host. + docker daemon -H unix:///var/run/docker.sock -H tcp://192.168.59.106 -H tcp://10.10.10.2 + +The Docker client will honor the `DOCKER_HOST` environment variable to set the +`-H` flag for the client. + + $ docker -H tcp://0.0.0.0:2375 ps + # or + $ export DOCKER_HOST="tcp://0.0.0.0:2375" + $ docker ps + # both are equal + +Setting the `DOCKER_TLS_VERIFY` environment variable to any value other than +the empty string is equivalent to setting the `--tlsverify` flag. The following +are equivalent: + + $ docker --tlsverify ps + # or + $ export DOCKER_TLS_VERIFY=1 + $ docker ps + +The Docker client will honor the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` +environment variables (or the lowercase versions thereof). `HTTPS_PROXY` takes +precedence over `HTTP_PROXY`. 
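+
+As an illustrative sketch (the proxy address here is an assumption, not a
+Docker default), pointing the client at a proxy and a remote daemon might look
+like:
+
+    $ export HTTPS_PROXY=https://proxy.example.com:3128
+    $ export DOCKER_HOST="tcp://192.168.59.103:2376"
+    $ docker ps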
+
+### Daemon storage-driver option
+
+The Docker daemon has support for several different image layer storage
+drivers: `aufs`, `devicemapper`, `btrfs`, `zfs` and `overlay`.
+
+The `aufs` driver is the oldest, but is based on a Linux kernel patch-set that
+is unlikely to be merged into the main kernel. It is also known to cause some
+serious kernel crashes. However, `aufs` is also the only storage driver that
+allows containers to share executable and shared library memory, so it is a
+useful choice when running thousands of containers with the same program or
+libraries.
+
+The `devicemapper` driver uses thin provisioning and Copy on Write (CoW)
+snapshots. For each devicemapper graph location – typically
+`/var/lib/docker/devicemapper` – a thin pool is created based on two block
+devices, one for data and one for metadata. By default, these block devices
+are created automatically by using loopback mounts of automatically created
+sparse files. Refer to [Storage driver options](#storage-driver-options) below
+for ways to customize this setup. The
+[~jpetazzo/Resizing Docker containers with the Device Mapper plugin](http://jpetazzo.github.io/2014/01/29/docker-device-mapper-resize/)
+article explains how to tune your existing setup without the use of options.
+
+The `btrfs` driver is very fast for `docker build` - but like `devicemapper`
+does not share executable memory between devices. Use
+`docker daemon -s btrfs -g /mnt/btrfs_partition`.
+
+The `zfs` driver is probably not as fast as `btrfs` but has a longer track
+record on stability. Thanks to `Single Copy ARC` shared blocks between clones
+will be cached only once. Use `docker daemon -s zfs`. To select a different
+zfs filesystem set the `zfs.fsname` option as described in
+[Storage driver options](#storage-driver-options).
+
+The `overlay` driver is a very fast union filesystem. It is now merged in the
+main Linux kernel as of [3.18.0](https://lkml.org/lkml/2014/10/26/137). Call
+`docker daemon -s overlay` to use it.
+
+> **Note:**
+> As promising as `overlay` is, the feature is still quite young and should not
+> be used in production. Most notably, using `overlay` can cause excessive
+> inode consumption (especially as the number of images grows), as well as
+> being incompatible with the use of RPMs.
+
+> **Note:**
+> It is currently unsupported on `btrfs` or any Copy on Write filesystem
+> and should only be used over `ext4` partitions.
+
+### Storage driver options
+
+A particular storage driver can be configured with options specified with
+`--storage-opt` flags. Options for `devicemapper` are prefixed with `dm` and
+options for `zfs` start with `zfs`.
+
+* `dm.thinpooldev`
+
+    Specifies a custom block storage device to use for the thin pool.
+
+    If using a block device for device mapper storage, it is best to use `lvm`
+    to create and manage the thin-pool volume. This volume is then handed to
+    Docker to exclusively create snapshot volumes needed for images and
+    containers.
+
+    Managing the thin-pool outside of Docker makes for the most feature-rich
+    method of having Docker utilize device mapper thin provisioning as the
+    backing storage for Docker's containers. The highlights of the lvm-based
+    thin-pool management feature include: automatic or interactive thin-pool
+    resize support, dynamically changing thin-pool features, automatic thinp
+    metadata checking when lvm activates the thin-pool, etc.
+
+    Example use:
+
+        docker daemon --storage-opt dm.thinpooldev=/dev/mapper/thin-pool
+
+* `dm.basesize`
+
+    Specifies the size to use when creating the base device, which limits the
+    size of images and containers. The default value is 100G. Note, thin
+    devices are inherently "sparse", so a 100G device which is mostly empty
+    doesn't use 100 GB of space on the pool. However, the larger the device
+    is, the more space the filesystem will use for itself, even when empty.
+
+    This value affects the system-wide "base" empty filesystem
+    that may already be initialized and inherited by pulled images. Typically,
+    a change to this value requires additional steps to take effect:
+
+        $ sudo service docker stop
+        $ sudo rm -rf /var/lib/docker
+        $ sudo service docker start
+
+    Example use:
+
+        $ docker daemon --storage-opt dm.basesize=20G
+
+* `dm.loopdatasize`
+
+    > **Note**: This option configures devicemapper loopback, which should not
+    > be used in production.
+
+    Specifies the size to use when creating the loopback file for the
+    "data" device which is used for the thin pool. The default size is
+    100G. The file is sparse, so it will not initially take up this
+    much space.
+
+    Example use:
+
+        $ docker daemon --storage-opt dm.loopdatasize=200G
+
+* `dm.loopmetadatasize`
+
+    > **Note**: This option configures devicemapper loopback, which should not
+    > be used in production.
+
+    Specifies the size to use when creating the loopback file for the
+    "metadata" device which is used for the thin pool. The default size
+    is 2G. The file is sparse, so it will not initially take up
+    this much space.
+
+    Example use:
+
+        $ docker daemon --storage-opt dm.loopmetadatasize=4G
+
+* `dm.fs`
+
+    Specifies the filesystem type to use for the base device. The supported
+    options are "ext4" and "xfs". The default is "ext4".
+
+    Example use:
+
+        $ docker daemon --storage-opt dm.fs=xfs
+
+* `dm.mkfsarg`
+
+    Specifies extra mkfs arguments to be used when creating the base device.
+
+    Example use:
+
+        $ docker daemon --storage-opt "dm.mkfsarg=-O ^has_journal"
+
+* `dm.mountopt`
+
+    Specifies extra mount options used when mounting the thin devices.
+
+    Example use:
+
+        $ docker daemon --storage-opt dm.mountopt=nodiscard
+
+* `dm.datadev`
+
+    (Deprecated, use `dm.thinpooldev`)
+
+    Specifies a custom blockdevice to use for data for the thin pool.
+
+    If using a block device for device mapper storage, ideally both datadev and
+    metadatadev should be specified to completely avoid using the loopback
+    device.
+
+    Example use:
+
+        $ docker daemon --storage-opt dm.datadev=/dev/sdb1 --storage-opt dm.metadatadev=/dev/sdc1
+
+* `dm.metadatadev`
+
+    (Deprecated, use `dm.thinpooldev`)
+
+    Specifies a custom blockdevice to use for metadata for the thin pool.
+
+    For best performance the metadata should be on a different spindle than the
+    data, or even better on an SSD.
+
+    If setting up a new metadata pool it is required to be valid. This can be
+    achieved by zeroing the first 4k to indicate empty metadata, like this:
+
+        $ dd if=/dev/zero of=$metadata_dev bs=4096 count=1
+
+    Example use:
+
+        $ docker daemon --storage-opt dm.datadev=/dev/sdb1 --storage-opt dm.metadatadev=/dev/sdc1
+
+* `dm.blocksize`
+
+    Specifies a custom blocksize to use for the thin pool. The default
+    blocksize is 64K.
+
+    Example use:
+
+        $ docker daemon --storage-opt dm.blocksize=512K
+
+* `dm.blkdiscard`
+
+    Enables or disables the use of blkdiscard when removing devicemapper
+    devices.
+    This is enabled by default (only) if using loopback devices and is
+    required to resparsify the loopback file on image/container removal.
+
+    Disabling this on loopback can lead to *much* faster container removal
+    times, but the space used in the `/var/lib/docker` directory will not be
+    returned to the system for other use when containers are removed.
+
+    Example use:
+
+        $ docker daemon --storage-opt dm.blkdiscard=false
+
+* `dm.override_udev_sync_check`
+
+    Overrides the `udev` synchronization checks between `devicemapper` and
+    `udev`. `udev` is the device manager for the Linux kernel.
+
+    To view the `udev` sync support of a Docker daemon that is using the
+    `devicemapper` driver, run:
+
+        $ docker info
+        [...]
+        Udev Sync Supported: true
+        [...]
+
+    When `udev` sync support is `true`, then `devicemapper` and udev can
+    coordinate the activation and deactivation of devices for containers.
+
+    When `udev` sync support is `false`, a race condition occurs between
+    `devicemapper` and `udev` during create and cleanup. The race condition
+    results in errors and failures. (For information on these failures, see
+    [docker#4036](https://github.com/docker/docker/issues/4036))
+
+    To allow the `docker` daemon to start, regardless of `udev` sync not being
+    supported, set `dm.override_udev_sync_check` to true:
+
+        $ docker daemon --storage-opt dm.override_udev_sync_check=true
+
+    When this value is `true`, the `devicemapper` driver continues and simply
+    warns you that errors are happening.
+
+    > **Note:**
+    > The ideal is to pursue a `docker` daemon and environment that does
+    > support synchronizing with `udev`. For further discussion on this
+    > topic, see [docker#4036](https://github.com/docker/docker/issues/4036).
+    > Otherwise, set this flag for migrating existing Docker daemons to
+    > a daemon with a supported environment.
+
+Currently supported options of `zfs`:
+
+* `zfs.fsname`
+
+    Sets the zfs filesystem under which docker will create its own datasets.
+    By default docker will pick up the zfs filesystem where the docker graph
+    (`/var/lib/docker`) is located.
+
+    Example use:
+
+        $ docker daemon -s zfs --storage-opt zfs.fsname=zroot/docker
+
+## Docker execdriver option
+
+The Docker daemon uses a specifically built `libcontainer` execution driver as
+its interface to the Linux kernel `namespaces`, `cgroups`, and `SELinux`.
+
+There is still legacy support for the original [LXC userspace tools](
+https://linuxcontainers.org/) via the `lxc` execution driver, however, this is
+not where the primary development of new functionality is taking place.
+Add `-e lxc` to the daemon flags to use the `lxc` execution driver.
+
+## Options for the native execdriver
+
+You can configure the `native` (libcontainer) execdriver using options
+specified with the `--exec-opt` flag. All the flag's options have the `native`
+prefix. A single `native.cgroupdriver` option is available.
+
+The `native.cgroupdriver` option specifies the management of the container's
+cgroups. You can specify `cgroupfs` or `systemd`. If you specify `systemd` and
+it is not available, the system uses `cgroupfs`. By default, if no option is
+specified, the execdriver first tries `systemd` and falls back to `cgroupfs`.
+This example sets the execdriver to `cgroupfs`:
+
+    $ sudo docker daemon --exec-opt native.cgroupdriver=cgroupfs
+
+Setting this option applies to all containers the daemon launches.
+
+## Daemon DNS options
+
+To set the DNS server for all Docker containers, use
+`docker daemon --dns 8.8.8.8`.
+
+To set the DNS search domain for all Docker containers, use
+`docker daemon --dns-search example.com`.
+
+## Insecure registries
+
+Docker considers a private registry either secure or insecure. In the rest of
+this section, *registry* is used for *private registry*, and `myregistry:5000`
+is a placeholder example for a private registry.
+
+A secure registry uses TLS and a copy of its CA certificate is placed on the
+Docker host at `/etc/docker/certs.d/myregistry:5000/ca.crt`. An insecure
+registry is either not using TLS (i.e., listening on plain text HTTP), or is
+using TLS with a CA certificate not known by the Docker daemon. The latter can
+happen when the certificate was not found under
+`/etc/docker/certs.d/myregistry:5000/`, or if the certificate verification
+failed (i.e., wrong CA).
+
+By default, Docker assumes all registries, except local ones (see local
+registries below), are secure. Communicating with an insecure registry is not
+possible if Docker assumes that registry is secure. In order to communicate
+with an insecure registry, the Docker daemon requires `--insecure-registry` in
+one of the following two forms:
+
+* `--insecure-registry myregistry:5000` tells the Docker daemon that
+  myregistry:5000 should be considered insecure.
+* `--insecure-registry 10.1.0.0/16` tells the Docker daemon that all registries
+  whose domain resolves to an IP address in the subnet described by the CIDR
+  syntax should be considered insecure.
+
+The flag can be used multiple times to allow multiple registries to be marked
+as insecure.
+
+If a registry is insecure and is not marked as such, `docker pull`,
+`docker push`, and `docker search` will result in an error message prompting
+the user to either secure the registry or pass the `--insecure-registry` flag
+to the Docker daemon as described above.
+
+Local registries, whose IP address falls in the 127.0.0.0/8 range, are
+automatically marked as insecure as of Docker 1.3.2. It is not recommended to
+rely on this, as it may change in the future.
+
+## Legacy Registries
+
+Enabling `--disable-legacy-registry` forces a Docker daemon to only interact
+with registries which support the V2 protocol. Specifically, the daemon will
+not attempt `push`, `pull` and `login` to v1 registries. The exception to this
+is `search` which can still be performed on v1 registries.
+
+## Running a Docker daemon behind an HTTPS_PROXY
+
+When running inside a LAN that uses an `HTTPS` proxy, the Docker Hub
+certificates will be replaced by the proxy's certificates. These certificates
+need to be added to your Docker host's configuration:
+
+1. Install the `ca-certificates` package for your distribution.
+2. Ask your network admin for the proxy's CA certificate and append it to
+   `/etc/pki/tls/certs/ca-bundle.crt`.
+3. Then start your Docker daemon with `HTTPS_PROXY=http://username:password@proxy:port/ docker daemon`.
+   The `username:` and `password@` are optional - and are only needed if your
+   proxy is set up to require authentication.
+
+This will only add the proxy and authentication to the Docker daemon's
+requests - your `docker build`s and running containers will need extra
+configuration to use the proxy.
+
+## Default Ulimits
+
+`--default-ulimit` allows you to set the default `ulimit` options to use for
+all containers. It takes the same options as `--ulimit` for `docker run`.
+If these defaults are not set, and no `--ulimit` options are passed on
+`docker run`, containers inherit the `ulimit` settings of the Docker daemon
+process. Any `--ulimit` options passed to `docker run` will overwrite these
+defaults.
+
+Be careful setting `nproc` with the `ulimit` flag as `nproc` is designed by
+Linux to set the maximum number of processes available to a user, not to a
+container. For details please check the [run](run.md) reference.
+
+## Miscellaneous options
+
+IP masquerading uses address translation to allow containers without a public
+IP to talk to other machines on the Internet. This may interfere with some
+network topologies and can be disabled with `--ip-masq=false`.
+
+Docker supports soft links for the Docker data directory (`/var/lib/docker`)
+and for `/var/lib/docker/tmp`. The `DOCKER_TMPDIR` and the data directory can
+be set like this:
+
+    DOCKER_TMPDIR=/mnt/disk2/tmp /usr/local/bin/docker daemon -D -g /var/lib/docker -H unix:// > /var/lib/boot2docker/docker.log 2>&1
+    # or
+    export DOCKER_TMPDIR=/mnt/disk2/tmp
+    /usr/local/bin/docker daemon -D -g /var/lib/docker -H unix:// > /var/lib/boot2docker/docker.log 2>&1
diff --git a/docs/reference/commandline/diff.md b/docs/reference/commandline/diff.md
new file mode 100644
index 00000000..ae5da058
--- /dev/null
+++ b/docs/reference/commandline/diff.md
@@ -0,0 +1,39 @@
+
+# diff
+
+    Usage: docker diff CONTAINER
+
+    Inspect changes on a container's filesystem
+
+List the changed files and directories in a container's filesystem.
+There are 3 events that are listed in the `diff`:
+
+1. `A` - Add
+2. `D` - Delete
+3. `C` - Change
+
+For example:
+
+    $ docker diff 7bb0e258aefe
+
+    C /dev
+    A /dev/kmsg
+    C /etc
+    A /etc/mtab
+    A /go
+    A /go/src
+    A /go/src/github.com
+    A /go/src/github.com/docker
+    A /go/src/github.com/docker/docker
+    A /go/src/github.com/docker/docker/.git
+    ....
diff --git a/docs/reference/commandline/docker_images.gif b/docs/reference/commandline/docker_images.gif
new file mode 100644
index 00000000..5894ca27
Binary files /dev/null and b/docs/reference/commandline/docker_images.gif differ
diff --git a/docs/reference/commandline/events.md b/docs/reference/commandline/events.md
new file mode 100644
index 00000000..f7146213
--- /dev/null
+++ b/docs/reference/commandline/events.md
@@ -0,0 +1,136 @@
+
+# events
+
+    Usage: docker events [OPTIONS]
+
+    Get real time events from the server
+
+      -f, --filter=[]    Filter output based on conditions provided
+      --since=""         Show all events created since timestamp
+      --until=""         Stream events until this timestamp
+
+Docker containers will report the following events:
+
+    create, destroy, die, export, kill, oom, pause, restart, start, stop, unpause
+
+and Docker images will report:
+
+    untag, delete
+
+The `--since` and `--until` parameters can be Unix timestamps, RFC3339
+dates or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the
+client machine's time. If you do not provide the `--since` option, the command
+returns only new and/or live events.
+
+## Filtering
+
+The filtering flag (`-f` or `--filter`) format is a `key=value` pair.
+If you would like to use multiple filters, pass multiple flags (e.g.,
+`--filter "foo=bar" --filter "bif=baz"`).
+
+Using the same filter multiple times will be handled as an *OR*; for example,
+`--filter container=588a23dac085 --filter container=a8f7720b8c22` will display
+events for container 588a23dac085 *OR* container a8f7720b8c22.
+
+Using multiple filters will be handled as an *AND*; for example,
+`--filter container=588a23dac085 --filter event=start` will display events for
+container 588a23dac085 where the event type is *start*.
+
+The currently supported filters are:
+
+* container
+* event
+* image
+
+## Examples
+
+You'll need two shells for this example.
+
+**Shell 1: Listening for events:**
+
+    $ docker events
+
+**Shell 2: Start and Stop containers:**
+
+    $ docker start 4386fb97867d
+    $ docker stop 4386fb97867d
+    $ docker stop 7805c1d35632
+
+**Shell 1: (Again, now showing events):**
+
+    2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) start
+    2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die
+    2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop
+    2014-05-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) die
+    2014-05-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) stop
+
+**Show events in the past from a specified time:**
+
+    $ docker events --since 1378216169
+    2014-03-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die
+    2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop
+    2014-05-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) die
+    2014-03-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) stop
+
+    $ docker events --since '2013-09-03'
+    2014-09-03T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) start
+    2014-09-03T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die
+    2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop
+    2014-05-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) die
+    2014-09-03T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) stop
+
+    $ docker events --since '2013-09-03T15:49:29'
+    2014-09-03T15:49:29.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die
+    2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop
+    2014-05-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) die
+    2014-09-03T15:49:29.999999999Z07:00 7805c1d35632: (from redis:2.8) stop
+
+This example outputs all events that were generated in the last 3 minutes,
+relative to the current time on the client machine:
+
+    $ docker events --since '3m'
+    2015-05-12T11:51:30.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die
+    2015-05-12T15:52:12.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop
+    2015-05-12T15:53:45.999999999Z07:00 7805c1d35632: (from redis:2.8) die
+    2015-05-12T15:54:03.999999999Z07:00 7805c1d35632: (from redis:2.8) stop
+
+**Filter events:**
+
+    $ docker events --filter 'event=stop'
+    2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop
+    2014-09-03T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) stop
+
+    $ docker events --filter 'image=ubuntu-1:14.04'
+    2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) start
+    2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die
+    2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop
+
+    $ docker events --filter 'container=7805c1d35632'
+    2014-05-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) die
+    2014-09-03T15:49:29.999999999Z07:00 7805c1d35632: (from redis:2.8) stop
+
+    $ docker events --filter 'container=7805c1d35632' --filter 'container=4386fb97867d'
+    2014-09-03T15:49:29.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die
+    2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop
+    2014-05-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) die
+    2014-09-03T15:49:29.999999999Z07:00 7805c1d35632: (from redis:2.8) stop
+
+    $ docker events --filter 'container=7805c1d35632' --filter 'event=stop'
+    2014-09-03T15:49:29.999999999Z07:00 7805c1d35632: (from redis:2.8) stop
+
+    $ docker events --filter 'container=container_1' --filter 'container=container_2'
+    2014-09-03T15:49:29.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die
+    2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop
+    2014-05-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) die
+    2014-09-03T15:49:29.999999999Z07:00 7805c1d35632: (from redis:2.8) stop
diff --git a/docs/reference/commandline/exec.md b/docs/reference/commandline/exec.md
new file mode 100644
index 00000000..6ad6883a
--- /dev/null
+++ b/docs/reference/commandline/exec.md
@@ -0,0 +1,55 @@
+
+# exec
+
+    Usage: docker exec [OPTIONS] CONTAINER COMMAND [ARG...]
+
+    Run a command in a running container
+
+      -d, --detach=false         Detached mode: run command in the background
+      -i, --interactive=false    Keep STDIN open even if not attached
+      -t, --tty=false            Allocate a pseudo-TTY
+      -u, --user=                Username or UID (format: <name|uid>[:<group|gid>])
+
+The `docker exec` command runs a new command in a running container.
+
+The command started using `docker exec` only runs while the container's primary
+process (`PID 1`) is running, and it is not restarted if the container is
+restarted.
+
+If the container is paused, then the `docker exec` command will fail with an
+error:
+
+    $ docker pause test
+    test
+    $ docker ps
+    CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS                   PORTS               NAMES
+    1ae3b36715d2        ubuntu:latest       "bash"              17 seconds ago      Up 16 seconds (Paused)                       test
+    $ docker exec test ls
+    FATA[0000] Error response from daemon: Container test is paused, unpause the container before exec
+    $ echo $?
+    1
+
+## Examples
+
+    $ docker run --name ubuntu_bash --rm -i -t ubuntu bash
+
+This will create a container named `ubuntu_bash` and start a Bash session.
+
+    $ docker exec -d ubuntu_bash touch /tmp/execWorks
+
+This will create a new file `/tmp/execWorks` inside the running container
+`ubuntu_bash`, in the background.
+
+    $ docker exec -it ubuntu_bash bash
+
+This will create a new Bash session in the container `ubuntu_bash`.
diff --git a/docs/reference/commandline/export.md b/docs/reference/commandline/export.md
new file mode 100644
index 00000000..5b1812cb
--- /dev/null
+++ b/docs/reference/commandline/export.md
@@ -0,0 +1,38 @@
+
+# export
+
+    Usage: docker export [OPTIONS] CONTAINER
+
+    Export the contents of a filesystem to a tar archive (streamed to STDOUT by default).
+
+      -o, --output=""    Write to a file, instead of STDOUT
+
+Produces a tarred repository to the standard output stream.
+
+For example:
+
+    $ docker export red_panda > latest.tar
+
+Or
+
+    $ docker export --output="latest.tar" red_panda
+
+The `docker export` command does not export the contents of volumes associated
+with the container. If a volume is mounted on top of an existing directory in
+the container, `docker export` will export the contents of the *underlying*
+directory, not the contents of the volume.
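+
+Because the output is a plain tar stream on `STDOUT`, it can be piped directly
+into other commands. As a sketch, one common pattern is to flatten a
+container's filesystem into a new single-layer image by piping `docker export`
+into `docker import` (the `red_panda:flat` name here is only an illustration):
+
+    $ docker export red_panda | docker import - red_panda:flat
+
+Note that image metadata such as the `CMD` and `ENTRYPOINT` of the original
+image is not carried across by this round trip.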
+ +Refer to [Backup, restore, or migrate data +volumes](/userguide/dockervolumes/#backup-restore-or-migrate-data-volumes) in +the user guide for examples on exporting data in a volume. diff --git a/docs/reference/commandline/history.md b/docs/reference/commandline/history.md new file mode 100644 index 00000000..a67587ec --- /dev/null +++ b/docs/reference/commandline/history.md @@ -0,0 +1,42 @@ + + +# history + + Usage: docker history [OPTIONS] IMAGE + + Show the history of an image + + -H, --human=true Print sizes and dates in human readable format + --no-trunc=false Don't truncate output + -q, --quiet=false Only show numeric IDs + +To see how the `docker:latest` image was built: + + $ docker history docker + IMAGE CREATED CREATED BY SIZE COMMENT + 3e23a5875458 8 days ago /bin/sh -c #(nop) ENV LC_ALL=C.UTF-8 0 B + 8578938dd170 8 days ago /bin/sh -c dpkg-reconfigure locales && loc 1.245 MB + be51b77efb42 8 days ago /bin/sh -c apt-get update && apt-get install 338.3 MB + 4b137612be55 6 weeks ago /bin/sh -c #(nop) ADD jessie.tar.xz in / 121 MB + 750d58736b4b 6 weeks ago /bin/sh -c #(nop) MAINTAINER Tianon Gravi ++++ +title = "images" +description = "The images command description and usage" +keywords = ["list, docker, images"] +[menu.main] +parent = "smn_cli" +weight=1 ++++ + + +# images + + Usage: docker images [OPTIONS] [REPOSITORY] + + List images + + -a, --all=false Show all images (default hides intermediate images) + --digests=false Show digests + -f, --filter=[] Filter output based on conditions provided + --help=false Print usage + --no-trunc=false Don't truncate output + -q, --quiet=false Only show numeric IDs + +The default `docker images` will show all top level +images, their repository and tags, and their virtual size. + +Docker images have intermediate layers that increase reusability, +decrease disk usage, and speed up `docker build` by +allowing each step to be cached. These intermediate layers are not shown +by default. + +The `VIRTUAL SIZE` is the cumulative space taken up by the image and all +its parent images. This is also the disk space used by the contents of the +Tar file created when you `docker save` an image. + +An image will be listed more than once if it has multiple repository names +or tags. This single image (identifiable by its matching `IMAGE ID`) +uses up the `VIRTUAL SIZE` listed only once. 
+
+## Listing the most recently created images
+
+    $ docker images
+    REPOSITORY                TAG                 IMAGE ID            CREATED             VIRTUAL SIZE
+    <none>                    <none>              77af4d6b9913        19 hours ago        1.089 GB
+    committ                   latest              b6fa739cedf5        19 hours ago        1.089 GB
+    <none>                    <none>              78a85c484f71        19 hours ago        1.089 GB
+    docker                    latest              30557a29d5ab        20 hours ago        1.089 GB
+    <none>                    <none>              5ed6274db6ce        24 hours ago        1.089 GB
+    postgres                  9                   746b819f315e        4 days ago          213.4 MB
+    postgres                  9.3                 746b819f315e        4 days ago          213.4 MB
+    postgres                  9.3.5               746b819f315e        4 days ago          213.4 MB
+    postgres                  latest              746b819f315e        4 days ago          213.4 MB
+
+## Listing the full length image IDs
+
+    $ docker images --no-trunc
+    REPOSITORY                TAG                 IMAGE ID                                                           CREATED             VIRTUAL SIZE
+    <none>                    <none>              77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182   19 hours ago        1.089 GB
+    committest                latest              b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f   19 hours ago        1.089 GB
+    <none>                    <none>              78a85c484f71509adeaace20e72e941f6bdd2b25b4c75da8693efd9f61a37921   19 hours ago        1.089 GB
+    docker                    latest              30557a29d5abc51e5f1d5b472e79b7e296f595abcf19fe6b9199dbbc809c6ff4   20 hours ago        1.089 GB
+    <none>                    <none>              0124422dd9f9cf7ef15c0617cda3931ee68346455441d66ab8bdc5b05e9fdce5   20 hours ago        1.089 GB
+    <none>                    <none>              18ad6fad340262ac2a636efd98a6d1f0ea775ae3d45240d3418466495a19a81b   22 hours ago        1.082 GB
+    <none>                    <none>              f9f1e26352f0a3ba6a0ff68167559f64f3e21ff7ada60366e2d44a04befd1d3a   23 hours ago        1.089 GB
+    tryout                    latest              2629d1fa0b81b222fca63371ca16cbf6a0772d07759ff80e8d1369b926940074   23 hours ago        131.5 MB
+    <none>                    <none>              5ed6274db6ceb2397844896966ea239290555e74ef307030ebb01ff91b1914df   24 hours ago        1.089 GB
+
+## Listing image digests
+
+Images that use the v2 or later format have a content-addressable identifier
+called a `digest`. As long as the input used to generate the image is
+unchanged, the digest value is predictable. To list image digest values, use
+the `--digests` flag:
+
+    $ docker images --digests
+    REPOSITORY                     TAG       DIGEST                                                                    IMAGE ID        CREATED         VIRTUAL SIZE
+    localhost:5000/test/busybox    <none>    sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf   4986bf8c1536    9 weeks ago     2.43 MB
+
+When pushing or pulling to a 2.0 registry, the `push` or `pull` command
+output includes the image digest. You can `pull` using a digest value. You can
+also reference by digest in `create`, `run`, and `rmi` commands, as well as the
+`FROM` image reference in a Dockerfile.
+
+## Filtering
+
+The filtering flag (`-f` or `--filter`) format is of "key=value". If there is
+more than one filter, then pass multiple flags (e.g.,
+`--filter "foo=bar" --filter "bif=baz"`).
+
+The currently supported filters are:
+
+* dangling (boolean - true or false)
+* label (`label=<key>` or `label=<key>=<value>`)
+
+### Untagged images
+
+    $ docker images --filter "dangling=true"
+
+    REPOSITORY          TAG                 IMAGE ID            CREATED             VIRTUAL SIZE
+    <none>              <none>              8abc22fbb042        4 weeks ago         0 B
+    <none>              <none>              48e5f45168b9        4 weeks ago         2.489 MB
+    <none>              <none>              bf747efa0e2f        4 weeks ago         0 B
+    <none>              <none>              980fe10e5736        12 weeks ago        101.4 MB
+    <none>              <none>              dea752e4e117        12 weeks ago        101.4 MB
+    <none>              <none>              511136ea3c5a        8 months ago        0 B
+
+This will display untagged images that are the leaves of the images tree (not
+intermediary layers). These images occur when a new build of an image takes the
+`repo:tag` away from the image ID, leaving it untagged. A warning will be
+issued if trying to remove an image when a container is presently using it.
+By having this flag it allows for batch cleanup.
+
+Ready for use by `docker rmi ...`, like:
+
+    $ docker rmi $(docker images -f "dangling=true" -q)
+
+    8abc22fbb042
+    48e5f45168b9
+    bf747efa0e2f
+    980fe10e5736
+    dea752e4e117
+    511136ea3c5a
+
+NOTE: Docker will warn you if any containers exist that are using these
+untagged images.
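+
+### Labeled images
+
+The `label` filter works the same way. As a sketch, assuming some images carry
+a hypothetical `com.example.version` label, you could list the ones with a
+specific value like this:
+
+    $ docker images --filter "label=com.example.version=1.0"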
+ + diff --git a/docs/reference/commandline/import.md b/docs/reference/commandline/import.md new file mode 100644 index 00000000..78d093ce --- /dev/null +++ b/docs/reference/commandline/import.md @@ -0,0 +1,58 @@ + + +# import + + Usage: docker import URL|- [REPOSITORY[:TAG]] + + Create an empty filesystem image and import the contents of the + tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then + optionally tag it. + + -c, --change=[] Apply specified Dockerfile instructions while importing the image + +URLs must start with `http` and point to a single file archive (.tar, +.tar.gz, .tgz, .bzip, .tar.xz, or .txz) containing a root filesystem. If +you would like to import from a local directory or archive, you can use +the `-` parameter to take the data from `STDIN`. + +The `--change` option will apply `Dockerfile` instructions to the image +that is created. +Supported `Dockerfile` instructions: +`CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + +## Examples + +**Import from a remote location:** + +This will create a new untagged image. + + $ docker import http://example.com/exampleimage.tgz + +**Import from a local file:** + +Import to docker via pipe and `STDIN`. + + $ cat exampleimage.tgz | docker import - exampleimagelocal:new + +**Import from a local directory:** + + $ sudo tar -c . | docker import - exampleimagedir + +**Import from a local directory with new configurations:** + + $ sudo tar -c . | docker import --change "ENV DEBUG true" - exampleimagedir + +Note the `sudo` in this example – you must preserve +the ownership of the files (especially root ownership) during the +archiving with tar. If you are not root (or the sudo command) when you +tar, then the ownerships might not get preserved. + diff --git a/docs/reference/commandline/info.md b/docs/reference/commandline/info.md new file mode 100644 index 00000000..dc4653e8 --- /dev/null +++ b/docs/reference/commandline/info.md @@ -0,0 +1,57 @@ + + +# info + + + Usage: docker info + + Display system-wide information + +For example: + + $ docker -D info + Containers: 14 + Images: 52 + Storage Driver: aufs + Root Dir: /var/lib/docker/aufs + Backing Filesystem: extfs + Dirs: 545 + Execution Driver: native-0.2 + Logging Driver: json-file + Kernel Version: 3.13.0-24-generic + Operating System: Ubuntu 14.04 LTS + CPUs: 1 + Name: prod-server-42 + ID: 7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS + Total Memory: 2 GiB + Debug mode (server): false + Debug mode (client): true + File Descriptors: 10 + Goroutines: 9 + System Time: Tue Mar 10 18:38:57 UTC 2015 + EventsListeners: 0 + Init Path: /usr/bin/docker + Docker Root Dir: /var/lib/docker + Http Proxy: http://test:test@localhost:8080 + Https Proxy: https://test:test@localhost:8080 + No Proxy: 9.81.1.160 + Username: svendowideit + Registry: [https://index.docker.io/v1/] + Labels: + storage=ssd + +The global `-D` option tells all `docker` commands to output debug information. + +When sending issue reports, please use `docker version` and `docker -D info` to +ensure we know how your setup is configured. + + diff --git a/docs/reference/commandline/inspect.md b/docs/reference/commandline/inspect.md new file mode 100644 index 00000000..c6c4721c --- /dev/null +++ b/docs/reference/commandline/inspect.md @@ -0,0 +1,76 @@ + + +# inspect + + Usage: docker inspect [OPTIONS] CONTAINER|IMAGE [CONTAINER|IMAGE...] 
+
+    Return low-level information on a container or image
+
+      -f, --format=""           Format the output using the given Go template
+      --type=container|image    Return JSON for specified type, permissible
+                                values are "image" or "container"
+
+By default, this will render all results in a JSON array. If a format is
+specified, the given template will be executed for each result.
+
+Go's [text/template](http://golang.org/pkg/text/template/) package
+describes all the details of the format.
+
+## Examples
+
+**Get an instance's IP address:**
+
+For the most part, you can pick out any field from the JSON in a fairly
+straightforward manner.
+
+    $ docker inspect --format='{{.NetworkSettings.IPAddress}}' $INSTANCE_ID
+
+**Get an instance's MAC address:**
+
+For the most part, you can pick out any field from the JSON in a fairly
+straightforward manner.
+
+    $ docker inspect --format='{{.NetworkSettings.MacAddress}}' $INSTANCE_ID
+
+**Get an instance's log path:**
+
+    $ docker inspect --format='{{.LogPath}}' $INSTANCE_ID
+
+**List all port bindings:**
+
+One can loop over arrays and maps in the results to produce simple text
+output:
+
+    $ docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' $INSTANCE_ID
+
+**Find a specific port mapping:**
+
+The `.Field` syntax doesn't work when the field name begins with a
+number, but the template language's `index` function does. The
+`.NetworkSettings.Ports` section contains a map of the internal port
+mappings to a list of external address/port objects, so to grab just the
+numeric public port, you use `index` to find the specific port map, and
+then `index` 0 contains the first object inside of that. Then we ask for
+the `HostPort` field to get the public address.
+
+    $ docker inspect --format='{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}}' $INSTANCE_ID
+
+**Get config:**
+
+The `.Field` syntax doesn't work when the field contains JSON data, but
+the template language's custom `json` function does. The `.Config`
+section contains a complex JSON object, so to grab it as JSON, you use
+`json` to convert the configuration object into JSON.
+
+    $ docker inspect --format='{{json .Config}}' $INSTANCE_ID
diff --git a/docs/reference/commandline/kill.md b/docs/reference/commandline/kill.md
new file mode 100644
index 00000000..798b9cc9
--- /dev/null
+++ b/docs/reference/commandline/kill.md
@@ -0,0 +1,21 @@
+
+# kill
+
+    Usage: docker kill [OPTIONS] CONTAINER [CONTAINER...]
+
+    Kill a running container using SIGKILL or a specified signal
+
+      -s, --signal="KILL"    Signal to send to the container
+
+The main process inside the container will be sent `SIGKILL`, or any
+signal specified with option `--signal`.
diff --git a/docs/reference/commandline/load.md b/docs/reference/commandline/load.md
new file mode 100644
index 00000000..d4154cd5
--- /dev/null
+++ b/docs/reference/commandline/load.md
@@ -0,0 +1,37 @@
+
+# load
+
+    Usage: docker load [OPTIONS]
+
+    Load an image from a tar archive or STDIN
+
+      -i, --input=""    Read from a tar archive file, instead of STDIN. The tarball may be compressed with gzip, bzip, or xz
+
+Loads a tarred repository from a file or the standard input stream.
+Restores both images and tags.
+
+    $ docker images
+    REPOSITORY          TAG                 IMAGE ID            CREATED             VIRTUAL SIZE
+    $ docker load < busybox.tar.gz
+    $ docker images
+    REPOSITORY          TAG                 IMAGE ID            CREATED             VIRTUAL SIZE
+    busybox             latest              769b9341d937        7 weeks ago         2.489 MB
+    $ docker load --input fedora.tar
+    $ docker images
+    REPOSITORY          TAG                 IMAGE ID            CREATED             VIRTUAL SIZE
+    busybox             latest              769b9341d937        7 weeks ago         2.489 MB
+    fedora              rawhide             0d20aec6529d        7 weeks ago         387 MB
+    fedora              20                  58394af37342        7 weeks ago         385.5 MB
+    fedora              heisenbug           58394af37342        7 weeks ago         385.5 MB
+    fedora              latest              58394af37342        7 weeks ago         385.5 MB
+
diff --git a/docs/reference/commandline/login.md b/docs/reference/commandline/login.md
new file mode 100644
index 00000000..5c2934a9
--- /dev/null
+++ b/docs/reference/commandline/login.md
@@ -0,0 +1,29 @@
+
+
+# login
+
+    Usage: docker login [OPTIONS] [SERVER]
+
+    Register or log in to a Docker registry server. If no server is
+    specified, "https://index.docker.io/v1/" is the default.
+
+      -e, --email=""       Email
+      -p, --password=""    Password
+      -u, --username=""    Username
+
+If you want to log in to a self-hosted registry, you can specify this by
+adding the server name.
+
+    example:
+    $ docker login localhost:8080
+
+
diff --git a/docs/reference/commandline/logout.md b/docs/reference/commandline/logout.md
new file mode 100644
index 00000000..3dc90278
--- /dev/null
+++ b/docs/reference/commandline/logout.md
@@ -0,0 +1,24 @@
+
+
+# logout
+
+    Usage: docker logout [SERVER]
+
+    Log out from a Docker registry. If no server is
+    specified, "https://index.docker.io/v1/" is the default.
+
+For example:
+
+    $ docker logout localhost:8080
+
+
diff --git a/docs/reference/commandline/logs.md b/docs/reference/commandline/logs.md
new file mode 100644
index 00000000..a2e69e4d
--- /dev/null
+++ b/docs/reference/commandline/logs.md
@@ -0,0 +1,43 @@
+
+
+# logs
+
+    Usage: docker logs [OPTIONS] CONTAINER
+
+    Fetch the logs of a container
+
+      -f, --follow=false        Follow log output
+      --since=""                Show logs since timestamp
+      -t, --timestamps=false    Show timestamps
+      --tail="all"              Number of lines to show from the end of the logs
+
+NOTE: this command is available only for containers with the `json-file`
+logging driver.
+
+The `docker logs` command batch-retrieves logs present at the time of execution.
+
+The `docker logs --follow` command will continue streaming the new output from
+the container's `STDOUT` and `STDERR`.
+
+Passing a negative number or a non-integer to `--tail` is invalid and the
+value is set to `all` in that case.
+
+The `docker logs --timestamps` command will add an RFC3339Nano
+timestamp, for example `2014-09-16T06:17:46.000000000Z`, to each
+log entry. To ensure that the timestamps are aligned, the
+nanosecond part of the timestamp will be padded with zeros when necessary.
+
+The `--since` option shows only the container logs generated after
+a given date. You can specify the date as an RFC 3339 date, a UNIX
+timestamp, or a Go duration string (e.g. `1m30s`, `3h`). Docker computes
+the date relative to the client machine’s time. You can combine
+the `--since` option with either or both of the `--follow` or `--tail` options.
diff --git a/docs/reference/commandline/pause.md b/docs/reference/commandline/pause.md
new file mode 100644
index 00000000..20ee0290
--- /dev/null
+++ b/docs/reference/commandline/pause.md
@@ -0,0 +1,27 @@
+
+
+# pause
+
+    Usage: docker pause CONTAINER [CONTAINER...]
+
+    Pause all processes within a container
+
+The `docker pause` command uses the cgroups freezer to suspend all processes in
+a container.
Traditionally, when suspending a process the `SIGSTOP` signal is +used, which is observable by the process being suspended. With the cgroups freezer +the process is unaware, and unable to capture, that it is being suspended, +and subsequently resumed. + +See the +[cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroups/freezer-subsystem.txt) +for further details. + diff --git a/docs/reference/commandline/port.md b/docs/reference/commandline/port.md new file mode 100644 index 00000000..0a9a3fe9 --- /dev/null +++ b/docs/reference/commandline/port.md @@ -0,0 +1,33 @@ + + +# port + + Usage: docker port CONTAINER [PRIVATE_PORT[/PROTO]] + + List port mappings for the CONTAINER, or lookup the public-facing port that is + NAT-ed to the PRIVATE_PORT + +You can find out all the ports mapped by not specifying a `PRIVATE_PORT`, or +just a specific mapping: + + $ docker ps test + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + b650456536c7 busybox:latest top 54 minutes ago Up 54 minutes 0.0.0.0:1234->9876/tcp, 0.0.0.0:4321->7890/tcp test + $ docker port test + 7890/tcp -> 0.0.0.0:4321 + 9876/tcp -> 0.0.0.0:1234 + $ docker port test 7890/tcp + 0.0.0.0:4321 + $ docker port test 7890/udp + 2014/06/24 11:53:36 Error: No public port '7890/udp' published for test + $ docker port test 7890 + 0.0.0.0:4321 diff --git a/docs/reference/commandline/ps.md b/docs/reference/commandline/ps.md new file mode 100644 index 00000000..9511336f --- /dev/null +++ b/docs/reference/commandline/ps.md @@ -0,0 +1,102 @@ + + +# ps + + Usage: docker ps [OPTIONS] + + List containers + + -a, --all=false Show all containers (default shows just running) + --before="" Show only container created before Id or Name + -f, --filter=[] Filter output based on conditions provided + -l, --latest=false Show the latest created container, include non-running + -n=-1 Show n last created containers, include non-running + --no-trunc=false Don't truncate output + -q, --quiet=false Only display numeric IDs + -s, --size=false Display total file sizes + --since="" Show created since Id or Name, include non-running + --format=[] Pretty-print containers using a Go template + +Running `docker ps --no-trunc` showing 2 linked containers. + + $ docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 4c01db0b339c ubuntu:12.04 bash 17 seconds ago Up 16 seconds 3300-3310/tcp webapp + d7886598dbe2 crosbymichael/redis:latest /redis-server --dir 33 minutes ago Up 33 minutes 6379/tcp redis,webapp/db + +`docker ps` will show only running containers by default. To see all containers: +`docker ps -a` + +`docker ps` will group exposed ports into a single range if possible. E.g., a container that exposes TCP ports `100, 101, 102` will display `100-102/tcp` in the `PORTS` column. + +## Filtering + +The filtering flag (`-f` or `--filter)` format is a `key=value` pair. If there is more +than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* id (container's id) +* label (`label=` or `label==`) +* name (container's name) +* exited (int - the code of exited containers. 
Only useful with `--all`)
+* status (created|restarting|running|paused|exited)
+
+## Successfully exited containers
+
+    $ docker ps -a --filter 'exited=0'
+    CONTAINER ID        IMAGE               COMMAND                CREATED             STATUS                   PORTS                      NAMES
+    ea09c3c82f6e        registry:latest     /srv/run.sh            2 weeks ago         Exited (0) 2 weeks ago   127.0.0.1:5000->5000/tcp   desperate_leakey
+    106ea823fe4e        fedora:latest       /bin/sh -c 'bash -l'   2 weeks ago         Exited (0) 2 weeks ago                              determined_albattani
+    48ee228c9464        fedora:20           bash                   2 weeks ago         Exited (0) 2 weeks ago                              tender_torvalds
+
+This shows all the containers that have exited with a status of `0`.
+
+## Formatting
+
+The formatting option (`--format`) will pretty-print container output using a Go template.
+
+Valid placeholders for the Go template are listed below:
+
+Placeholder | Description
+---- | ----
+`.ID` | Container ID
+`.Image` | Image ID
+`.Command` | Quoted command
+`.CreatedAt` | Time when the container was created.
+`.RunningFor` | Elapsed time since the container was started.
+`.Ports` | Exposed ports.
+`.Status` | Container status.
+`.Size` | Container disk size.
+`.Labels` | All labels assigned to the container.
+`.Label` | Value of a specific label for this container. For example `{{.Label "com.docker.swarm.cpu"}}`
+
+When using the `--format` option, the `ps` command will either output the data exactly as the template
+declares or, when using the `table` directive, will include column headers as well.
+
+The following example uses a template without headers and outputs the `ID` and `Command`
+entries separated by a colon for all running containers:
+
+    $ docker ps --format "{{.ID}}: {{.Command}}"
+    a87ecb4f327c: /bin/sh -c #(nop) MA
+    01946d9d34d8: /bin/sh -c #(nop) MA
+    c1d3b0166030: /bin/sh -c yum -y up
+    41d50ecd2f57: /bin/sh -c #(nop) MA
+
+To list all running containers with their labels in a table format you can use:
+
+    $ docker ps --format "table {{.ID}}\t{{.Labels}}"
+    CONTAINER ID        LABELS
+    a87ecb4f327c        com.docker.swarm.node=ubuntu,com.docker.swarm.storage=ssd
+    01946d9d34d8
+    c1d3b0166030        com.docker.swarm.node=debian,com.docker.swarm.cpu=6
+    41d50ecd2f57        com.docker.swarm.node=fedora,com.docker.swarm.cpu=3,com.docker.swarm.storage=ssd
diff --git a/docs/reference/commandline/pull.md b/docs/reference/commandline/pull.md
new file mode 100644
index 00000000..53b0d4cb
--- /dev/null
+++ b/docs/reference/commandline/pull.md
@@ -0,0 +1,52 @@
+
+
+# pull
+
+    Usage: docker pull [OPTIONS] NAME[:TAG] | [REGISTRY_HOST[:REGISTRY_PORT]/]NAME[:TAG]
+
+    Pull an image or a repository from the registry
+
+      -a, --all-tags=false            Download all tagged images in the repository
+      --disable-content-trust=true    Skip image verification
+
+Most of your images will be created on top of a base image from the
+[Docker Hub](https://hub.docker.com) registry.
+
+[Docker Hub](https://hub.docker.com) contains many pre-built images that you
+can `pull` and try without needing to define and configure your own.
+
+It is also possible to manually specify the path of a registry to pull from.
+For example, if you have set up a local registry, you can specify its path to
+pull from it. A repository path is similar to a URL, but does not contain
+a protocol specifier (`https://`, for example).
+
+To download a particular image, or set of images (i.e., a repository),
+use `docker pull`:
+
+    $ docker pull debian
+    # will pull the debian:latest image and its intermediate layers
+    $ docker pull debian:testing
+    # will pull the image named debian:testing and any intermediate
+    # layers it is based on.
+ $ docker pull debian@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf + # will pull the image from the debian repository with the digest + # sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf + # and any intermediate layers it is based on. + # (Typically the empty `scratch` image, a MAINTAINER layer, + # and the un-tarred base). + $ docker pull --all-tags centos + # will pull all the images from the centos repository + $ docker pull registry.hub.docker.com/debian + # manually specifies the path to the default Docker registry. This could + # be replaced with the path to a local registry to pull from another source. + # sudo docker pull myhub.com:8080/test-image + diff --git a/docs/reference/commandline/push.md b/docs/reference/commandline/push.md new file mode 100644 index 00000000..7f88887d --- /dev/null +++ b/docs/reference/commandline/push.md @@ -0,0 +1,21 @@ + + +# push + + Usage: docker push NAME[:TAG] + + Push an image or a repository to the registry + + --disable-content-trust=true Skip image signing + +Use `docker push` to share your images to the [Docker Hub](https://hub.docker.com) +registry or to a self-hosted one. diff --git a/docs/reference/commandline/rename.md b/docs/reference/commandline/rename.md new file mode 100644 index 00000000..43115ff2 --- /dev/null +++ b/docs/reference/commandline/rename.md @@ -0,0 +1,18 @@ + + +# rename + + Usage: docker rename OLD_NAME NEW_NAME + + rename a existing container to a NEW_NAME + +The `docker rename` command allows the container to be renamed to a different name. diff --git a/docs/reference/commandline/restart.md b/docs/reference/commandline/restart.md new file mode 100644 index 00000000..0b668eec --- /dev/null +++ b/docs/reference/commandline/restart.md @@ -0,0 +1,19 @@ + + +# restart + + Usage: docker restart [OPTIONS] CONTAINER [CONTAINER...] + + Restart a running container + + -t, --time=10 Seconds to wait for stop before killing the container + diff --git a/docs/reference/commandline/rm.md b/docs/reference/commandline/rm.md new file mode 100644 index 00000000..3e4f0716 --- /dev/null +++ b/docs/reference/commandline/rm.md @@ -0,0 +1,48 @@ + + +# rm + + Usage: docker rm [OPTIONS] CONTAINER [CONTAINER...] + + Remove one or more containers + + -f, --force=false Force the removal of a running container (uses SIGKILL) + -l, --link=false Remove the specified link + -v, --volumes=false Remove the volumes associated with the container + +## Examples + + $ docker rm /redis + /redis + +This will remove the container referenced under the link +`/redis`. + + $ docker rm --link /webapp/redis + /webapp/redis + +This will remove the underlying link between `/webapp` and the `/redis` +containers removing all network communication. + + $ docker rm --force redis + redis + +The main process inside the container referenced under the link `/redis` will receive +`SIGKILL`, then the container will be removed. + + $ docker rm $(docker ps -a -q) + +This command will delete all stopped containers. The command +`docker ps -a -q` will return all existing container IDs and pass them to +the `rm` command which will delete them. Any running containers will not be +deleted. + diff --git a/docs/reference/commandline/rmi.md b/docs/reference/commandline/rmi.md new file mode 100644 index 00000000..e699be12 --- /dev/null +++ b/docs/reference/commandline/rmi.md @@ -0,0 +1,76 @@ + + +# rmi + + Usage: docker rmi [OPTIONS] IMAGE [IMAGE...] 
+ + Remove one or more images + + -f, --force=false Force removal of the image + --no-prune=false Do not delete untagged parents + + +You can remove an image using its short or long ID, its tag, or its digest. If +an image has one or more tag or digest reference, you must remove all of them +before the image is removed. + + $ docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + test1 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + test2 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + + $ docker rmi fd484f19954f + Error: Conflict, cannot delete image fd484f19954f because it is tagged in multiple repositories, use -f to force + 2013/12/11 05:47:16 Error: failed to remove one or more images + + $ docker rmi test1 + Untagged: test1:latest + $ docker rmi test2 + Untagged: test2:latest + + $ docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + $ docker rmi test + Untagged: test:latest + Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8 + +If you use the `-f` flag and specify the image's short or long ID, then this +command untags and removes all images that match the specified ID. + + $ docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + test1 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + test2 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + + $ docker rmi -f fd484f19954f + Untagged: test1:latest + Untagged: test:latest + Untagged: test2:latest + Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8 + +An image pulled by digest has no tag associated with it: + + $ docker images --digests + REPOSITORY TAG DIGEST IMAGE ID CREATED VIRTUAL SIZE + localhost:5000/test/busybox sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf 4986bf8c1536 9 weeks ago 2.43 MB + +To remove an image using its digest: + + $ docker rmi localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf + Untagged: localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf + Deleted: 4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125 + Deleted: ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2 + Deleted: df7546f9f060a2268024c8a230d8639878585defcc1bc6f79d2728a13957871b + diff --git a/docs/reference/commandline/run.md b/docs/reference/commandline/run.md new file mode 100644 index 00000000..ab900d41 --- /dev/null +++ b/docs/reference/commandline/run.md @@ -0,0 +1,523 @@ + + +# run + + Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...] 
+
+    Run a command in a new container
+
+      -a, --attach=[]               Attach to STDIN, STDOUT or STDERR
+      --add-host=[]                 Add a custom host-to-IP mapping (host:ip)
+      --blkio-weight=0              Block IO weight (relative weight)
+      -c, --cpu-shares=0            CPU shares (relative weight)
+      --cap-add=[]                  Add Linux capabilities
+      --cap-drop=[]                 Drop Linux capabilities
+      --cgroup-parent=""            Optional parent cgroup for the container
+      --cidfile=""                  Write the container ID to the file
+      --cpu-period=0                Limit CPU CFS (Completely Fair Scheduler) period
+      --cpu-quota=0                 Limit CPU CFS (Completely Fair Scheduler) quota
+      --cpuset-cpus=""              CPUs in which to allow execution (0-3, 0,1)
+      --cpuset-mems=""              Memory nodes (MEMs) in which to allow execution (0-3, 0,1)
+      -d, --detach=false            Run container in background and print container ID
+      --device=[]                   Add a host device to the container
+      --dns=[]                      Set custom DNS servers
+      --dns-search=[]               Set custom DNS search domains
+      -e, --env=[]                  Set environment variables
+      --entrypoint=""               Overwrite the default ENTRYPOINT of the image
+      --env-file=[]                 Read in a file of environment variables
+      --expose=[]                   Expose a port or a range of ports
+      --group-add=[]                Add additional groups to run as
+      -h, --hostname=""             Container host name
+      --help=false                  Print usage
+      -i, --interactive=false       Keep STDIN open even if not attached
+      --ipc=""                      IPC namespace to use
+      -l, --label=[]                Set metadata on the container (e.g., --label=com.example.key=value)
+      --label-file=[]               Read in a file of labels (EOL delimited)
+      --link=[]                     Add link to another container
+      --log-driver=""               Logging driver for container
+      --log-opt=[]                  Log driver specific options
+      --lxc-conf=[]                 Add custom lxc options
+      -m, --memory=""               Memory limit
+      --mac-address=""              Container MAC address (e.g. 92:d0:c6:0a:29:33)
+      --memory-swap=""              Total memory (memory + swap), '-1' to disable swap
+      --memory-swappiness=""        Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+      --name=""                     Assign a name to the container
+      --net="bridge"                Set the Network mode for the container
+      --oom-kill-disable=false      Whether to disable OOM Killer for the container or not
+      -P, --publish-all=false       Publish all exposed ports to random ports
+      -p, --publish=[]              Publish a container's port(s) to the host
+      --pid=""                      PID namespace to use
+      --privileged=false            Give extended privileges to this container
+      --read-only=false             Mount the container's root filesystem as read only
+      --restart="no"                Restart policy (no, on-failure[:max-retry], always)
+      --rm=false                    Automatically remove the container when it exits
+      --security-opt=[]             Security Options
+      --sig-proxy=true              Proxy received signals to the process
+      -t, --tty=false               Allocate a pseudo-TTY
+      -u, --user=""                 Username or UID (format: <name|uid>[:<group|gid>])
+      --ulimit=[]                   Ulimit options
+      --disable-content-trust=true  Skip image verification
+      --uts=""                      UTS namespace to use
+      -v, --volume=[]               Bind mount a volume
+      --volumes-from=[]             Mount volumes from the specified container(s)
+      -w, --workdir=""              Working directory inside the container
+
+The `docker run` command first `creates` a writeable container layer over the
+specified image, and then `starts` it using the specified command. That is,
+`docker run` is equivalent to the API `/containers/create` then
+`/containers/(id)/start`. A stopped container can be restarted with all its
+previous changes intact using `docker start`. See `docker ps -a` to view a list
+of all containers.
+
+There is detailed information about `docker run` in the [Docker run reference](
+/reference/run/).
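+
+As a rough sketch of the `create`/`start` equivalence described above (the
+`ubuntu` image and the `top` command here are only placeholders), the
+following two invocations behave much the same:
+
+    $ docker run -d ubuntu top
+
+    $ ID=$(docker create ubuntu top)
+    $ docker start $ID
+
+The second form can be useful when you want to prepare a container ahead of
+time and start it later.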
+
+The `docker run` command can be used in combination with `docker commit` to
+[*change the command that a container runs*](/reference/commandline/commit).
+
+See the [Docker User Guide](/userguide/dockerlinks/) for more detailed
+information about the `--expose`, `-p`, `-P` and `--link` parameters,
+and linking containers.
+
+## Examples
+
+    $ docker run --name test -it debian
+    root@d6c0fe130dba:/# exit 13
+    $ echo $?
+    13
+    $ docker ps -a | grep test
+    d6c0fe130dba        debian:7            "/bin/bash"         26 seconds ago      Exited (13) 17 seconds ago                         test
+
+This example runs a container named `test` using the `debian:latest`
+image. The `-it` instructs Docker to allocate a pseudo-TTY connected to
+the container's stdin, creating an interactive `bash` shell in the container.
+In the example, the `bash` shell is quit by entering
+`exit 13`. This exit code is passed on to the caller of
+`docker run`, and is recorded in the `test` container's metadata.
+
+    $ docker run --cidfile /tmp/docker_test.cid ubuntu echo "test"
+
+This will create a container and print `test` to the console. The `--cidfile`
+flag makes Docker attempt to create a new file and write the container ID to it.
+If the file exists already, Docker will return an error. Docker will close this
+file when `docker run` exits.
+
+    $ docker run -t -i --rm ubuntu bash
+    root@bc338942ef20:/# mount -t tmpfs none /mnt
+    mount: permission denied
+
+This will *not* work, because by default, most potentially dangerous kernel
+capabilities are dropped, including `cap_sys_admin` (which is required to mount
+filesystems). However, the `--privileged` flag will allow it to run:
+
+    $ docker run --privileged ubuntu bash
+    root@50e3f57e16e6:/# mount -t tmpfs none /mnt
+    root@50e3f57e16e6:/# df -h
+    Filesystem            Size  Used Avail Use% Mounted on
+    none                  1.9G     0  1.9G   0% /mnt
+
+The `--privileged` flag gives *all* capabilities to the container, and it also
+lifts all the limitations enforced by the `device` cgroup controller. In other
+words, the container can then do almost everything that the host can do. This
+flag exists to allow special use-cases, like running Docker within Docker.
+
+    $ docker run -w /path/to/dir/ -i -t ubuntu pwd
+
+The `-w` option runs the command inside the given directory, here
+`/path/to/dir/`. If the path does not exist, it is created inside the container.
+
+    $ docker run -v `pwd`:`pwd` -w `pwd` -i -t ubuntu pwd
+
+The `-v` flag mounts the current working directory into the container. The `-w`
+option then runs the command inside the current working directory, by
+changing into the directory returned by `pwd`. This combination therefore
+executes the command inside the container, but in the host's current working
+directory.
+
+    $ docker run -v /doesnt/exist:/foo -w /foo -i -t ubuntu bash
+
+When the host directory of a bind-mounted volume doesn't exist, Docker
+will automatically create this directory on the host for you. In the
+example above, Docker will create the `/doesnt/exist`
+folder before starting your container.
+
+    $ docker run --read-only -v /icanwrite busybox touch /icanwrite/here
+
+Volumes can be used in combination with `--read-only` to control where
+a container writes files. The `--read-only` flag mounts the container's root
+filesystem as read only, prohibiting writes to locations other than the
+specified volumes for the container.
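+
+A quick way to see the read-only root filesystem in action; a sketch, and the
+exact error text may vary by image:
+
+    $ docker run --read-only --rm busybox touch /foo
+    touch: /foo: Read-only file system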
+
+    $ docker run -t -i -v /var/run/docker.sock:/var/run/docker.sock -v ./static-docker:/usr/bin/docker busybox sh
+
+By bind-mounting the docker unix socket and statically linked docker
+binary (such as that provided by [https://get.docker.com](
+https://get.docker.com)), you give the container full access to create and
+manipulate the host's Docker daemon.
+
+    $ docker run -p 127.0.0.1:80:8080 ubuntu bash
+
+This binds port `8080` of the container to port `80` on `127.0.0.1` of
+the host machine. The [Docker User Guide](/userguide/dockerlinks/)
+explains in detail how to manipulate ports in Docker.
+
+    $ docker run --expose 80 ubuntu bash
+
+This exposes port `80` of the container for use within a link without
+publishing the port to the host system's interfaces. The [Docker User
+Guide](/userguide/dockerlinks) explains in detail how to manipulate
+ports in Docker.
+
+    $ docker run -e MYVAR1 --env MYVAR2=foo --env-file ./env.list ubuntu bash
+
+This sets environment variables in the container. For illustration, all three
+flags are shown here. `-e` and `--env` take an environment variable and
+value; if no `=` is provided, that variable's current value is passed
+through (i.e. `$MYVAR1` from the host is set to `$MYVAR1` in the container).
+When no `=` is provided and that variable is not defined in the client's
+environment, that variable will be removed from the container's list of
+environment variables.
+All three flags, `-e`, `--env` and `--env-file`, can be repeated.
+
+Regardless of the order of these three flags, the `--env-file` flags are
+processed first, and then the `-e` and `--env` flags. This way, the `-e` or
+`--env` will override variables as needed.
+
+    $ cat ./env.list
+    TEST_FOO=BAR
+    $ docker run --env TEST_FOO="This is a test" --env-file ./env.list busybox env | grep TEST_FOO
+    TEST_FOO=This is a test
+
+The `--env-file` flag takes a filename as an argument and expects each line
+to be in the `VAR=VAL` format, mimicking the argument passed to `--env`. Comment
+lines need only be prefixed with `#`.
+
+An example of a file passed with `--env-file`:
+
+    $ cat ./env.list
+    TEST_FOO=BAR
+
+    # this is a comment
+    TEST_APP_DEST_HOST=10.10.0.127
+    TEST_APP_DEST_PORT=8888
+    _TEST_BAR=FOO
+    TEST_APP_42=magic
+    helloWorld=true
+    # 123qwe=bar <- is not valid
+
+    # pass through this variable from the caller
+    TEST_PASSTHROUGH
+    $ TEST_PASSTHROUGH=howdy docker run --env-file ./env.list busybox env
+    PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+    HOSTNAME=5198e0745561
+    TEST_FOO=BAR
+    TEST_APP_DEST_HOST=10.10.0.127
+    TEST_APP_DEST_PORT=8888
+    _TEST_BAR=FOO
+    TEST_APP_42=magic
+    helloWorld=true
+    TEST_PASSTHROUGH=howdy
+    HOME=/root
+
+    $ docker run --env-file ./env.list busybox env
+    PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+    HOSTNAME=5198e0745561
+    TEST_FOO=BAR
+    TEST_APP_DEST_HOST=10.10.0.127
+    TEST_APP_DEST_PORT=8888
+    _TEST_BAR=FOO
+    TEST_APP_42=magic
+    helloWorld=true
+    TEST_PASSTHROUGH=
+    HOME=/root
+
+> **Note**: Environment variable names must consist solely of letters, numbers,
+> and underscores, and cannot start with a number.
+
+A label is a `key=value` pair that applies metadata to a container. To label a
+container with two labels:
+
+    $ docker run -l my-label --label com.example.foo=bar ubuntu bash
+
+The `my-label` key doesn't specify a value so the label defaults to an empty
+string (`""`). To add multiple labels, repeat the label flag (`-l` or `--label`).
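+
+To verify which labels were applied, you can inspect the container. A minimal
+sketch, assuming a container named `labeled`:
+
+    $ docker run -l my-label --label com.example.foo=bar --name labeled ubuntu true
+    $ docker inspect --format='{{json .Config.Labels}}' labeled
+    {"com.example.foo":"bar","my-label":""}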
+
+The `key=value` must be unique to avoid overwriting the label value. If you
+specify labels with identical keys but different values, each subsequent value
+overwrites the previous. Docker uses the last `key=value` you supply.
+
+Use the `--label-file` flag to load multiple labels from a file. Delimit each
+label in the file with an EOL mark. The example below loads labels from a
+labels file in the current directory:
+
+    $ docker run --label-file ./labels ubuntu bash
+
+The label-file format is similar to the format for loading environment
+variables. (Unlike environment variables, labels are not visible to processes
+running inside a container.) The following example illustrates a label-file
+format:
+
+    com.example.label1="a label"
+
+    # this is a comment
+    com.example.label2=another\ label
+    com.example.label3
+
+You can load multiple label-files by supplying multiple `--label-file` flags.
+
+For additional information on working with labels, see [*Labels - custom
+metadata in Docker*](/userguide/labels-custom-metadata/) in the Docker User
+Guide.
+
+    $ docker run --link /redis:redis --name console ubuntu bash
+
+The `--link` flag will link the container named `/redis` into the newly
+created container with the alias `redis`. The new container can access the
+network and environment of the `redis` container via environment variables.
+The `--link` flag will also just accept the form `<name or id>`, in which case
+the alias will match the name. For instance, you could have written the previous
+example as:
+
+    $ docker run --link redis --name console ubuntu bash
+
+The `--name` flag will assign the name `console` to the newly created
+container.
+
+    $ docker run --volumes-from 777f7dc92da7 --volumes-from ba8c0c54f0f2:ro -i -t ubuntu pwd
+
+The `--volumes-from` flag mounts all the defined volumes from the referenced
+containers. Containers can be specified by repetitions of the `--volumes-from`
+argument. The container ID may be optionally suffixed with `:ro` or `:rw` to
+mount the volumes in read-only or read-write mode, respectively. By default,
+the volumes are mounted in the same mode (read write or read only) as
+the reference container.
+
+Labeling systems like SELinux require that proper labels are placed on volume
+content mounted into a container. Without a label, the security system might
+prevent the processes running inside the container from using the content. By
+default, Docker does not change the labels set by the OS.
+
+To change the label in the container context, you can add either of two suffixes
+`:z` or `:Z` to the volume mount. These suffixes tell Docker to relabel file
+objects on the shared volumes. The `z` option tells Docker that two containers
+share the volume content. As a result, Docker labels the content with a shared
+content label. Shared volume labels allow all containers to read/write content.
+The `Z` option tells Docker to label the content with a private unshared label.
+Only the current container can use a private volume.
+
+The `-a` flag tells `docker run` to bind to the container's `STDIN`, `STDOUT`
+or `STDERR`. This makes it possible to manipulate the output and input as
+needed.
+
+    $ echo "test" | docker run -i -a stdin ubuntu cat -
+
+This pipes data into a container and prints the container's ID by attaching
+only to the container's `STDIN`.
+
+    $ docker run -a stderr ubuntu echo test
+
+This isn't going to print anything unless there's an error because we've
+only attached to the `STDERR` of the container.
The container's logs +still store what's been written to `STDERR` and `STDOUT`. + + $ cat somefile | docker run -i -a stdin mybuilder dobuild + +This is how piping a file into a container could be done for a build. +The container's ID will be printed after the build is done and the build +logs could be retrieved using `docker logs`. This is +useful if you need to pipe a file or something else into a container and +retrieve the container's ID once the container has finished running. + + $ docker run --device=/dev/sdc:/dev/xvdc --device=/dev/sdd --device=/dev/zero:/dev/nulo -i -t ubuntu ls -l /dev/{xvdc,sdd,nulo} + brw-rw---- 1 root disk 8, 2 Feb 9 16:05 /dev/xvdc + brw-rw---- 1 root disk 8, 3 Feb 9 16:05 /dev/sdd + crw-rw-rw- 1 root root 1, 5 Feb 9 16:05 /dev/nulo + +It is often necessary to directly expose devices to a container. The `--device` +option enables that. For example, a specific block storage device or loop +device or audio device can be added to an otherwise unprivileged container +(without the `--privileged` flag) and have the application directly access it. + +By default, the container will be able to `read`, `write` and `mknod` these devices. +This can be overridden using a third `:rwm` set of options to each `--device` +flag: + + + $ docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk /dev/xvdc + + Command (m for help): q + $ docker run --device=/dev/sda:/dev/xvdc:ro --rm -it ubuntu fdisk /dev/xvdc + You will not be able to write the partition table. + + Command (m for help): q + + $ docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk /dev/xvdc + + Command (m for help): q + + $ docker run --device=/dev/sda:/dev/xvdc:m --rm -it ubuntu fdisk /dev/xvdc + fdisk: unable to open /dev/xvdc: Operation not permitted + +> **Note:** +> `--device` cannot be safely used with ephemeral devices. Block devices +> that may be removed should not be added to untrusted containers with +> `--device`. + +**A complete example:** + + $ docker run -d --name static static-web-files sh + $ docker run -d --expose=8098 --name riak riakserver + $ docker run -d -m 100m -e DEVELOPMENT=1 -e BRANCH=example-code -v $(pwd):/app/bin:ro --name app appserver + $ docker run -d -p 1443:443 --dns=10.0.0.1 --dns-search=dev.org -v /var/log/httpd --volumes-from static --link riak --link app -h www.sven.dev.org --name web webserver + $ docker run -t -i --rm --volumes-from web -w /var/log/httpd busybox tail -f access.log + +This example shows five containers that might be set up to test a web +application change: + +1. Start a pre-prepared volume image `static-web-files` (in the background) + that has CSS, image and static HTML in it, (with a `VOLUME` instruction in + the Dockerfile to allow the web server to use those files); +2. Start a pre-prepared `riakserver` image, give the container name `riak` and + expose port `8098` to any containers that link to it; +3. Start the `appserver` image, restricting its memory usage to 100MB, setting + two environment variables `DEVELOPMENT` and `BRANCH` and bind-mounting the + current directory (`$(pwd)`) in the container in read-only mode as `/app/bin`; +4. Start the `webserver`, mapping port `443` in the container to port `1443` on + the Docker server, setting the DNS server to `10.0.0.1` and DNS search + domain to `dev.org`, creating a volume to put the log files into (so we can + access it from another container), then importing the files from the volume + exposed by the `static` container, and linking to all exposed ports from + `riak` and `app`. 
Lastly, we set the hostname to `web.sven.dev.org` so it's
+   consistent with the pre-generated SSL certificate;
+5. Finally, we create a container that runs `tail -f access.log` using the logs
+   volume from the `web` container, setting the workdir to `/var/log/httpd`. The
+   `--rm` option means that when the container exits, the container's layer is
+   removed.
+
+## Restart policies
+
+Use Docker's `--restart` to specify a container's *restart policy*. A restart
+policy controls whether the Docker daemon restarts a container after exit.
+Docker supports the following restart policies:
+
+Policy | Result
+---- | ----
+`no` | Do not automatically restart the container when it exits. This is the default.
+`on-failure[:max-retries]` | Restart only if the container exits with a non-zero exit status. Optionally, limit the number of restart retries the Docker daemon attempts.
+`always` | Always restart the container regardless of the exit status. When you specify `always`, the Docker daemon will try to restart the container indefinitely.
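+
+For example, a sketch using the `on-failure` policy (the container name
+`flaky` is only an illustration); the current retry count can be read back
+with `docker inspect`:
+
+    $ docker run -d --name flaky --restart=on-failure:10 redis
+    $ docker inspect --format='{{ .RestartCount }}' flaky
+    0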
+
+    $ docker run --restart=always redis
+
+This will run the `redis` container with a restart policy of **always**
+so that if the container exits, Docker will restart it.
+
+More detailed information on restart policies can be found in the
+[Restart Policies (--restart)](/reference/run/#restart-policies-restart)
+section of the Docker run reference page.
+
+## Adding entries to a container hosts file
+
+You can add other hosts into a container's `/etc/hosts` file by using one or
+more `--add-host` flags. This example adds a static address for a host named
+`docker`:
+
+    $ docker run --add-host=docker:10.180.0.1 --rm -it debian
+    $$ ping docker
+    PING docker (10.180.0.1): 48 data bytes
+    56 bytes from 10.180.0.1: icmp_seq=0 ttl=254 time=7.600 ms
+    56 bytes from 10.180.0.1: icmp_seq=1 ttl=254 time=30.705 ms
+    ^C--- docker ping statistics ---
+    2 packets transmitted, 2 packets received, 0% packet loss
+    round-trip min/avg/max/stddev = 7.600/19.152/30.705/11.553 ms
+
+Sometimes you need to connect to the Docker host from within your
+container. To enable this, pass the Docker host's IP address to
+the container using the `--add-host` flag. To find the host's address,
+use the `ip addr show` command.
+
+The flags you pass to `ip addr show` depend on whether you are
+using IPv4 or IPv6 networking in your containers. Use the following
+flags for IPv4 address retrieval for a network device named `eth0`:
+
+    $ HOSTIP=`ip -4 addr show scope global dev eth0 | grep inet | awk '{print \$2}' | cut -d / -f 1`
+    $ docker run --add-host=docker:${HOSTIP} --rm -it debian
+
+For IPv6 use the `-6` flag instead of the `-4` flag. For other network
+devices, replace `eth0` with the correct device name (for example `docker0`
+for the bridge device).
+
+### Setting ulimits in a container
+
+Since setting `ulimit` settings in a container requires extra privileges not
+available in the default container, you can set these using the `--ulimit` flag.
+`--ulimit` is specified with a soft and hard limit as such:
+`<type>=<soft limit>[:<hard limit>]`, for example:
+
+    $ docker run --ulimit nofile=1024:1024 --rm debian ulimit -n
+    1024
+
+> **Note:**
+> If you do not provide a `hard limit`, the `soft limit` will be used
+> for both values. If no `ulimits` are set, they will be inherited from
+> the default `ulimits` set on the daemon. The `as` option is disabled now.
+> In other words, the following script is not supported:
+> `$ docker run -it --ulimit as=1024 fedora /bin/bash`
+
+The values are sent to the appropriate `syscall` as they are set.
+Docker doesn't perform any byte conversion. Take this into account when
+setting the values.
+
+#### For `nproc` usage
+
+Be careful setting `nproc` with the `ulimit` flag as `nproc` is designed by
+Linux to set the maximum number of processes available to a user, not to a
+container. For example, start four containers with the `daemon` user:
+
+    docker run -d -u daemon --ulimit nproc=3 busybox top
+    docker run -d -u daemon --ulimit nproc=3 busybox top
+    docker run -d -u daemon --ulimit nproc=3 busybox top
+    docker run -d -u daemon --ulimit nproc=3 busybox top
+
+The 4th container fails and reports a "[8] System error: resource temporarily unavailable" error.
+This fails because the caller set `nproc=3`, so the first three containers use up
+the three-process quota set for the `daemon` user.
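+
+Because the limit applies to the user rather than to the container, a fifth
+container run as a different user would still start; a sketch under the same
+assumptions as above:
+
+    $ docker run -d -u nobody --ulimit nproc=3 busybox top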
diff --git a/docs/reference/commandline/save.md b/docs/reference/commandline/save.md new file mode 100644 index 00000000..83913c09 --- /dev/null +++ b/docs/reference/commandline/save.md @@ -0,0 +1,37 @@ + + +# save + + Usage: docker save [OPTIONS] IMAGE [IMAGE...] + + Save an image(s) to a tar archive (streamed to STDOUT by default) + + -o, --output="" Write to a file, instead of STDOUT + +Produces a tarred repository to the standard output stream. +Contains all parent layers, and all tags + versions, or specified `repo:tag`, for +each argument provided. + +It is used to create a backup that can then be used with `docker load` + + $ docker save busybox > busybox.tar + $ ls -sh busybox.tar + 2.7M busybox.tar + $ docker save --output busybox.tar busybox + $ ls -sh busybox.tar + 2.7M busybox.tar + $ docker save -o fedora-all.tar fedora + $ docker save -o fedora-latest.tar fedora:latest + +It is even useful to cherry-pick particular tags of an image repository + + $ docker save -o ubuntu.tar ubuntu:lucid ubuntu:saucy diff --git a/docs/reference/commandline/search.md b/docs/reference/commandline/search.md new file mode 100644 index 00000000..35b4ed4d --- /dev/null +++ b/docs/reference/commandline/search.md @@ -0,0 +1,29 @@ + + +# search + + Usage: docker search [OPTIONS] TERM + + Search the Docker Hub for images + + --automated=false Only show automated builds + --no-trunc=false Don't truncate output + -s, --stars=0 Only displays with at least x stars + +Search [Docker Hub](https://hub.docker.com) for images + +See [*Find Public Images on Docker Hub*](/userguide/dockerrepos/#searching-for-images) for +more details on finding shared images from the command line. + +> **Note:** +> Search queries will only return up to 25 results + diff --git a/docs/reference/commandline/start.md b/docs/reference/commandline/start.md new file mode 100644 index 00000000..c23f5b5f --- /dev/null +++ b/docs/reference/commandline/start.md @@ -0,0 +1,20 @@ + + +# start + + Usage: docker start [OPTIONS] CONTAINER [CONTAINER...] + + Start one or more stopped containers + + -a, --attach=false Attach STDOUT/STDERR and forward signals + -i, --interactive=false Attach container's STDIN + diff --git a/docs/reference/commandline/stats.md b/docs/reference/commandline/stats.md new file mode 100644 index 00000000..7e67099e --- /dev/null +++ b/docs/reference/commandline/stats.md @@ -0,0 +1,34 @@ + + +# stats + + Usage: docker stats CONTAINER [CONTAINER...] + + Display a live stream of one or more containers' resource usage statistics + + --help=false Print usage + --no-stream=false Disable streaming stats and only pull the first result + +Running `docker stats` on multiple containers + + $ docker stats redis1 redis2 + CONTAINER CPU % MEM USAGE/LIMIT MEM % NET I/O + redis1 0.07% 796 KB/64 MB 1.21% 788 B/648 B + redis2 0.07% 2.746 MB/64 MB 4.29% 1.266 KB/648 B + + +The `docker stats` command will only return a live stream of data for running +containers. Stopped containers will not return any data. + +> **Note:** +> If you want more detailed information about a container's resource +> usage, use the API endpoint. \ No newline at end of file diff --git a/docs/reference/commandline/stop.md b/docs/reference/commandline/stop.md new file mode 100644 index 00000000..d2ef8f85 --- /dev/null +++ b/docs/reference/commandline/stop.md @@ -0,0 +1,22 @@ + + +# stop + + Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...] 
+ + Stop a running container by sending SIGTERM and then SIGKILL after a + grace period + + -t, --time=10 Seconds to wait for stop before killing it + +The main process inside the container will receive `SIGTERM`, and after a grace +period, `SIGKILL`. \ No newline at end of file diff --git a/docs/reference/commandline/tag.md b/docs/reference/commandline/tag.md new file mode 100644 index 00000000..57b2aa01 --- /dev/null +++ b/docs/reference/commandline/tag.md @@ -0,0 +1,21 @@ + + +# tag + + Usage: docker tag [OPTIONS] IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG] + + Tag an image into a repository + + -f, --force=false Force + +You can group your images together using names and tags, and then upload them +to [*Share Images via Repositories*](/userguide/dockerrepos/#contributing-to-docker-hub). diff --git a/docs/reference/commandline/top.md b/docs/reference/commandline/top.md new file mode 100644 index 00000000..c56beb31 --- /dev/null +++ b/docs/reference/commandline/top.md @@ -0,0 +1,16 @@ + + +# top + + Usage: docker top CONTAINER [ps OPTIONS] + + Display the running processes of a container \ No newline at end of file diff --git a/docs/reference/commandline/unpause.md b/docs/reference/commandline/unpause.md new file mode 100644 index 00000000..aab7d008 --- /dev/null +++ b/docs/reference/commandline/unpause.md @@ -0,0 +1,23 @@ + + +# unpause + + Usage: docker unpause CONTAINER [CONTAINER...] + + Unpause all processes within a container + +The `docker unpause` command uses the cgroups freezer to un-suspend all +processes in a container. + +See the +[cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroups/freezer-subsystem.txt) +for further details. diff --git a/docs/reference/commandline/version.md b/docs/reference/commandline/version.md new file mode 100644 index 00000000..a976546e --- /dev/null +++ b/docs/reference/commandline/version.md @@ -0,0 +1,56 @@ + + +# version + + Usage: docker version + + Show the Docker version information. + + -f, --format="" Format the output using the given go template + +By default, this will render all version information in an easy to read +layout. If a format is specified, the given template will be executed instead. + +Go's [text/template](http://golang.org/pkg/text/template/) package +describes all the details of the format. + +## Examples + +**Default output:** + + $ docker version + Client: + Version: 1.8.0 + API version: 1.20 + Go version: go1.4.2 + Git commit: f5bae0a + Built: Tue Jun 23 17:56:00 UTC 2015 + OS/Arch: linux/amd64 + + Server: + Version: 1.8.0 + API version: 1.20 + Go version: go1.4.2 + Git commit: f5bae0a + Built: Tue Jun 23 17:56:00 UTC 2015 + OS/Arch: linux/amd64 + +**Get server version:** + + $ docker version --format '{{.Server.Version}}' + 1.8.0 + +**Dump raw data:** + + $ docker version --format '{{json .}}' + {"Client":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"},"ServerOK":true,"Server":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","KernelVersion":"3.13.2-gentoo","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"}} + diff --git a/docs/reference/commandline/wait.md b/docs/reference/commandline/wait.md new file mode 100644 index 00000000..847f3007 --- /dev/null +++ b/docs/reference/commandline/wait.md @@ -0,0 +1,16 @@ + + +# wait + + Usage: docker wait CONTAINER [CONTAINER...] + + Block until a container stops, then print its exit code. 
\ No newline at end of file diff --git a/docs/reference/glossary.md b/docs/reference/glossary.md new file mode 100644 index 00000000..1e2f2884 --- /dev/null +++ b/docs/reference/glossary.md @@ -0,0 +1,208 @@ + + +# Glossary + +A list of terms used around the Docker project. + +## aufs + +aufs (advanced multi layered unification filesystem) is a Linux [filesystem](#filesystem) that +Docker supports as a storage backend. It implements the +[union mount](http://en.wikipedia.org/wiki/Union_mount) for Linux file systems. + +## boot2docker + +[boot2docker](http://boot2docker.io/) is a lightweight Linux distribution made +specifically to run Docker containers. It is a common choice for a [VM](#virtual-machine) +to run Docker on Windows and Mac OS X. + +boot2docker can also refer to the boot2docker management tool on Windows and +Mac OS X which manages the boot2docker VM. + +## btrfs + +btrfs (B-tree file system) is a Linux [filesystem](#filesystem) that Docker +supports as a storage backend. It is a [copy-on-write](http://en.wikipedia.org/wiki/Copy-on-write) +filesystem. + +## build + +build is the process of building Docker images using a [Dockerfile](#dockerfile). +The build uses a Dockerfile and a "context". The context is the set of files in the +directory in which the image is built. + +## cgroups + +cgroups is a Linux kernel feature that limits, accounts for, and isolates +the resource usage (CPU, memory, disk I/O, network, etc.) of a collection +of processes. Docker relies on cgroups to control and isolate resource limits. + +*Also known as : control groups* + +## Compose + +[Compose](https://github.com/docker/compose) is a tool for defining and +running complex applications with Docker. With compose, you define a +multi-container application in a single file, then spin your +application up in a single command which does everything that needs to +be done to get it running. + +*Also known as : docker-compose, fig* + +## container + +A container is a runtime instance of a [docker image](#image). + +A Docker container consists of + +- A Docker image +- Execution environment +- A standard set of instructions + +The concept is borrowed from Shipping Containers, which define a standard to ship +goods globally. Docker defines a standard to ship software. + +## data volume + +A data volume is a specially-designated directory within one or more containers +that bypasses the Union File System. Data volumes are designed to persist data, +independent of the container's life cycle. Docker therefore never automatically +delete volumes when you remove a container, nor will it "garbage collect" +volumes that are no longer referenced by a container. + + +## Docker + +The term Docker can refer to + +- The Docker project as a whole, which is a platform for developers and sysadmins to +develop, ship, and run applications +- The docker daemon process running on the host which manages images and containers + + +## Docker Hub + +The [Docker Hub](https://hub.docker.com/) is a centralized resource for working with +Docker and its components. It provides the following services: + +- Docker image hosting +- User authentication +- Automated image builds and work-flow tools such as build triggers and web hooks +- Integration with GitHub and Bitbucket + + +## Dockerfile + +A Dockerfile is a text document that contains all the commands you would +normally execute manually in order to build a Docker image. Docker can +build images automatically by reading the instructions from a Dockerfile. 
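+
+A minimal sketch of a Dockerfile (the file name `hello.sh` is only a
+placeholder):
+
+    FROM busybox
+    COPY hello.sh /hello.sh
+    CMD ["/hello.sh"]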
+ +## filesystem + +A file system is the method an operating system uses to name files +and assign them locations for efficient storage and retrieval. + +Examples : + +- Linux : ext4, aufs, btrfs, zfs +- Windows : NTFS +- OS X : HFS+ + +## image + +Docker images are the basis of [containers](#container). An Image is an +ordered collection of root filesystem changes and the corresponding +execution parameters for use within a container runtime. An image typically +contains a union of layered filesystems stacked on top of each other. An image +does not have state and it never changes. + +## libcontainer + +libcontainer provides a native Go implementation for creating containers with +namespaces, cgroups, capabilities, and filesystem access controls. It allows +you to manage the lifecycle of the container performing additional operations +after the container is created. + +## link + +links provide an interface to connect Docker containers running on the same host +to each other without exposing the hosts' network ports. When you set up a link, +you create a conduit between a source container and a recipient container. +The recipient can then access select data about the source. To create a link, +you can use the `--link` flag. + +## Machine + +[Machine](https://github.com/docker/machine) is a Docker tool which +makes it really easy to create Docker hosts on your computer, on +cloud providers and inside your own data center. It creates servers, +installs Docker on them, then configures the Docker client to talk to them. + +*Also known as : docker-machine* + +## overlay + +OverlayFS is a [filesystem](#filesystem) service for Linux which implements a +[union mount](http://en.wikipedia.org/wiki/Union_mount) for other file systems. +It is supported by the Docker daemon as a storage driver. + +## registry + +A Registry is a hosted service containing [repositories](#repository) of [images](#image) +which responds to the Registry API. + +The default registry can be accessed using a browser at [Docker Hub](#docker-hub) +or using the `docker search` command. + +## repository + +A repository is a set of Docker images. A repository can be shared by pushing it +to a [registry](#registry) server. The different images in the repository can be +labeled using [tags](#tag). + +Here is an example of the shared [nginx repository](https://registry.hub.docker.com/_/nginx/) +and its [tags](https://registry.hub.docker.com/_/nginx/tags/manage/) + +## Swarm + +[Swarm](https://github.com/docker/swarm) is a native clustering tool for Docker. +Swarm pools together several Docker hosts and exposes them as a single virtual +Docker host. It serves the standard Docker API, so any tool that already works +with Docker can now transparently scale up to multiple hosts. + +*Also known as : docker-swarm* + +## tag + +A tag is a label applied to a Docker image in a [repository](#repository). +tags are how various images in a repository are distinguished from each other. + +*Note : This label is not related to the key=value labels set for docker daemon* + +## Union file system + +Union file systems, or UnionFS, are file systems that operate by creating layers, making them +very lightweight and fast. Docker uses union file systems to provide the building +blocks for containers. + + +## Virtual Machine + +A Virtual Machine is a program that emulates a complete computer and imitates dedicated hardware. +It shares physical hardware resources with other users but isolates the operating system. 
The
+end user has the same experience on a Virtual Machine as they would have on
+dedicated hardware.
+
+Compared to containers, a Virtual Machine is heavier to run, provides more
+isolation, gets its own set of resources and does minimal sharing.
+
+*Also known as : VM*
+
diff --git a/docs/reference/logging/fluentd.md b/docs/reference/logging/fluentd.md
new file mode 100644
index 00000000..71af9f0b
--- /dev/null
+++ b/docs/reference/logging/fluentd.md
@@ -0,0 +1,109 @@
+
+
+# Fluentd logging driver
+
+The `fluentd` logging driver sends container logs to the
+[Fluentd](http://www.fluentd.org/) collector as structured log data. Then, users
+can use any of the [various output plugins of
+Fluentd](http://www.fluentd.org/plugins) to write these logs to various
+destinations.
+
+In addition to the log message itself, the `fluentd` log
+driver sends the following metadata in the structured log message:
+
+| Field            | Description                         |
+|------------------|-------------------------------------|
+| `container_id`   | The full 64-character container ID. |
+| `container_name` | The container name at the time it was started. If you use `docker rename` to rename a container, the new name is not reflected in `fluentd` messages. |
+| `source`         | `stdout` or `stderr`                |
+
+## Usage
+
+Configure the default logging driver by passing the
+`--log-driver` option to the Docker daemon:
+
+    docker --log-driver=fluentd
+
+To set the logging driver for a specific container, pass the
+`--log-driver` option to `docker run`:
+
+    docker run --log-driver=fluentd ...
+
+Before using this logging driver, launch a Fluentd daemon. The logging driver
+connects to this daemon through `localhost:24224` by default. Use the
+`fluentd-address` option to connect to a different address.
+
+    docker run --log-driver=fluentd --log-opt fluentd-address=myhost.local:24224
+
+If the container cannot connect to the Fluentd daemon, the container stops
+immediately.
+
+## Options
+
+Users can use the `--log-opt NAME=VALUE` flag to specify additional Fluentd logging driver options.
+
+### fluentd-address
+
+By default, the logging driver connects to `localhost:24224`. Supply the
+`fluentd-address` option to connect to a different address.
+
+    docker run --log-driver=fluentd --log-opt fluentd-address=myhost.local:24224
+
+### fluentd-tag
+
+Every Fluentd event has a tag that indicates where the log comes from. By
+default, the driver uses the `docker.{{.ID}}` tag. Use the `fluentd-tag` option
+to change this behavior.
+
+When specifying a `fluentd-tag` value, you can use the following markup tags:
+
+ - `{{.ID}}`: short container id (12 characters)
+ - `{{.FullID}}`: full container id
+ - `{{.Name}}`: container name
+
+## Note regarding container names
+
+At startup time, the system sets the `container_name` field and `{{.Name}}`
+in the tags to the container's name. If you use `docker rename` to rename a
+container, the new name is not reflected in `fluentd` messages. Instead,
+these messages continue to use the original container name.
+
+## Fluentd daemon management with Docker
+
+For information about `Fluentd` itself, see [the project webpage](http://www.fluentd.org)
+and [its documentation](http://docs.fluentd.org/).
+
+To use this logging driver, start the `fluentd` daemon on a host. We recommend
+that you use [the Fluentd docker
+image](https://registry.hub.docker.com/u/fluent/fluentd/).
This image is
+especially useful if you want to aggregate multiple container logs on each
+host and then, later, transfer the logs to another Fluentd node to create an
+aggregate store.
+
+### Testing container loggers
+
+1. Write a configuration file (`test.conf`) to dump input logs:
+
+        <source>
+          @type forward
+        </source>
+
+        <match docker.**>
+          @type stdout
+        </match>
+
+2. Launch the Fluentd container with this configuration file:
+
+        $ docker run -it -p 24224:24224 -v /path/to/conf/test.conf:/fluentd/etc -e FLUENTD_CONF=test.conf fluent/fluentd:latest
+
+3. Start one or more containers with the `fluentd` logging driver:
+
+        $ docker run --log-driver=fluentd your/application
diff --git a/docs/reference/logging/index.md b/docs/reference/logging/index.md
new file mode 100644
index 00000000..c6b937d4
--- /dev/null
+++ b/docs/reference/logging/index.md
@@ -0,0 +1,129 @@
+
+
+
+# Configure logging drivers
+
+The container can have a different logging driver than the Docker daemon. Use
+the `--log-driver=VALUE` with the `docker run` command to configure the
+container's logging driver. The following options are supported:
+
+| Driver      | Description                                                                                                                     |
+|-------------|---------------------------------------------------------------------------------------------------------------------------------|
+| `none`      | Disables any logging for the container. `docker logs` won't be available with this driver.                                       |
+| `json-file` | Default logging driver for Docker. Writes JSON messages to file.                                                                 |
+| `syslog`    | Syslog logging driver for Docker. Writes log messages to syslog.                                                                 |
+| `journald`  | Journald logging driver for Docker. Writes log messages to `journald`.                                                           |
+| `gelf`      | Graylog Extended Log Format (GELF) logging driver for Docker. Writes log messages to a GELF endpoint like Graylog or Logstash.   |
+| `fluentd`   | Fluentd logging driver for Docker. Writes log messages to `fluentd` (forward input).                                             |
+
+The `docker logs` command is available only for the `json-file` logging driver.
+
+### The json-file options
+
+The following logging options are supported for the `json-file` logging driver:
+
+    --log-opt max-size=[0-9+][k|m|g]
+    --log-opt max-file=[0-9+]
+
+Logs that reach `max-size` are rolled over. You can set the size in
+kilobytes (k), megabytes (m), or gigabytes (g), e.g. `--log-opt max-size=50m`.
+If `max-size` is not set, then logs are not rolled over.
+
+`max-file` specifies the maximum number of files that a log is rolled over to
+before being discarded, e.g. `--log-opt max-file=100`. If `max-size` is not
+set, then `max-file` is not honored.
+
+If `max-size` and `max-file` are set, `docker logs` only returns the log lines
+from the newest log file.
+
+### The syslog options
+
+The following logging options are supported for the `syslog` logging driver:
+
+    --log-opt syslog-address=[tcp|udp]://host:port
+    --log-opt syslog-address=unix://path
+    --log-opt syslog-facility=daemon
+    --log-opt syslog-tag="mailer"
+
+`syslog-address` specifies the remote syslog server address that the driver
+connects to. If not specified, it defaults to the local unix socket of the
+running system. If the transport is either `tcp` or `udp` and `port` is not
+specified, it defaults to `514`.
+The following example shows how to have the `syslog` driver connect to a
+`syslog` remote server at `192.168.0.42` on port `123`:
+
+    $ docker run --log-driver=syslog --log-opt syslog-address=tcp://192.168.0.42:123
+
+The `syslog-facility` option configures the syslog facility. By default, the
+system uses the `daemon` value.
To override this behavior, you can provide an integer of 0 to 23 or any of
+the following named facilities:
+
+* `kern`
+* `user`
+* `mail`
+* `daemon`
+* `auth`
+* `syslog`
+* `lpr`
+* `news`
+* `uucp`
+* `cron`
+* `authpriv`
+* `ftp`
+* `local0`
+* `local1`
+* `local2`
+* `local3`
+* `local4`
+* `local5`
+* `local6`
+* `local7`
+
+The `syslog-tag` option specifies a tag that identifies the container's syslog messages. By default,
+the system uses the first 12 characters of the container id. To override this behavior, specify
+a `syslog-tag` option.
+
+## Specify journald options
+
+The `journald` logging driver stores the container id in the journal's `CONTAINER_ID` field. For detailed information on
+working with this logging driver, see [the journald logging driver](/reference/logging/journald/)
+reference documentation.
+
+## Specify gelf options
+
+The GELF logging driver supports the following options:
+
+    --log-opt gelf-address=udp://host:port
+    --log-opt gelf-tag="database"
+
+The `gelf-address` option specifies the remote GELF server address that the
+driver connects to. Currently, only `udp` is supported as the transport and you must
+specify a `port` value. The following example shows how to connect the `gelf`
+driver to a GELF remote server at `192.168.0.42` on port `12201`:
+
+    $ docker run --log-driver=gelf --log-opt gelf-address=udp://192.168.0.42:12201
+
+The `gelf-tag` option specifies a tag for easy container identification.
+
+## Specify fluentd options
+
+You can use the `--log-opt NAME=VALUE` flag to specify these additional Fluentd logging driver options:
+
+ - `fluentd-address`: specify `host:port` to connect [localhost:24224]
+ - `fluentd-tag`: specify a tag for `fluentd` messages
+
+When specifying a `fluentd-tag` value, you can use the following markup tags:
+
+ - `{{.ID}}`: short container id (12 characters)
+ - `{{.FullID}}`: full container id
+ - `{{.Name}}`: container name
+
+For example, to specify both additional options:
+
+`docker run --log-driver=fluentd --log-opt fluentd-address=localhost:24224 --log-opt fluentd-tag=docker.{{.Name}}`
+
+If the container cannot connect to the Fluentd daemon on the specified address,
+the container stops immediately. For detailed information on working with this
+logging driver, see [the fluentd logging driver](/reference/logging/fluentd/).
diff --git a/docs/reference/logging/journald.md b/docs/reference/logging/journald.md
new file mode 100644
index 00000000..b5029716
--- /dev/null
+++ b/docs/reference/logging/journald.md
@@ -0,0 +1,76 @@
+
+
+# Journald logging driver
+
+The `journald` logging driver sends container logs to the [systemd
+journal](http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html). Log entries can be retrieved using the `journalctl`
+command or through use of the journal API.
+
+In addition to the text of the log message itself, the `journald` log
+driver stores the following metadata in the journal with each message:
+
+| Field | Description |
+|----------------------|-------------|
+| `CONTAINER_ID` | The container ID truncated to 12 characters. |
+| `CONTAINER_ID_FULL` | The full 64-character container ID. |
+| `CONTAINER_NAME` | The container name at the time it was started. If you use `docker rename` to rename a container, the new name is not reflected in the journal entries. 
| + +## Usage + +You can configure the default logging driver by passing the +`--log-driver` option to the Docker daemon: + + docker --log-driver=journald + +You can set the logging driver for a specific container by using the +`--log-driver` option to `docker run`: + + docker run --log-driver=journald ... + +## Note regarding container names + +The value logged in the `CONTAINER_NAME` field is the container name +that was set at startup. If you use `docker rename` to rename a +container, the new name will not be reflected in the journal entries. +Journal entries will continue to use the original name. + +## Retrieving log messages with journalctl + +You can use the `journalctl` command to retrieve log messages. You +can apply filter expressions to limit the retrieved messages to a +specific container. For example, to retrieve all log messages from a +container referenced by name: + + # journalctl CONTAINER_NAME=webserver + +You can make use of additional filters to further limit the messages +retrieved. For example, to see just those messages generated since +the system last booted: + + # journalctl -b CONTAINER_NAME=webserver + +Or to retrieve log messages in JSON format with complete metadata: + + # journalctl -o json CONTAINER_NAME=webserver + +## Retrieving log messages with the journal API + +This example uses the `systemd` Python module to retrieve container +logs: + + import systemd.journal + + reader = systemd.journal.Reader() + reader.add_match('CONTAINER_NAME=web') + + for msg in reader: + print '{CONTAINER_ID_FULL}: {MESSAGE}'.format(**msg) + diff --git a/docs/reference/run.md b/docs/reference/run.md new file mode 100644 index 00000000..62c175c2 --- /dev/null +++ b/docs/reference/run.md @@ -0,0 +1,1171 @@ + + + + +# Docker run reference + +**Docker runs processes in isolated containers**. When an operator +executes `docker run`, she starts a process with its own file system, +its own networking, and its own isolated process tree. The +[*Image*](/terms/image/#image) which starts the process may define +defaults related to the binary to run, the networking to expose, and +more, but `docker run` gives final control to the operator who starts +the container from the image. That's the main reason +[*run*](/reference/commandline/cli/#run) has more options than any +other `docker` command. + +## General form + +The basic `docker run` command takes this form: + + $ docker run [OPTIONS] IMAGE[:TAG|@DIGEST] [COMMAND] [ARG...] + +To learn how to interpret the types of `[OPTIONS]`, +see [*Option types*](/reference/commandline/cli/#option-types). + +The `run` options control the image's runtime behavior in a container. These +settings affect: + + * detached or foreground running + * container identification + * network settings + * runtime constraints on CPU and memory + * privileges and LXC configuration + +An image developer may set defaults for these same settings when they create the +image using the `docker build` command. Operators, however, can override all +defaults set by the developer using the `run` options. And, operators can also +override nearly all the defaults set by the Docker runtime itself. + +Finally, depending on your Docker system configuration, you may be required to +preface each `docker` command with `sudo`. To avoid having to use `sudo` with +the `docker` command, your system administrator can create a Unix group called +`docker` and add users to it. 
For more information about this configuration,
+refer to the Docker installation documentation for your operating system.
+
+## Operator exclusive options
+
+Only the operator (the person executing `docker run`) can set the
+following options.
+
+ - [Detached vs Foreground](#detached-vs-foreground)
+     - [Detached (-d)](#detached-d)
+     - [Foreground](#foreground)
+ - [Container Identification](#container-identification)
+     - [Name (--name)](#name-name)
+     - [PID Equivalent](#pid-equivalent)
+ - [IPC Settings (--ipc)](#ipc-settings-ipc)
+ - [Network Settings](#network-settings)
+ - [Restart Policies (--restart)](#restart-policies-restart)
+ - [Clean Up (--rm)](#clean-up-rm)
+ - [Runtime Constraints on CPU and Memory](#runtime-constraints-on-cpu-and-memory)
+ - [Runtime Privilege, Linux Capabilities, and LXC Configuration](#runtime-privilege-linux-capabilities-and-lxc-configuration)
+
+## Detached vs foreground
+
+When starting a Docker container, you must first decide if you want to
+run the container in the background in a "detached" mode or in the
+default foreground mode:
+
+    -d=false: Detached mode: Run container in the background, print new container id
+
+### Detached (-d)
+
+In detached mode (`-d=true` or just `-d`), all I/O should be done
+through network connections or shared volumes because the container is
+no longer listening to the command line where you executed `docker run`.
+You can reattach to a detached container with `docker`
+[*attach*](/reference/commandline/cli/#attach). If you choose to run a
+container in the detached mode, then you cannot use the `--rm` option.
+
+### Foreground
+
+In foreground mode (the default when `-d` is not specified), `docker
+run` can start the process in the container and attach the console to
+the process's standard input, standard output, and standard error. It can even
+pretend to be a TTY (this is what most command line executables expect)
+and pass along signals. All of that is configurable:
+
+    -a=[]           : Attach to `STDIN`, `STDOUT` and/or `STDERR`
+    -t=false        : Allocate a pseudo-tty
+    --sig-proxy=true: Proxy all received signals to the process (non-TTY mode only)
+    -i=false        : Keep STDIN open even if not attached
+
+If you do not specify `-a` then Docker will [attach all standard
+streams](https://github.com/docker/docker/blob/75a7f4d90cde0295bcfb7213004abce8d4779b75/commands.go#L1797). You can
+specify to which of the three standard streams (`STDIN`, `STDOUT`,
+`STDERR`) you'd like to connect instead, as in:
+
+    $ docker run -a stdin -a stdout -i -t ubuntu /bin/bash
+
+For interactive processes (like a shell), you must use `-i -t` together in
+order to allocate a tty for the container process. `-i -t` is often written `-it`
+as you'll see in later examples. Specifying `-t` is forbidden when the client
+standard output is redirected or piped, such as in:
+`echo test | docker run -i busybox cat`.
+
+>**Note**: A process running as PID 1 inside a container is treated
+>specially by Linux: it ignores any signal with the default action.
+>So, the process will not terminate on `SIGINT` or `SIGTERM` unless it is
+>coded to do so.
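+For example, a minimal sketch of this workflow (the image, name, and command
+are illustrative): start a container detached, then reattach to it by name.
+Passing `--sig-proxy=false` to `attach` means a later `Ctrl-C` ends your
+attach session without signalling the container's process:
+
+    $ docker run -d --name dates ubuntu /bin/bash -c "while true; do date; sleep 1; done"
+    $ docker attach --sig-proxy=false dates
+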
+ +## Container identification + +### Name (--name) + +The operator can identify a container in three ways: + +- UUID long identifier + ("f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778") +- UUID short identifier ("f78375b1c487") +- Name ("evil_ptolemy") + +The UUID identifiers come from the Docker daemon, and if you do not +assign a name to the container with `--name` then the daemon will also +generate a random string name too. The name can become a handy way to +add meaning to a container since you can use this name when defining +[*links*](/userguide/dockerlinks) (or any +other place you need to identify a container). This works for both +background and foreground Docker containers. + +### PID equivalent + +Finally, to help with automation, you can have Docker write the +container ID out to a file of your choosing. This is similar to how some +programs might write out their process ID to a file (you've seen them as +PID files): + + --cidfile="": Write the container ID to the file + +### Image[:tag] + +While not strictly a means of identifying a container, you can specify a version of an +image you'd like to run the container with by adding `image[:tag]` to the command. For +example, `docker run ubuntu:14.04`. + +### Image[@digest] + +Images using the v2 or later image format have a content-addressable identifier +called a digest. As long as the input used to generate the image is unchanged, +the digest value is predictable and referenceable. + +## PID settings (--pid) + + --pid="" : Set the PID (Process) Namespace mode for the container, + 'host': use the host's PID namespace inside the container + +By default, all containers have the PID namespace enabled. + +PID namespace provides separation of processes. The PID Namespace removes the +view of the system processes, and allows process ids to be reused including +pid 1. + +In certain cases you want your container to share the host's process namespace, +basically allowing processes within the container to see all of the processes +on the system. For example, you could build a container with debugging tools +like `strace` or `gdb`, but want to use these tools when debugging processes +within the container. + + $ docker run --pid=host rhel7 strace -p 1234 + +This command would allow you to use `strace` inside the container on pid 1234 on +the host. + +## UTS settings (--uts) + + --uts="" : Set the UTS namespace mode for the container, + 'host': use the host's UTS namespace inside the container + +The UTS namespace is for setting the hostname and the domain that is visible +to running processes in that namespace. By default, all containers, including +those with `--net=host`, have their own UTS namespace. The `host` setting will +result in the container using the same UTS namespace as the host. + +You may wish to share the UTS namespace with the host if you would like the +hostname of the container to change as the hostname of the host changes. A +more advanced use case would be changing the host's hostname from a container. + +> **Note**: `--uts="host"` gives the container full access to change the +> hostname of the host and is therefore considered insecure. + +## IPC settings (--ipc) + + --ipc="" : Set the IPC mode for the container, + 'container:': reuses another container's IPC namespace + 'host': use the host's IPC namespace inside the container + +By default, all containers have the IPC namespace enabled. 
+
+IPC (POSIX/SysV IPC) namespace provides separation of named shared memory
+segments, semaphores and message queues.
+
+Shared memory segments are used to accelerate inter-process communication at
+memory speed, rather than through pipes or through the network stack. Shared
+memory is commonly used by databases and custom-built (typically C/OpenMPI,
+C++/using boost libraries) high performance applications for scientific
+computing and financial services industries. If these types of applications
+are broken into multiple containers, you might need to share the IPC mechanisms
+of the containers.
+
+## Network settings
+
+    --dns=[]           : Set custom dns servers for the container
+    --net="bridge"     : Set the Network mode for the container
+                          'bridge': creates a new network stack for the container on the docker bridge
+                          'none': no networking for this container
+                          'container:<name|id>': reuses another container's network stack
+                          'host': use the host network stack inside the container
+    --add-host=""      : Add a line to /etc/hosts (host:IP)
+    --mac-address=""   : Sets the container's Ethernet device's MAC address
+
+By default, all containers have networking enabled and they can make any
+outgoing connections. The operator can completely disable networking
+with `docker run --net none` which disables all incoming and outgoing
+networking. In cases like this, you would perform I/O through files or
+`STDIN` and `STDOUT` only.
+
+Publishing ports and linking to other containers will not work
+when `--net` is anything other than the default (bridge).
+
+Your container will use the same DNS servers as the host by default, but
+you can override this with `--dns`.
+
+By default, the MAC address is generated using the IP address allocated to the
+container. You can set the container's MAC address explicitly by providing a
+MAC address via the `--mac-address` parameter (format: `12:34:56:78:9a:bc`).
+
+Supported networking modes are:
+
+| Mode | Description |
+|------|-------------|
+| `none` | No networking in the container. |
+| `bridge` (default) | Connect the container to the bridge via veth interfaces. |
+| `container:<name\|id>` | Use the network stack of another container, specified via its *name* or *id*. |
+| `host` | Use the host's network stack inside the container. |
+
+#### Mode: none
+
+With the networking mode set to `none` a container will not have
+access to any external routes. The container will still have a
+`loopback` interface enabled in the container but it does not have any
+routes to external traffic.
+
+#### Mode: bridge
+
+With the networking mode set to `bridge` a container will use docker's
+default networking setup. A bridge is set up on the host, commonly named
+`docker0`, and a pair of `veth` interfaces will be created for the
+container. One side of the `veth` pair will remain on the host attached
+to the bridge while the other side of the pair will be placed inside the
+container's namespaces in addition to the `loopback` interface. An IP
+address will be allocated for containers on the bridge's network and
+traffic will be routed through this bridge to the container.
+
+#### Mode: host
+
+With the networking mode set to `host` a container will share the host's
+network stack and all interfaces from the host will be available to the
+container. The container's hostname will match the hostname on the host
+system. Note that `--add-host`, `--hostname`, `--dns`, `--dns-search` and
+`--mac-address` are invalid in `host` netmode.
+
+Compared to the default `bridge` mode, the `host` mode gives *significantly*
+better networking performance since it uses the host's native networking stack
+whereas the bridge has to go through one level of virtualization through the
+docker daemon. It is recommended to run containers in this mode when their
+networking performance is critical, for example, a production Load Balancer
+or a High Performance Web Server.
+
+> **Note**: `--net="host"` gives the container full access to local system
+> services such as D-bus and is therefore considered insecure.
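+For example, a quick sketch of host networking in action, reusing the
+`ubuntu:14.04` image and the `ip` tool that the capability examples later in
+this document rely on:
+
+    $ docker run --rm --net=host ubuntu:14.04 ip addr
+
+This lists the host's own network interfaces from inside the container.
+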
+#### Mode: container
+
+With the networking mode set to `container` a container will share the
+network stack of another container. The other container's name must be
+provided in the format of `--net container:<name|id>`. Note that `--add-host`,
+`--hostname`, `--dns`, `--dns-search` and `--mac-address` are invalid
+in `container` netmode, and `--publish`, `--publish-all` and `--expose` are also
+invalid in `container` netmode.
+
+The following example runs a Redis container with Redis binding to `localhost`,
+then runs the `redis-cli` command and connects to the Redis server over the
+`localhost` interface:
+
+    $ docker run -d --name redis example/redis --bind 127.0.0.1
+    $ # use the redis container's network stack to access localhost
+    $ docker run --rm -it --net container:redis example/redis-cli -h 127.0.0.1
+
+### Managing /etc/hosts
+
+Your container will have lines in `/etc/hosts` which define the hostname of the
+container itself as well as `localhost` and a few other common things. The
+`--add-host` flag can be used to add additional lines to `/etc/hosts`.
+
+    $ docker run -it --add-host db-static:86.75.30.9 ubuntu cat /etc/hosts
+    172.17.0.22    09d03f76bf2c
+    fe00::0        ip6-localnet
+    ff00::0        ip6-mcastprefix
+    ff02::1        ip6-allnodes
+    ff02::2        ip6-allrouters
+    127.0.0.1      localhost
+    ::1            localhost ip6-localhost ip6-loopback
+    86.75.30.9     db-static
+
+## Restart policies (--restart)
+
+Using the `--restart` flag on Docker run you can specify a restart policy for
+how a container should or should not be restarted on exit.
+
+When a restart policy is active on a container, it will be shown as either `Up`
+or `Restarting` in [`docker ps`](/reference/commandline/cli/#ps). It can also be
+useful to use [`docker events`](/reference/commandline/cli/#events) to see the
+restart policy in effect.
+
+Docker supports the following restart policies:
+
+| Policy | Result |
+|--------|--------|
+| `no` | Do not automatically restart the container when it exits. This is the default. |
+| `on-failure[:max-retries]` | Restart only if the container exits with a non-zero exit status. Optionally, limit the number of restart retries the Docker daemon attempts. |
+| `always` | Always restart the container regardless of the exit status. When you specify `always`, the Docker daemon will try to restart the container indefinitely. |
+
+An ever-increasing delay (double the previous delay, starting at 100
+milliseconds) is added before each restart to prevent flooding the server.
+This means the daemon will wait for 100 ms, then 200 ms, 400 ms, 800 ms, 1600 ms,
+and so on until either the `on-failure` limit is hit, or you `docker stop`
+or `docker rm -f` the container.
+
+If a container is successfully restarted (the container is started and runs
+for at least 10 seconds), the delay is reset to its default value of 100 ms.
+
+You can specify the maximum number of times Docker will try to restart the
+container when using the **on-failure** policy. The default is that Docker
+will try forever to restart the container. The number of (attempted) restarts
+for a container can be obtained via [`docker inspect`](
+/reference/commandline/cli/#inspect). For example, to get the number of restarts
+for the container "my-container":
+
+    $ docker inspect -f "{{ .RestartCount }}" my-container
+    # 2
+
+Or, to get the last time the container was (re)started:
+
+    $ docker inspect -f "{{ .State.StartedAt }}" my-container
+    # 2015-03-04T23:47:07.691840179Z
+
+You cannot set any restart policy in combination with
+["clean up (--rm)"](#clean-up-rm). Setting both `--restart` and `--rm`
+results in an error.
+
+### Examples
+
+    $ docker run --restart=always redis
+
+This will run the `redis` container with a restart policy of **always**
+so that if the container exits, Docker will restart it.
+
+    $ docker run --restart=on-failure:10 redis
+
+This will run the `redis` container with a restart policy of **on-failure**
+and a maximum restart count of 10. If the `redis` container exits with a
+non-zero exit status more than 10 times in a row Docker will abort trying to
+restart the container. Providing a maximum restart limit is only valid for the
+**on-failure** policy.
+
+## Clean up (--rm)
+
+By default a container's file system persists even after the container
+exits. This makes debugging a lot easier (since you can inspect the
+final state) and you retain all your data by default. But if you are
+running short-term **foreground** processes, these container file
+systems can really pile up. If instead you'd like Docker to
+**automatically clean up the container and remove the file system when
+the container exits**, you can add the `--rm` flag:
+
+    --rm=false: Automatically remove the container when it exits (incompatible with -d)
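+For example, a short-lived sketch of a run; the container's file system is
+removed as soon as the command exits:
+
+    $ docker run --rm ubuntu:14.04 echo hello
+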
+## Security configuration
+    --security-opt="label:user:USER"   : Set the label user for the container
+    --security-opt="label:role:ROLE"   : Set the label role for the container
+    --security-opt="label:type:TYPE"   : Set the label type for the container
+    --security-opt="label:level:LEVEL" : Set the label level for the container
+    --security-opt="label:disable"     : Turn off label confinement for the container
+    --security-opt="apparmor:PROFILE"  : Set the apparmor profile to be applied
+                                         to the container
+
+You can override the default labeling scheme for each container by specifying
+the `--security-opt` flag. For example, you can specify the MCS/MLS level, a
+requirement for MLS systems. Specifying the level in the following command
+allows you to share the same content between containers.
+
+    $ docker run --security-opt label:level:s0:c100,c200 -i -t fedora bash
+
+An MLS example might be:
+
+    $ docker run --security-opt label:level:TopSecret -i -t rhel7 bash
+
+To disable the security labeling for this container, versus running with the
+`--permissive` flag, use the following command:
+
+    $ docker run --security-opt label:disable -i -t fedora bash
+
+> **Note:** You would have to write policy defining a `svirt_apache_t` type
+> for the example below.
+
+If you want a tighter security policy on the processes within a container,
+you can specify an alternate type for the container. You could run a container
+that is only allowed to listen on Apache ports by executing the following
+command:
+
+    $ docker run --security-opt label:type:svirt_apache_t -i -t centos bash
+
+## Specifying custom cgroups
+
+Using the `--cgroup-parent` flag, you can pass a specific cgroup to run a
+container in. This allows you to create and manage cgroups on your own. You can
+define custom resources for those cgroups and put containers under a common
+parent group.
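+For example, a sketch (the cgroup path here is illustrative):
+
+    $ docker run -it --cgroup-parent=/my-parent ubuntu:14.04 /bin/bash
+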
+## Runtime constraints on resources
+
+The operator can also adjust the performance parameters of the
+container:
+
+    -m, --memory="": Memory limit (format: <number><unit>, where unit = b, k, m or g)
+    --memory-swap="": Total memory limit (memory + swap, format: <number><unit>, where unit = b, k, m or g)
+    -c, --cpu-shares=0: CPU shares (relative weight)
+    --cpu-period=0: Limit the CPU CFS (Completely Fair Scheduler) period
+    --cpuset-cpus="": CPUs in which to allow execution (0-3, 0,1)
+    --cpuset-mems="": Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
+    --cpu-quota=0: Limit the CPU CFS (Completely Fair Scheduler) quota
+    --blkio-weight=0: Block IO weight (relative weight) accepts a weight value between 10 and 1000.
+    --oom-kill-disable=true|false: Whether to disable OOM Killer for the container or not.
+    --memory-swappiness="": Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+
+### Memory constraints
+
+We have four ways to set memory usage:
+
+| Option | Result |
+|--------|--------|
+| `memory=inf, memory-swap=inf` (default) | There is no memory limit for the container. The container can use as much memory as needed. |
+| `memory=L<inf, memory-swap=inf` | (specify memory and set memory-swap as `-1`) The container is not allowed to use more than L bytes of memory, but can use as much swap as is needed (if the host supports swap memory). |
+| `memory=L<inf, memory-swap=2*L` | (specify memory without memory-swap) The container is not allowed to use more than L bytes of memory; swap *plus* memory usage is double of that. |
+| `memory=L<inf, memory-swap=S<inf, L<=S` | (specify both memory and memory-swap) The container is not allowed to use more than L bytes of memory; swap *plus* memory usage is limited by S. |
+
+Examples:
+
+    $ docker run -ti ubuntu:14.04 /bin/bash
+
+We set nothing about memory, so the processes in the container can use
+as much memory and swap memory as they need.
+
+    $ docker run -ti -m 300M --memory-swap -1 ubuntu:14.04 /bin/bash
+
+We set a memory limit and disabled the swap memory limit, so the processes in
+the container can use 300M of memory and as much swap memory as they need (if the
+host supports swap memory).
+
+    $ docker run -ti -m 300M ubuntu:14.04 /bin/bash
+
+We set the memory limit only, so the processes in the container can use
+300M of memory and 300M of swap memory. By default, the total virtual memory size
+(--memory-swap) will be set as double of memory; in this case, memory + swap
+would be 2*300M, so processes can use 300M of swap memory as well.
+
+    $ docker run -ti -m 300M --memory-swap 1G ubuntu:14.04 /bin/bash
+
+We set both memory and swap memory, so the processes in the container can use
+300M of memory and 700M of swap memory.
+
+By default, the kernel kills processes in a container if an out-of-memory (OOM)
+error occurs. To change this behavior, use the `--oom-kill-disable` option.
+Only disable the OOM killer on containers where you have also set the
+`-m/--memory` option. If the `-m` flag is not set, this can result in the host
+running out of memory and require killing the host's system processes to free
+memory.
+
+Examples:
+
+The following example limits the memory to 100M and disables the OOM killer for
+this container:
+
+    $ docker run -ti -m 100M --oom-kill-disable ubuntu:14.04 /bin/bash
+
+The following example illustrates a dangerous way to use the flag:
+
+    $ docker run -ti --oom-kill-disable ubuntu:14.04 /bin/bash
+
+The container has unlimited memory, which can cause the host to run out of memory
+and require killing system processes to free memory.
+
+### Swappiness constraint
+
+By default, a container's kernel can swap out a percentage of anonymous pages.
+To set this percentage for a container, specify a `--memory-swappiness` value
+between 0 and 100. A value of 0 turns off anonymous page swapping. A value of
+100 sets all anonymous pages as swappable. By default, if you are not using
+`--memory-swappiness`, the memory swappiness value is inherited from the parent.
+
+For example, you can set:
+
+    $ docker run -ti --memory-swappiness=0 ubuntu:14.04 /bin/bash
+
+Setting the `--memory-swappiness` option is helpful when you want to retain the
+container's working set and to avoid swapping performance penalties.
+
+### CPU share constraint
+
+By default, all containers get the same proportion of CPU cycles. This proportion
+can be modified by changing the container's CPU share weighting relative
+to the weighting of all other running containers.
+
+To modify the proportion from the default of 1024, use the `-c` or `--cpu-shares`
+flag to set the weighting to 2 or higher.
+
+The proportion will only apply when CPU-intensive processes are running.
+When tasks in one container are idle, other containers can use the
+left-over CPU time. The actual amount of CPU time will vary depending on
+the number of containers running on the system.
+
+For example, consider three containers, one has a cpu-share of 1024 and
+two others have a cpu-share setting of 512. When processes in all three
+containers attempt to use 100% of CPU, the first container would receive
+50% of the total CPU time. If you add a fourth container with a cpu-share
+of 1024, the first container only gets 33% of the CPU. The remaining containers
+receive 16.7%, 16.7% and 33% of the CPU.
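+For example, a sketch that gives a container half the default weight:
+
+    $ docker run -ti -c 512 ubuntu:14.04 /bin/bash
+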
+On a multi-core system, the shares of CPU time are distributed over all CPU
+cores. Even if a container is limited to less than 100% of CPU time, it can
+use 100% of each individual CPU core.
+
+For example, consider a system with more than three cores. If you start one
+container `{C0}` with `-c=512` running one process, and another container
+`{C1}` with `-c=1024` running two processes, this can result in the following
+division of CPU shares:
+
+    PID    container    CPU    CPU share
+    100    {C0}         0      100% of CPU0
+    101    {C1}         1      100% of CPU1
+    102    {C1}         2      100% of CPU2
+
+### CPU period constraint
+
+The default CPU CFS (Completely Fair Scheduler) period is 100ms. We can use
+`--cpu-period` to set the period of CPUs to limit the container's CPU usage.
+Usually, `--cpu-period` should be used together with `--cpu-quota`.
+
+Examples:
+
+    $ docker run -ti --cpu-period=50000 --cpu-quota=25000 ubuntu:14.04 /bin/bash
+
+If there is 1 CPU, this means the container can get 50% CPU worth of run-time every 50ms.
+
+For more information, see the [CFS documentation on bandwidth limiting](https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt).
+
+### Cpuset constraint
+
+We can set the cpus in which to allow execution for containers.
+
+Examples:
+
+    $ docker run -ti --cpuset-cpus="1,3" ubuntu:14.04 /bin/bash
+
+This means the processes in the container can be executed on cpu 1 and cpu 3.
+
+    $ docker run -ti --cpuset-cpus="0-2" ubuntu:14.04 /bin/bash
+
+This means the processes in the container can be executed on cpu 0, cpu 1 and cpu 2.
+
+We can also set the memory nodes (mems) in which to allow execution for
+containers. This is only effective on NUMA systems.
+
+Examples:
+
+    $ docker run -ti --cpuset-mems="1,3" ubuntu:14.04 /bin/bash
+
+This example restricts the processes in the container to only use memory from
+memory nodes 1 and 3.
+
+    $ docker run -ti --cpuset-mems="0-2" ubuntu:14.04 /bin/bash
+
+This example restricts the processes in the container to only use memory from
+memory nodes 0, 1 and 2.
+
+### CPU quota constraint
+
+The `--cpu-quota` flag limits the container's CPU usage. The default 0 value
+allows the container to take 100% of a CPU resource (1 CPU). The CFS (Completely Fair
+Scheduler) handles resource allocation for executing processes and is the default
+Linux scheduler used by the kernel. Set this value to 50000 to limit the container
+to 50% of a CPU resource. For multiple CPUs, adjust the `--cpu-quota` as necessary.
+For more information, see the [CFS documentation on bandwidth limiting](https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt).
+
+### Block IO bandwidth (Blkio) constraint
+
+By default, all containers get the same proportion of block IO bandwidth
+(blkio). This proportion is 500. To modify this proportion, change the
+container's blkio weight relative to the weighting of all other running
+containers using the `--blkio-weight` flag.
+
+The `--blkio-weight` flag can set the weighting to a value between 10 and 1000.
+For example, the commands below create two containers with different blkio
+weight:
+
+    $ docker run -ti --name c1 --blkio-weight 300 ubuntu:14.04 /bin/bash
+    $ docker run -ti --name c2 --blkio-weight 600 ubuntu:14.04 /bin/bash
+
+If you do block IO in the two containers at the same time, for example:
+
+    $ time dd if=/mnt/zerofile of=test.out bs=1M count=1024 oflag=direct
+
+You'll find that the proportion of time is the same as the proportion of blkio
+weights of the two containers.
+
+> **Note:** The blkio weight setting is only available for direct IO. Buffered IO
+> is not currently supported.
+
+## Additional groups
+    --group-add: Add additional groups to join
+
+By default, the docker container process runs with the supplementary groups looked
+up for the specified user. If one wants to add more to that list of groups, then
+one can use this flag:
+
+    $ docker run -ti --rm --group-add audio --group-add dbus --group-add 777 busybox id
+    uid=0(root) gid=0(root) groups=10(wheel),29(audio),81(dbus),777
+
+## Runtime privilege, Linux capabilities, and LXC configuration
+
+    --cap-add: Add Linux capabilities
+    --cap-drop: Drop Linux capabilities
+    --privileged=false: Give extended privileges to this container
+    --device=[]: Allows you to run devices inside the container without the --privileged flag.
+    --lxc-conf=[]: Add custom lxc options
+
+By default, Docker containers are "unprivileged" and cannot, for
+example, run a Docker daemon inside a Docker container. This is because
+by default a container is not allowed to access any devices, but a
+"privileged" container is given access to all devices (see [lxc-template.go](
+https://github.com/docker/docker/blob/master/daemon/execdriver/lxc/lxc_template.go)
+and documentation on [cgroups devices](
+https://www.kernel.org/doc/Documentation/cgroups/devices.txt)).
+
+When the operator executes `docker run --privileged`, Docker will enable
+access to all devices on the host as well as set some configuration
+in AppArmor or SELinux to allow the container nearly all the same access to the
+host as processes running outside containers on the host. Additional
+information about running with `--privileged` is available on the
+[Docker Blog](http://blog.docker.com/2013/09/docker-can-now-run-within-docker/).
+
+If you want to limit access to a specific device or devices you can use
+the `--device` flag. It allows you to specify one or more devices that
+will be accessible within the container.
+
+    $ docker run --device=/dev/snd:/dev/snd ...
+
+By default, the container will be able to `read`, `write`, and `mknod` these devices.
+This can be overridden using a third `:rwm` set of options to each `--device` flag:
+
+    $ docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk /dev/xvdc
+
+    Command (m for help): q
+    $ docker run --device=/dev/sda:/dev/xvdc:r --rm -it ubuntu fdisk /dev/xvdc
+    You will not be able to write the partition table.
+
+    Command (m for help): q
+
+    $ docker run --device=/dev/sda:/dev/xvdc:w --rm -it ubuntu fdisk /dev/xvdc
+        crash....
+
+    $ docker run --device=/dev/sda:/dev/xvdc:m --rm -it ubuntu fdisk /dev/xvdc
+    fdisk: unable to open /dev/xvdc: Operation not permitted
+
+In addition to `--privileged`, the operator can have fine-grained control over the
+capabilities using `--cap-add` and `--cap-drop`. Docker has a default
+list of capabilities that are kept. The following table lists the Linux capability options which can be added or dropped.
+
+| Capability Key | Capability Description |
+| -------------- | ---------------------- |
+| SETPCAP | Modify process capabilities. |
+| SYS_MODULE | Load and unload kernel modules. |
+| SYS_RAWIO | Perform I/O port operations (iopl(2) and ioperm(2)). |
+| SYS_PACCT | Use acct(2), switch process accounting on or off. |
+| SYS_ADMIN | Perform a range of system administration operations. |
+| SYS_NICE | Raise process nice value (nice(2), setpriority(2)) and change the nice value for arbitrary processes. |
+| SYS_RESOURCE | Override resource limits. |
+| SYS_TIME | Set system clock (settimeofday(2), stime(2), adjtimex(2)); set real-time (hardware) clock. |
+| SYS_TTY_CONFIG | Use vhangup(2); employ various privileged ioctl(2) operations on virtual terminals. |
+| MKNOD | Create special files using mknod(2). |
+| AUDIT_WRITE | Write records to kernel auditing log. |
+| AUDIT_CONTROL | Enable and disable kernel auditing; change auditing filter rules; retrieve auditing status and filtering rules. |
+| MAC_OVERRIDE | Allow MAC configuration or state changes. Implemented for the Smack LSM. |
+| MAC_ADMIN | Override Mandatory Access Control (MAC). Implemented for the Smack Linux Security Module (LSM). |
+| NET_ADMIN | Perform various network-related operations. |
+| SYSLOG | Perform privileged syslog(2) operations. |
+| CHOWN | Make arbitrary changes to file UIDs and GIDs (see chown(2)). |
+| NET_RAW | Use RAW and PACKET sockets. |
+| DAC_OVERRIDE | Bypass file read, write, and execute permission checks. |
+| FOWNER | Bypass permission checks on operations that normally require the file system UID of the process to match the UID of the file. |
+| DAC_READ_SEARCH | Bypass file read permission checks and directory read and execute permission checks. |
+| FSETID | Don't clear set-user-ID and set-group-ID permission bits when a file is modified. |
+| KILL | Bypass permission checks for sending signals. |
+| SETGID | Make arbitrary manipulations of process GIDs and supplementary GID list. |
+| SETUID | Make arbitrary manipulations of process UIDs. |
+| LINUX_IMMUTABLE | Set the FS_APPEND_FL and FS_IMMUTABLE_FL i-node flags. |
+| NET_BIND_SERVICE | Bind a socket to internet domain privileged ports (port numbers less than 1024). |
+| NET_BROADCAST | Make socket broadcasts, and listen to multicasts. |
+| IPC_LOCK | Lock memory (mlock(2), mlockall(2), mmap(2), shmctl(2)). |
+| IPC_OWNER | Bypass permission checks for operations on System V IPC objects. |
+| SYS_CHROOT | Use chroot(2), change root directory. |
+| SYS_PTRACE | Trace arbitrary processes using ptrace(2). |
+| SYS_BOOT | Use reboot(2) and kexec_load(2), reboot and load a new kernel for later execution. |
+| LEASE | Establish leases on arbitrary files (see fcntl(2)). |
+| SETFCAP | Set file capabilities. |
+| WAKE_ALARM | Trigger something that will wake up the system. |
+| BLOCK_SUSPEND | Employ features that can block system suspend. |
+
+Further reference information is available on the [capabilities(7) - Linux man page](http://linux.die.net/man/7/capabilities).
+
+Both flags support the value `all`, so if the
+operator wants to have all capabilities but `MKNOD` they could use:
+
+    $ docker run --cap-add=ALL --cap-drop=MKNOD ...
+
+For interacting with the network stack, instead of using `--privileged` the
+operator should use `--cap-add=NET_ADMIN` to modify the network interfaces.
+
+    $ docker run -t -i --rm ubuntu:14.04 ip link add dummy0 type dummy
+    RTNETLINK answers: Operation not permitted
+    $ docker run -t -i --rm --cap-add=NET_ADMIN ubuntu:14.04 ip link add dummy0 type dummy
+
+To mount a FUSE based filesystem, you need to combine both `--cap-add` and
+`--device`:
+
+    $ docker run --rm -it --cap-add SYS_ADMIN sshfs sshfs sven@10.10.10.20:/home/sven /mnt
+    fuse: failed to open /dev/fuse: Operation not permitted
+    $ docker run --rm -it --device /dev/fuse sshfs sshfs sven@10.10.10.20:/home/sven /mnt
+    fusermount: mount failed: Operation not permitted
+    $ docker run --rm -it --cap-add SYS_ADMIN --device /dev/fuse sshfs
+    # sshfs sven@10.10.10.20:/home/sven /mnt
+    The authenticity of host '10.10.10.20 (10.10.10.20)' can't be established.
+    ECDSA key fingerprint is 25:34:85:75:25:b0:17:46:05:19:04:93:b5:dd:5f:c6.
+    Are you sure you want to continue connecting (yes/no)? yes
+    sven@10.10.10.20's password:
+    root@30aa0cfaf1b5:/# ls -la /mnt/src/docker
+    total 1516
+    drwxrwxr-x 1 1000 1000   4096 Dec  4 06:08 .
+    drwxrwxr-x 1 1000 1000   4096 Dec  4 11:46 ..
+    -rw-rw-r-- 1 1000 1000     16 Oct  8 00:09 .dockerignore
+    -rwxrwxr-x 1 1000 1000    464 Oct  8 00:09 .drone.yml
+    drwxrwxr-x 1 1000 1000   4096 Dec  4 06:11 .git
+    -rw-rw-r-- 1 1000 1000    461 Dec  4 06:08 .gitignore
+    ....
+
+If the Docker daemon was started using the `lxc` exec-driver
+(`docker -d --exec-driver=lxc`) then the operator can also specify LXC options
+using one or more `--lxc-conf` parameters. These can be new parameters or
+override existing parameters from the [lxc-template.go](
+https://github.com/docker/docker/blob/master/daemon/execdriver/lxc/lxc_template.go).
+Note that in the future, a given host's docker daemon may not use LXC, so this
+is an implementation-specific configuration meant for operators already
+familiar with using LXC directly.
+
+> **Note:**
+> If you use `--lxc-conf` to modify a container's configuration which is also
+> managed by the Docker daemon, then the Docker daemon will not know about this
+> modification, and you will need to manage any conflicts yourself. For example,
+> you can use `--lxc-conf` to set a container's IP address, but this will not be
+> reflected in the `/etc/hosts` file.
+
+## Logging drivers (--log-driver)
+
+The container can have a different logging driver than the Docker daemon. Use
+the `--log-driver=VALUE` with the `docker run` command to configure the
+container's logging driver. The following options are supported:
+
+| Driver | Description |
+|-------------|-------------------------------------------------------------------------------------------------------------------------------|
+| `none` | Disables any logging for the container. `docker logs` won't be available with this driver. |
+| `json-file` | Default logging driver for Docker. Writes JSON messages to file. No logging options are supported for this driver. |
+| `syslog` | Syslog logging driver for Docker. Writes log messages to syslog. |
+| `journald` | Journald logging driver for Docker. Writes log messages to `journald`. |
+| `gelf` | Graylog Extended Log Format (GELF) logging driver for Docker. Writes log messages to a GELF endpoint like Graylog or Logstash. |
+| `fluentd` | Fluentd logging driver for Docker. Writes log messages to `fluentd` (forward input). |
+
+The `docker logs` command is available only for the `json-file` logging
+driver. For detailed information on working with logging drivers, see
+[Configure a logging driver](/reference/logging/).
+
+#### Logging driver: fluentd
+
+Fluentd logging driver for Docker.
Writes log messages to `fluentd` (forward input). The `docker logs`
+command is not available for this logging driver.
+
+Some options are supported by specifying `--log-opt` as many times as needed, like `--log-opt fluentd-address=localhost:24224 --log-opt fluentd-tag=docker.{{.Name}}`.
+
+ - `fluentd-address`: specify `host:port` to connect [localhost:24224]
+ - `fluentd-tag`: specify a tag for `fluentd` messages, which interprets some markup, for example `{{.ID}}`, `{{.FullID}}` or `{{.Name}}` [docker.{{.ID}}]
+
+## Overriding Dockerfile image defaults
+
+When a developer builds an image from a [*Dockerfile*](/reference/builder)
+or when she commits it, the developer can set a number of default parameters
+that take effect when the image starts up as a container.
+
+Four of the Dockerfile commands cannot be overridden at runtime: `FROM`,
+`MAINTAINER`, `RUN`, and `ADD`. Everything else has a corresponding override
+in `docker run`. We'll go through what the developer might have set in each
+Dockerfile instruction and how the operator can override that setting.
+
+ - [CMD (Default Command or Options)](#cmd-default-command-or-options)
+ - [ENTRYPOINT (Default Command to Execute at Runtime)](#entrypoint-default-command-to-execute-at-runtime)
+ - [EXPOSE (Incoming Ports)](#expose-incoming-ports)
+ - [ENV (Environment Variables)](#env-environment-variables)
+ - [VOLUME (Shared Filesystems)](#volume-shared-filesystems)
+ - [USER](#user)
+ - [WORKDIR](#workdir)
+
+## CMD (default command or options)
+
+Recall the optional `COMMAND` in the Docker
+commandline:
+
+    $ docker run [OPTIONS] IMAGE[:TAG|@DIGEST] [COMMAND] [ARG...]
+
+This command is optional because the person who created the `IMAGE` may
+have already provided a default `COMMAND` using the Dockerfile `CMD`
+instruction. As the operator (the person running a container from the
+image), you can override that `CMD` instruction just by specifying a new
+`COMMAND`.
+
+If the image also specifies an `ENTRYPOINT` then the `CMD` or `COMMAND`
+get appended as arguments to the `ENTRYPOINT`.
+
+## ENTRYPOINT (default command to execute at runtime)
+
+    --entrypoint="": Overwrite the default entrypoint set by the image
+
+The `ENTRYPOINT` of an image is similar to a `COMMAND` because it
+specifies what executable to run when the container starts, but it is
+(purposely) more difficult to override. The `ENTRYPOINT` gives a
+container its default nature or behavior, so that when you set an
+`ENTRYPOINT` you can run the container *as if it were that binary*,
+complete with default options, and you can pass in more options via the
+`COMMAND`. But, sometimes an operator may want to run something else
+inside the container, so you can override the default `ENTRYPOINT` at
+runtime by using a string to specify the new `ENTRYPOINT`. Here is an
+example of how to run a shell in a container that has been set up to
+automatically run something else (like `/usr/bin/redis-server`):
+
+    $ docker run -i -t --entrypoint /bin/bash example/redis
+
+or two examples of how to pass more parameters to that ENTRYPOINT:
+
+    $ docker run -i -t --entrypoint /bin/bash example/redis -c ls -l
+    $ docker run -i -t --entrypoint /usr/bin/redis-cli example/redis --help
+
+## EXPOSE (incoming ports)
+
+The Dockerfile doesn't give much control over networking, only providing
+the `EXPOSE` instruction to give a hint to the operator about what
+incoming ports might provide services.
The following options work with
+or override the Dockerfile's exposed defaults:
+
+    --expose=[]: Expose a port or a range of ports from the container
+                without publishing it to your host
+    -P=false   : Publish all exposed ports to the host interfaces
+    -p=[]      : Publish a container's port or a range of ports to the host
+                 format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort
+                 Both hostPort and containerPort can be specified as a range of ports.
+                 When specifying ranges for both, the number of container ports in the range must match the number of host ports in the range. (e.g., `-p 1234-1236:1234-1236/tcp`)
+                 (use 'docker port' to see the actual mapping)
+    --link=""  : Add link to another container (<name or id>:alias or <name or id>)
+
+As mentioned previously, `EXPOSE` (and `--expose`) makes ports available
+**in** a container for incoming connections. The port number on the
+inside of the container (where the service listens) does not need to be
+the same number as the port exposed on the outside of the container
+(where clients connect), so inside the container you might have an HTTP
+service listening on port 80 (and so you `EXPOSE 80` in the Dockerfile),
+but outside the container the port might be 42800.
+
+To help a new client container reach the server container's internal
+port, either `--expose`'d by the operator or `EXPOSE`'d by the
+developer, the operator has three choices: start the server container
+with `-P` or `-p`, or start the client container with `--link`.
+
+If the operator uses `-P` or `-p` then Docker will make the exposed port
+accessible on the host and the ports will be available to any client that can
+reach the host. When using `-P`, Docker will bind the exposed port to a random
+port on the host within an *ephemeral port range* defined by
+`/proc/sys/net/ipv4/ip_local_port_range`. To find the mapping between the host
+ports and the exposed ports, use `docker port`.
+
+If the operator uses `--link` when starting the new client container,
+then the client container can access the exposed port via a private
+networking interface. Docker will set some environment variables in the
+client container to help indicate which interface and port to use.
+
+## ENV (environment variables)
+
+When a new container is created, Docker will set the following environment
+variables automatically:
+
+| Variable | Value |
+|----------|-------|
+| `HOME` | Set based on the value of `USER` |
+| `HOSTNAME` | The hostname associated with the container |
+| `PATH` | Includes popular directories, such as `/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin` |
+| `TERM` | `xterm` if the container is allocated a pseudo-TTY |
+ +The container may also include environment variables defined +as a result of the container being linked with another container. See +the [*Container Links*](/userguide/dockerlinks/#container-linking) +section for more details. + +Additionally, the operator can **set any environment variable** in the +container by using one or more `-e` flags, even overriding those mentioned +above, or already defined by the developer with a Dockerfile `ENV`: + + $ docker run -e "deep=purple" --rm ubuntu /bin/bash -c export + declare -x HOME="/" + declare -x HOSTNAME="85bc26a0e200" + declare -x OLDPWD + declare -x PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + declare -x PWD="/" + declare -x SHLVL="1" + declare -x container="lxc" + declare -x deep="purple" + +Similarly the operator can set the **hostname** with `-h`. + +`--link :alias` also sets environment variables, using the *alias* string to +define environment variables within the container that give the IP and PORT +information for connecting to the service container. Let's imagine we have a +container running Redis: + + # Start the service container, named redis-name + $ docker run -d --name redis-name dockerfiles/redis + 4241164edf6f5aca5b0e9e4c9eccd899b0b8080c64c0cd26efe02166c73208f3 + + # The redis-name container exposed port 6379 + $ docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 4241164edf6f $ dockerfiles/redis:latest /redis-stable/src/re 5 seconds ago Up 4 seconds 6379/tcp redis-name + + # Note that there are no public ports exposed since we didn᾿t use -p or -P + $ docker port 4241164edf6f 6379 + 2014/01/25 00:55:38 Error: No public port '6379' published for 4241164edf6f + +Yet we can get information about the Redis container's exposed ports +with `--link`. Choose an alias that will form a +valid environment variable! + + $ docker run --rm --link redis-name:redis_alias --entrypoint /bin/bash dockerfiles/redis -c export + declare -x HOME="/" + declare -x HOSTNAME="acda7f7b1cdc" + declare -x OLDPWD + declare -x PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + declare -x PWD="/" + declare -x REDIS_ALIAS_NAME="/distracted_wright/redis" + declare -x REDIS_ALIAS_PORT="tcp://172.17.0.32:6379" + declare -x REDIS_ALIAS_PORT_6379_TCP="tcp://172.17.0.32:6379" + declare -x REDIS_ALIAS_PORT_6379_TCP_ADDR="172.17.0.32" + declare -x REDIS_ALIAS_PORT_6379_TCP_PORT="6379" + declare -x REDIS_ALIAS_PORT_6379_TCP_PROTO="tcp" + declare -x SHLVL="1" + declare -x container="lxc" + +And we can use that information to connect from another container as a client: + + $ docker run -i -t --rm --link redis-name:redis_alias --entrypoint /bin/bash dockerfiles/redis -c '/redis-stable/src/redis-cli -h $REDIS_ALIAS_PORT_6379_TCP_ADDR -p $REDIS_ALIAS_PORT_6379_TCP_PORT' + 172.17.0.32:6379> + +Docker will also map the private IP address to the alias of a linked +container by inserting an entry into `/etc/hosts`. You can use this +mechanism to communicate with a linked container by its alias: + + $ docker run -d --name servicename busybox sleep 30 + $ docker run -i -t --link servicename:servicealias busybox ping -c 1 servicealias + +If you restart the source container (`servicename` in this case), the recipient +container's `/etc/hosts` entry will be automatically updated. + +> **Note**: +> Unlike host entries in the `/etc/hosts` file, IP addresses stored in the +> environment variables are not automatically updated if the source container is +> restarted. 
We recommend using the host entries in `/etc/hosts` to resolve the
+> IP address of linked containers.
+
+## VOLUME (shared filesystems)
+
+    -v=[]: Create a bind mount with: [host-dir:]container-dir[:rw|ro].
+           If 'host-dir' is missing, then docker creates a new volume.
+           If neither 'rw' nor 'ro' is specified then the volume is mounted
+           in read-write mode.
+    --volumes-from="": Mount all volumes from the given container(s)
+
+The volumes commands are complex enough to have their own documentation
+in section [*Managing data in
+containers*](/userguide/dockervolumes). A developer can define
+one or more `VOLUME`s associated with an image, but only the operator
+can give access from one container to another (or from a container to a
+volume mounted on the host).
+
+## USER
+
+The default user within a container is `root` (id = 0), but if the
+developer created additional users, those are accessible too. The
+developer can set a default user to run the first process with the
+Dockerfile `USER` instruction, but the operator can override it:
+
+    -u="": Username or UID
+
+> **Note:** if you pass a numeric UID, it must be in the range 0-2147483647.
+
+## WORKDIR
+
+The default working directory for running binaries within a container is the
+root directory (`/`), but the developer can set a different default with the
+Dockerfile `WORKDIR` command. The operator can override this with:
+
+    -w="": Working directory inside the container
diff --git a/docs/security/apparmor.md b/docs/security/apparmor.md
new file mode 100644
index 00000000..1e82200b
--- /dev/null
+++ b/docs/security/apparmor.md
@@ -0,0 +1,41 @@
+AppArmor security profiles for Docker
+--------------------------------------
+
+AppArmor (Application Armor) is a security module that allows a system
+administrator to associate a security profile with each program. Docker
+expects to find an AppArmor policy loaded and enforced.
+
+Container profiles are loaded automatically by Docker. A profile
+for the Docker Engine itself also exists and is installed
+with the official *.deb* packages. Advanced users and package
+managers may find the profile for */usr/bin/docker* underneath
+[contrib/apparmor](https://github.com/docker/docker/tree/master/contrib/apparmor)
+in the Docker Engine source repository.
+
+
+Understand the policies
+------------------------
+
+The `docker-default` profile is the default for running
+containers. It is moderately protective while
+providing wide application compatibility.
+
+The system's standard `unconfined` profile inherits all
+system-wide policies, applying path-based policies
+intended for the host system inside of containers.
+This was the default for privileged containers
+prior to Docker 1.8.
+
+
+Overriding the profile for a container
+---------------------------------------
+
+Users may override the AppArmor profile using the
+`security-opt` option (per-container).
+
+For example, the following explicitly specifies the default policy:
+
+```
+$ docker run --rm -it --security-opt apparmor:docker-default hello-world
+```
+
diff --git a/docs/security/trust/content_trust.md b/docs/security/trust/content_trust.md
new file mode 100644
index 00000000..ee76ffdc
--- /dev/null
+++ b/docs/security/trust/content_trust.md
@@ -0,0 +1,291 @@
+
+
+# Content trust in Docker
+
+When transferring data among networked systems, *trust* is a central concern. In
+particular, when communicating over an untrusted medium such as the internet, it
+is critical to ensure the integrity and publisher of all the data a system
+operates on.
You use Docker to push
+and pull images (data) to a registry. Content trust
+gives you the ability to both verify the integrity and the publisher of all the
+data received from a registry over any channel.
+
+Content trust is currently only available for users of the public Docker Hub. It
+is currently not available for the Docker Trusted Registry or for private
+registries.
+
+## Understand trust in Docker
+
+Content trust allows operations with a remote Docker registry to enforce
+client-side signing and verification of image tags. Content trust provides the
+ability to use digital signatures for data sent to and received from remote
+Docker registries. These signatures allow client-side verification of the
+integrity and publisher of specific image tags.
+
+Currently, content trust is disabled by default. You must enable it by setting
+the `DOCKER_CONTENT_TRUST` environment variable.
+
+Once content trust is enabled, image publishers can sign their images. Image consumers can
+ensure that the images they use are signed. Publishers and consumers can be
+individuals alone or in organizations. Docker's content trust supports users and
+automated processes such as builds.
+
+### Image tags and content trust
+
+An individual image record has the following identifier:
+
+```
+[REGISTRY_HOST[:REGISTRY_PORT]/]REPOSITORY[:TAG]
+```
+
+A particular image `REPOSITORY` can have multiple tags. For example, `latest` and
+`3.1.2` are both tags on the `mongo` image. An image publisher can build an image
+and tag combination many times, changing the image with each build.
+
+Content trust is associated with the `TAG` portion of an image. Each image
+repository has a set of keys that image publishers use to sign an image tag.
+Image publishers have discretion on which tags they sign.
+
+An image repository can contain an image with one tag that is signed and another
+tag that is not. For example, consider [the Mongo image
+repository](https://hub.docker.com/r/library/mongo/tags/). The `latest`
+tag could be unsigned while the `3.1.6` tag could be signed. It is the
+responsibility of the image publisher to decide if an image tag is signed or
+not. In this representation, some image tags are signed, others are not:
+
+![Signed tags](../images/tag_signing.png)
+
+Publishers can choose to sign a specific tag or not. As a result, the content of
+an unsigned tag and that of a signed tag with the same name may not match. For
+example, a publisher can push a tagged image `someimage:latest` and sign it.
+Later, the same publisher can push an unsigned `someimage:latest` image. This second
+push replaces the last unsigned tag `latest` but does not affect the signed `latest` version.
+The ability to choose which tags they sign allows publishers to iterate over
+the unsigned version of an image before officially signing it.
+
+Image consumers can enable content trust to ensure that images they use were
+signed. If a consumer enables content trust, they can only pull, run, or build
+with trusted images. Enabling content trust is like wearing a pair of
+rose-colored glasses. Consumers "see" only signed image tags and the less
+desirable, unsigned image tags are "invisible" to them.
+
+![Trust view](../images/trust_view.png)
+
+To a consumer who has not enabled content trust, nothing about how they
+work with Docker images changes. Every image is visible regardless of whether it
+is signed or not.
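+For example, a minimal sketch of the consumer side (`someimage` is the same
+illustrative name used above): with trust enabled in the shell, a pull
+succeeds only if the tag is signed:
+
+```bash
+$ export DOCKER_CONTENT_TRUST=1
+$ docker pull someimage:latest
+```
+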
+
+
+### Content trust operations and keys
+
+When content trust is enabled, `docker` CLI commands that operate on tagged images must
+either have content signatures or explicit content hashes. The commands that
+operate with content trust are:
+
+* `push`
+* `build`
+* `create`
+* `pull`
+* `run`
+
+For example, with content trust enabled, a `docker pull someimage:latest` only
+succeeds if `someimage:latest` is signed. However, an operation with an explicit
+content hash always succeeds as long as the hash exists:
+
+```bash
+$ docker pull someimage@sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a
+```
+
+Trust for an image tag is managed through the use of signing keys. Docker's
+content trust makes use of four different keys:
+
+| Key                 | Description |
+|---------------------|-------------|
+| offline key         | Root of content trust for an image tag. When content trust is enabled, you create the offline key once. |
+| target and snapshot | These two keys are known together as the "tagging" key. When content trust is enabled, you create this key when you add a new image repository. If you have the offline key, you can export the tagging key and allow other publishers to sign the image tags. |
+| timestamp           | This key applies to a repository. It allows Docker repositories to have freshness security guarantees without requiring periodic content refreshes on the client's side. |
+
+With the exception of the timestamp key, all the keys are generated and stored
+locally on the client side. The timestamp key is safely generated and stored in
+a signing server that is deployed alongside the Docker registry. It is generated
+in a backend service that isn't directly exposed to the internet and is
+encrypted at rest.
+
+The following image depicts the various signing keys and their relationships:
+
+![Content trust components](../images/trust_components.png)
+
+> **WARNING**: Loss of the offline key is **very difficult** to recover from.
+> Correcting this loss requires intervention from [Docker
+> Support](https://support.docker.com) to reset the repository state. This loss
+> also requires **manual intervention** from every consumer that used a signed
+> tag from this repository prior to the loss.
+
+You should back up the offline key somewhere safe. Given that it is only required
+to create new repositories, it is a good idea to store it offline. Make sure you
+read [Manage keys for content trust](/security/trust/trust_key_mng)
+for details on creating, securing, and backing up your keys.
+
+## Survey of typical content trust operations
+
+This section surveys the typical trusted operations users perform with Docker
+images.
+
+### Enable content trust
+
+Enable content trust by setting the `DOCKER_CONTENT_TRUST` environment variable.
+Enabling it per shell is useful because you can have one shell configured for
+trusted operations and another shell for untrusted operations. You can
+also add this declaration to your shell profile to have it always on by
+default.
+
+To enable content trust in a `bash` shell, enter the following command:
+
+```bash
+export DOCKER_CONTENT_TRUST=1
+```
+
+Once set, each of the "tag" operations requires a key for a trusted tag. All of
+these commands also support the `--disable-content-trust` flag. This flag allows
+publishers to run individual operations on tagged images without content trust
+on an as-needed basis.
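+For instance, trust can be toggled per command as well as per shell (the image
+name below is hypothetical):
+
+```bash
+# Enable content trust for a single command without exporting the variable.
+$ DOCKER_CONTENT_TRUST=1 docker pull someimage:latest
+
+# Skip content trust for a single command even when the variable is exported.
+$ docker pull --disable-content-trust someimage:latest
+```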
+
+
+### Push trusted content
+
+To create signed content for a specific image tag, simply enable content trust and push
+a tagged image. If this is the first time you have pushed an image using content trust
+on your system, the session looks like this:
+
+```bash
+$ docker push docker/trusttest:latest
+The push refers to a repository [docker.io/docker/trusttest] (len: 1)
+9a61b6b1315e: Image already exists
+902b87aaaec9: Image already exists
+latest: digest: sha256:d02adacee0ac7a5be140adb94fa1dae64f4e71a68696e7f8e7cbf9db8dd49418 size: 3220
+Signing and pushing trust metadata
+You are about to create a new offline signing key passphrase. This passphrase
+will be used to protect the most sensitive key in your signing system. Please
+choose a long, complex passphrase and be careful to keep the password and the
+key file itself secure and backed up. It is highly recommended that you use a
+password manager to generate the passphrase and keep it safe. There will be no
+way to recover this key. You can find the key in your config directory.
+Enter passphrase for new offline key with id a1d96fb:
+Repeat passphrase for new offline key with id a1d96fb:
+Enter passphrase for new tagging key with id docker.io/docker/trusttest (3a932f1):
+Repeat passphrase for new tagging key with id docker.io/docker/trusttest (3a932f1):
+Finished initializing "docker.io/docker/trusttest"
+```
+
+When you push your first tagged image with content trust enabled, the `docker` client
+recognizes this is your first push and:
+
+ - alerts you that it will create a new offline key
+ - requests a passphrase for the key
+ - generates an offline key in the `~/.docker/trust` directory
+ - generates a tagging key for the repository in the `~/.docker/trust` directory
+
+The passphrases you choose for both the offline key and the tagging key should
+be randomly generated and stored in a *password manager*.
+
+It is important to note that if you leave off the `latest` tag, content trust is
+skipped. This is true even if content trust is enabled and even if this is your
+first push.
+
+```bash
+$ docker push docker/trusttest
+The push refers to a repository [docker.io/docker/trusttest] (len: 1)
+9a61b6b1315e: Image successfully pushed
+902b87aaaec9: Image successfully pushed
+latest: digest: sha256:a9a9c4402604b703bed1c847f6d85faac97686e48c579bd9c3b0fa6694a398fc size: 3220
+No tag specified, skipping trust metadata push
+```
+
+It is skipped because, as the message states, you did not supply an image `TAG`
+value. In Docker content trust, signatures are associated with tags.
+
+Once you have an offline key on your system, subsequent image repositories
+you create can use that same offline key:
+
+```bash
+$ docker push docker.io/docker/seaside:latest
+The push refers to a repository [docker.io/docker/seaside] (len: 1)
+a9539b34a6ab: Image successfully pushed
+b3dbab3810fc: Image successfully pushed
+latest: digest: sha256:d2ba1e603661a59940bfad7072eba698b79a8b20ccbb4e3bfb6f9e367ea43939 size: 3346
+Signing and pushing trust metadata
+Enter key passphrase for offline key with id a1d96fb:
+Enter passphrase for new tagging key with id docker.io/docker/seaside (bb045e3):
+Repeat passphrase for new tagging key with id docker.io/docker/seaside (bb045e3):
+Finished initializing "docker.io/docker/seaside"
+```
+
+The new image has its own tagging key and timestamp key. The `latest` tag is
+signed with both of these.
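+Putting these steps together, a minimal trusted publish names an explicit tag
+before pushing, since trust metadata is only created for pushes that supply a
+`TAG` value (the `v1` tag below is hypothetical):
+
+```bash
+$ export DOCKER_CONTENT_TRUST=1
+# Re-tag an existing image with an explicit tag, then push it.
+$ docker tag docker/trusttest:latest docker/trusttest:v1
+$ docker push docker/trusttest:v1
+```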
+
+
+### Pull image content
+
+A common way to consume an image is to `pull` it. With content trust enabled, the Docker
+client only allows `docker pull` to retrieve signed images.
+
+```
+$ docker pull docker/seaside
+Using default tag: latest
+Pull (1 of 1): docker/seaside:latest@sha256:d2ba1e603661
+...
+Tagging docker/seaside@sha256:d2ba1e603661 as docker/seaside:latest
+```
+
+The `seaside:latest` image is signed. In the following example, the command does
+not specify a tag, so the system again uses the `latest` tag by default, and the
+`docker/cliffs:latest` tag is not signed:
+
+```bash
+$ docker pull docker/cliffs
+Using default tag: latest
+no trust data available
+```
+
+Because the tag `docker/cliffs:latest` is not trusted, the `pull` fails.
+
+
+### Disable content trust for specific operations
+
+A user who wants to disable content trust for a particular operation can use the
+`--disable-content-trust` flag. **Warning: this flag disables content trust for
+this operation**. With this flag, Docker ignores content trust and allows all
+operations to proceed without verifying any signatures. For example, to let a
+build succeed even though its base image is not signed, we could do:
+
+```
+$ cat Dockerfile
+FROM docker/trusttest:notrust
+RUN echo
+$ docker build --disable-content-trust -t docker/trusttest:testing .
+Sending build context to Docker daemon 42.84 MB
+...
+Successfully built f21b872447dc
+```
+
+The same is true for all the other commands, such as `pull` and `push`:
+
+```
+$ docker pull --disable-content-trust docker/trusttest:untrusted
+...
+$ docker push --disable-content-trust docker/trusttest:untrusted
+...
+```
+
+## Related information
+
+* [Manage keys for content trust](/security/trust/trust_key_mng)
+* [Automation with content trust](/security/trust/trust_automation)
+* [Play in a content trust sandbox](/security/trust/trust_sandbox)
+
+
diff --git a/docs/security/trust/images/tag_signing.png b/docs/security/trust/images/tag_signing.png
new file mode 100644
index 00000000..9a1f9062
Binary files /dev/null and b/docs/security/trust/images/tag_signing.png differ
diff --git a/docs/security/trust/images/trust_.gliffy b/docs/security/trust/images/trust_.gliffy
new file mode 100644
index 00000000..9298984b
--- /dev/null
+++ b/docs/security/trust/images/trust_.gliffy
@@ -0,0 +1 @@
+{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":1029,"height":814,"nodeIndex":315,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":true,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":null,"printShrinkToFit":false,"printPortrait":false,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":159,"y":120.286},"max":{"x":1029,"y":814}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":465.5822784810126,"y":531.0,"rotation":0.0,"id":299,"width":30.0,"height":30.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.events.timer_intermediate","order":204,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.timer_intermediate.bpmn_v1","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":733.0,"y":578.0,"rotation":0.0,"id":294,"width":54.0,"height":54.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.events.timer_intermediate","order":200,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.timer_intermediate.bpmn_v1","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":297,"width":88.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Timestamp Key

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":311.0,"y":147.0,"rotation":0.0,"id":268,"width":18.0,"height":53.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":178,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":152,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":264,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-3.417721518987321,-4.214000000000027],[9.708860759493689,-4.214000000000027],[9.708860759493689,50.74999999999994],[22.8354430379747,50.74999999999994]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":415.0,"y":313.0,"rotation":0.0,"id":250,"width":7.0,"height":413.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":172,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":79,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[3.5,-3.0],[9.5,497.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":290.0,"y":340.0,"rotation":0.0,"id":11,"width":63.0,"height":82.0,"uid":"com.gliffy.shape.network.network_v4.business.user","order":12,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.user","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":12,"width":48.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Account

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":479.0,"y":330.0,"rotation":0.0,"id":2,"width":120.0,"height":80.0,"uid":"com.gliffy.shape.network.network_v4.business.user_group","order":9,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.user_group","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":3,"width":73.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Organization

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":159.0,"y":310.0,"rotation":0.0,"id":79,"width":531.0,"height":500.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":159.00000000000003,"y":320.0,"rotation":0.0,"id":82,"width":108.99999999999999,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":58,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Registry

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":730.0,"y":340.0,"rotation":0.0,"id":86,"width":61.0,"height":79.0,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":59,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":87,"width":62.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Offline key

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":730.0,"y":455.0,"rotation":0.0,"id":88,"width":61.0,"height":79.0,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":62,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":89,"width":70.0,"height":14.0,"uid":null,"order":64,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Tagging key

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":360.4891500904159,"y":650.0,"rotation":0.0,"id":227,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":158,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":228,"width":16.0,"height":18.0,"uid":null,"order":160,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

X

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":185.1428571428571,"y":587.0,"rotation":0.0,"id":109,"width":187.85714285714286,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":81,"lockAspectRatio":false,"lockShape":false,"children":[{"x":7.142857142857139,"y":50.0,"rotation":0.0,"id":98,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_right","order":74,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":99,"width":71.42857142857143,"height":50.0,"uid":null,"order":77,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":98}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":98}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_right","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":-7.142857142857139,"y":0.0,"rotation":0.0,"id":100,"width":50.0,"height":18.0,"uid":null,"order":80,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":98,"px":-0.1,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

working

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":7.571428571428527,"y":0.0,"rotation":0.0,"id":95,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_right","order":66,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":96,"width":71.42857142857143,"height":50.0,"uid":null,"order":69,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":95}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":95}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_right","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":-7.142857142857139,"y":0.0,"rotation":0.0,"id":97,"width":38.0,"height":18.0,"uid":null,"order":72,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":95,"px":-0.1,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

latest

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":77.85714285714286,"y":8.0,"rotation":0.0,"id":30,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":24,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":31,"width":110.00000000000001,"height":25.0,"uid":null,"order":27,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":32}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":32,"width":110.00000000000001,"height":25.0,"uid":null,"order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":33,"width":110.00000000000001,"height":55.0,"uid":null,"order":34,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":30},{"magnitude":-1,"id":32}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":32,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":184.21428571428567,"y":450.0,"rotation":0.0,"id":253,"width":187.85714285714286,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":173,"lockAspectRatio":false,"lockShape":false,"children":[{"x":77.85714285714286,"y":8.0,"rotation":0.0,"id":125,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":83,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":126,"width":110.00000000000001,"height":25.0,"uid":null,"order":86,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":127}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":127,"width":110.00000000000001,"height":25.0,"uid":null,"order":90,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":128,"width":110.00000000000001,"height":55.0,"uid":null,"order":93,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":125},{"magnitude":-1,"id":127}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":127,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":7.571428571428527,"y":0.0,"rotation":0.0,"id":122,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_right","order":95,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":123,"width":71.42857142857143,"height":50.0,"uid":null,"order":98,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":122}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":122}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_right","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":-7.142857142857139,"y":0.0,"rotation":0.0,"id":124,"width":38.0,"height":18.0,"uid":null,"order":101,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":122,"px":-0.1,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

latest

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":7.142857142857139,"y":50.0,"rotation":0.0,"id":119,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_right","order":103,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":120,"width":71.42857142857143,"height":50.0,"uid":null,"order":106,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":119}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":119}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_right","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":-7.142857142857139,"y":0.0,"rotation":0.0,"id":121,"width":26.0,"height":18.0,"uid":null,"order":109,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":119,"px":-0.1,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

2.0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":479.0,"y":120.74999999999994,"rotation":0.0,"id":261,"width":155.08307142857143,"height":168.072,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":174,"lockAspectRatio":false,"lockShape":false,"children":[{"x":85.65449999999998,"y":38.0,"rotation":0.0,"id":245,"width":28.0,"height":43.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":171,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":193,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":204,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[2.5108499095841808,-13.999999999999972],[16.0465641952984,-13.999999999999972],[16.0465641952984,39.0],[29.582278481012622,39.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":null},{"x":89.65449999999998,"y":25.0,"rotation":0.0,"id":244,"width":24.0,"height":1.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":169,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":193,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":192,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-1.4891500904158192,-0.9999999999999716],[7.534659433393699,-0.9999999999999716],[16.558468957203104,-0.9999999999999716],[25.582278481012622,-0.9999999999999716]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":null},{"x":115.2367784810126,"y":62.0,"rotation":0.0,"id":204,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":151,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":205,"width":15.0,"height":16.0,"uid":null,"order":154,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

C

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":null},{"x":115.2367784810126,"y":9.000000000000028,"rotation":0.0,"id":192,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":148,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":201,"width":15.0,"height":16.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

A

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":null},{"x":65.0007929475588,"y":9.000000000000028,"rotation":0.0,"id":193,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":141,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":194,"width":14.0,"height":18.0,"uid":null,"order":144,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

2

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":null},{"x":55.08307142857143,"y":0.0,"rotation":0.0,"id":195,"width":100.0,"height":133.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.speech_bubble_right","order":129,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"magnitude":1,"id":197},{"magnitude":1,"id":198}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":196,"width":100.0,"height":118.0,"uid":null,"order":132,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":195,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":195},{"magnitude":-1,"id":198}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":195}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.speech_bubble","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":197,"width":100.0,"height":29.0,"uid":null,"order":136,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":195}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":36.0,"y":117.0,"rotation":0.0,"id":198,"width":24.0,"height":15.0,"uid":null,"order":139,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":24}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":15}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":196,"px":1.0,"py":1.0,"xOffset":-64.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.speech_bubble_right","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":67.0,"rotation":0.0,"id":180,"width":67.309,"height":101.072,"uid":"com.gliffy.shape.cisco.cisco_v1.buildings.generic_building","order":126,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.buildings.generic_building","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":182,"width":56.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Company

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":231.1785714285715,"y":204.78599999999997,"rotation":0.0,"id":0,"width":63.0,"height":82.0,"uid":"com.gliffy.shape.network.network_v4.business.female_user","order":6,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.female_user","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":1,"width":43.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Person

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":272.07142857142856,"y":120.286,"rotation":0.0,"id":171,"width":100.0,"height":132.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.speech_bubble_right","order":112,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"magnitude":1,"id":173},{"magnitude":1,"id":174}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":172,"width":100.0,"height":117.0,"uid":null,"order":114,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":171,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":171},{"magnitude":-1,"id":174}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":171}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.speech_bubble","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":173,"width":100.0,"height":29.0,"uid":null,"order":117,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":171}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":36.0,"y":116.0,"rotation":0.0,"id":174,"width":24.0,"height":15.0,"uid":null,"order":119,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":24}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":15}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":172,"px":1.0,"py":1.0,"xOffset":-64.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.speech_bubble_right","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":310.5,"y":146.78599999999997,"rotation":0.0,"id":239,"width":20.0,"height":1.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":167,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":152,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":237,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-2.917721518987321,-4.0],[6.078661844484657,-4.0],[15.075045207956578,-4.0],[24.071428571428555,-4.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":333.8354430379747,"y":182.74999999999994,"rotation":0.0,"id":264,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":175,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":265,"width":21.0,"height":18.0,"uid":null,"order":177,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 N

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":284.4177215189874,"y":127.78599999999997,"rotation":0.0,"id":152,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":120,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":153,"width":14.0,"height":18.0,"uid":null,"order":122,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

1

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":334.57142857142856,"y":127.78599999999997,"rotation":0.0,"id":237,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":164,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":238,"width":16.0,"height":18.0,"uid":null,"order":166,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

X

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":565.0,"y":500.0,"rotation":0.0,"id":40,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":1,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":41,"width":71.42857142857143,"height":50.0,"uid":null,"order":3,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":40}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":40}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285666,"y":0.0,"rotation":0.0,"id":42,"width":26.0,"height":18.0,"uid":null,"order":5,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":40,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

1.0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":454.99999999999994,"y":461.0,"rotation":0.0,"id":16,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":15,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":17,"width":110.00000000000001,"height":25.0,"uid":null,"order":17,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":18}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":18,"width":110.00000000000001,"height":25.0,"uid":null,"order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":19,"width":110.00000000000001,"height":55.0,"uid":null,"order":22,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":16},{"magnitude":-1,"id":18}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":18,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":565.0,"y":450.0,"rotation":0.0,"id":37,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":35,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":38,"width":71.42857142857143,"height":50.0,"uid":null,"order":37,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":37}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":37}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285666,"y":0.0,"rotation":0.0,"id":39,"width":38.0,"height":18.0,"uid":null,"order":39,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":37,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

latest

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":443.4177215189873,"y":513.0,"rotation":0.0,"id":229,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":161,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":230,"width":15.0,"height":16.0,"uid":null,"order":163,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

A

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":565.0,"y":630.0,"rotation":0.0,"id":63,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":40,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":64,"width":71.42857142857143,"height":50.0,"uid":null,"order":42,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":63}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":63}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285666,"y":0.0,"rotation":0.0,"id":65,"width":68.0,"height":18.0,"uid":null,"order":44,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":63,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

production

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":454.99999999999994,"y":591.0,"rotation":0.0,"id":58,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":45,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":59,"width":110.00000000000001,"height":25.0,"uid":null,"order":47,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":60}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":60,"width":110.00000000000001,"height":25.0,"uid":null,"order":50,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":61,"width":110.00000000000001,"height":55.0,"uid":null,"order":52,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":58},{"magnitude":-1,"id":60}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":60,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":565.0,"y":580.0,"rotation":0.0,"id":55,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":53,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":56,"width":71.42857142857143,"height":50.0,"uid":null,"order":55,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":55}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":55}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285666,"y":0.0,"rotation":0.0,"id":57,"width":28.0,"height":18.0,"uid":null,"order":57,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":55,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

test

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":443.4177215189873,"y":646.0,"rotation":0.0,"id":221,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":155,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":222,"width":15.0,"height":16.0,"uid":null,"order":157,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

C

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":565.0,"y":745.0,"rotation":0.0,"id":281,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":179,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":282,"width":71.42857142857143,"height":50.0,"uid":null,"order":181,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":281}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":281}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285666,"y":0.0,"rotation":0.0,"id":283,"width":48.0,"height":18.0,"uid":null,"order":183,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":281,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

release

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":454.99999999999994,"y":706.0,"rotation":0.0,"id":277,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":184,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":278,"width":110.00000000000001,"height":25.0,"uid":null,"order":186,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":279}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":279,"width":110.00000000000001,"height":25.0,"uid":null,"order":189,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":280,"width":110.00000000000001,"height":55.0,"uid":null,"order":191,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":277},{"magnitude":-1,"id":279}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":279,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":565.0,"y":695.0,"rotation":0.0,"id":274,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":192,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":275,"width":71.42857142857143,"height":50.0,"uid":null,"order":194,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":274}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":274}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285666,"y":0.0,"rotation":0.0,"id":276,"width":26.0,"height":18.0,"uid":null,"order":196,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":274,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

7.5

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":360.4891500904159,"y":510.0,"rotation":0.0,"id":289,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":197,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":290,"width":21.0,"height":18.0,"uid":null,"order":199,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 N

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":332.57142857142856,"y":532.0,"rotation":0.0,"id":301,"width":30.0,"height":30.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.events.timer_intermediate","order":205,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.timer_intermediate.bpmn_v1","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":330.4177215189874,"y":670.0,"rotation":0.0,"id":302,"width":30.0,"height":30.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.events.timer_intermediate","order":206,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.timer_intermediate.bpmn_v1","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":466.5822784810126,"y":667.0,"rotation":0.0,"id":303,"width":30.0,"height":30.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.events.timer_intermediate","order":207,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.timer_intermediate.bpmn_v1","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":621.401335443038,"y":508.0,"rotation":0.0,"id":306,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":209,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":621.401335443038,"y":459.0,"rotation":0.0,"id":307,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":210,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":621.401335443038,"y":589.0,"rotation":0.0,"id":308,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":211,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":186.21428571428567,"y":594.0,"rotation":0.0,"id":309,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":212,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0
,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":189.21428571428567,"y":644.0,"rotation":0.0,"id":310,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":213,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":810.0,"y":358.5,"rotation":0.0,"id":164,"width":217.0,"height":70.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":110,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

An offline key is used to create repository keys. Offline keys belong to a person or an organization and reside client-side. You should store them in a safe place and back them up. 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":810.0,"y":487.5,"rotation":0.0,"id":170,"width":217.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":111,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

A tagging key is associated with an image repository. Publishers with this key can push or pull any tag in this repository. It resides client-side.

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":810.0,"y":587.0,"rotation":0.0,"id":298,"width":217.0,"height":42.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":203,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

A timestamp key is associated with an image repository. It is created by Docker and resides on the server.

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":743.3333333333334,"y":681.0,"rotation":0.0,"id":314,"width":283.66666666666663,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":215,"lockAspectRatio":false,"lockShape":false,"children":[{"x":66.66666666666663,"y":4.0,"rotation":0.0,"id":312,"width":217.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":214,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Signed tag.

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":null},{"x":0.0,"y":0.0,"rotation":0.0,"id":304,"width":33.333333333333336,"height":20.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":208,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"}],"layers":[{"guid":"dockVlz9GmcW","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":216}],"shapeStyles":{},"lineStyles":{"global":{"strokeWidth":1,"endArrow":17}},"textStyles":{"global":{"size":"16px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.cisco.cisco_v1.buildings","com.gliffy.libraries.sitemap.sitemap_v2","com.gliffy.libraries.sitemap.sitemap_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.table.table_v2.default","com.gliffy.libraries.ui.ui_v3.navigation","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.ui.ui_v3.icon_symbols","com.gliffy.libraries.ui.ui_v2.forms_components","com.gliffy.libraries.ui.ui_v2.content","com.gliffy.libraries.ui.ui_v2.miscellaneous","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.bpmn.bpmn_v1.events","com.gliffy.libraries.bpmn.bpmn_v1.activities","com.gliffy.libraries.bpmn.bpmn_v1.data_artifacts","com.gliffy.libraries.bpmn.bpmn_v1.gateways","com.gliffy.libraries.bpmn.bpmn_v1.connectors","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.images"],"lastSerialized":1439068390533},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/docs/security/trust/images/trust_components.gliffy b/docs/security/trust/images/trust_components.gliffy new file mode 100644 index 00000000..07c859bb --- /dev/null +++ b/docs/security/trust/images/trust_components.gliffy @@ -0,0 +1 @@ 
+{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":881,"height":704,"nodeIndex":316,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":true,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":null,"printShrinkToFit":false,"printPortrait":false,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":10,"y":10},"max":{"x":880.0000000000001,"y":703.7139999999999}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":10.0,"y":199.714,"rotation":0.0,"id":79,"width":531.0,"height":500.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":389.714,"rotation":0.0,"id":40,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":1,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":41,"width":71.42857142857143,"height":50.0,"uid":null,"order":3,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":40}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":40}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":42,"width":26.0,"height":18.0,"uid":null,"order":5,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":40,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

1.0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":82.1785714285715,"y":94.49999999999997,"rotation":0.0,"id":0,"width":63.0,"height":82.0,"uid":"com.gliffy.shape.network.network_v4.business.female_user","order":6,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.female_user","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":1,"width":43.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Person

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":330.0,"y":219.714,"rotation":0.0,"id":2,"width":120.0,"height":80.0,"uid":"com.gliffy.shape.network.network_v4.business.user_group","order":9,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.user_group","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":3,"width":73.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Organization

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":141.0,"y":229.714,"rotation":0.0,"id":11,"width":63.0,"height":82.0,"uid":"com.gliffy.shape.network.network_v4.business.user","order":12,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.user","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":12,"width":48.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Account

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":305.99999999999994,"y":350.714,"rotation":0.0,"id":16,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":15,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":17,"width":110.00000000000001,"height":25.0,"uid":null,"order":17,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":18}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":18,"width":110.00000000000001,"height":25.0,"uid":null,"order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":19,"width":110.00000000000001,"height":55.0,"uid":null,"order":22,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":16},{"magnitude":-1,"id":18}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":18,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":339.714,"rotation":0.0,"id":37,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":35,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":38,"width":71.42857142857143,"height":50.0,"uid":null,"order":37,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":37}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":37}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":39,"width":38.0,"height":18.0,"uid":null,"order":39,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":37,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

latest

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":519.7139999999999,"rotation":0.0,"id":63,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":40,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":64,"width":71.42857142857143,"height":50.0,"uid":null,"order":42,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":63}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":63}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":65,"width":68.0,"height":18.0,"uid":null,"order":44,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":63,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

production

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":305.99999999999994,"y":480.71399999999994,"rotation":0.0,"id":58,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":45,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":59,"width":110.00000000000001,"height":25.0,"uid":null,"order":47,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":60}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":60,"width":110.00000000000001,"height":25.0,"uid":null,"order":50,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":61,"width":110.00000000000001,"height":55.0,"uid":null,"order":52,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":58},{"magnitude":-1,"id":60}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":60,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":469.714,"rotation":0.0,"id":55,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":53,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":56,"width":71.42857142857143,"height":50.0,"uid":null,"order":55,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":55}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":55}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":57,"width":28.0,"height":18.0,"uid":null,"order":57,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":55,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

test

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":10.000000000000036,"y":209.714,"rotation":0.0,"id":82,"width":108.99999999999999,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":58,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Registry

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":581.0,"y":229.714,"rotation":0.0,"id":86,"width":61.0,"height":79.0,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":59,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":87,"width":62.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Offline key

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":581.0,"y":344.714,"rotation":0.0,"id":88,"width":61.0,"height":79.0,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":62,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":89,"width":70.0,"height":14.0,"uid":null,"order":64,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Tagging key

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":36.142857142857125,"y":476.71399999999994,"rotation":0.0,"id":109,"width":187.85714285714286,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":81,"lockAspectRatio":false,"lockShape":false,"children":[{"x":7.142857142857139,"y":50.0,"rotation":0.0,"id":98,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_right","order":74,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":99,"width":71.42857142857143,"height":50.0,"uid":null,"order":77,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":98}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":98}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_right","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":-7.142857142857139,"y":0.0,"rotation":0.0,"id":100,"width":50.0,"height":18.0,"uid":null,"order":80,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":98,"px":-0.1,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

working

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":7.571428571428527,"y":0.0,"rotation":0.0,"id":95,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_right","order":66,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":96,"width":71.42857142857143,"height":50.0,"uid":null,"order":69,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":95}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":95}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_right","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":-7.142857142857139,"y":0.0,"rotation":0.0,"id":97,"width":38.0,"height":18.0,"uid":null,"order":72,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":95,"px":-0.1,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

latest

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":77.85714285714286,"y":8.0,"rotation":0.0,"id":30,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":24,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":31,"width":110.00000000000001,"height":25.0,"uid":null,"order":27,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":32}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":32,"width":110.00000000000001,"height":25.0,"uid":null,"order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":33,"width":110.00000000000001,"height":55.0,"uid":null,"order":34,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":30},{"magnitude":-1,"id":32}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":32,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":661.0,"y":248.214,"rotation":0.0,"id":164,"width":217.0,"height":70.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":110,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

An offline key is used to create tagging keys. Offline keys belong to a person or an organization and reside client-side. You should store them in a safe place and back them up. 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":661.0,"y":377.214,"rotation":0.0,"id":170,"width":217.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":111,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

A tagging key is associated with an image repository. Creators with this key can push or pull any tag in this repository. It resides client-side.

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":123.07142857142856,"y":10.0,"rotation":0.0,"id":171,"width":100.0,"height":132.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.speech_bubble_right","order":112,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"magnitude":1,"id":173},{"magnitude":1,"id":174}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":172,"width":100.0,"height":117.0,"uid":null,"order":114,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":171,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":171},{"magnitude":-1,"id":174}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":171}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.speech_bubble","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":173,"width":100.0,"height":29.0,"uid":null,"order":117,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":171}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":36.0,"y":116.0,"rotation":0.0,"id":174,"width":24.0,"height":15.0,"uid":null,"order":119,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":24}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":15}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":172,"px":1.0,"py":1.0,"xOffset":-64.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.speech_bubble_right","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":135.41772151898738,"y":17.499999999999968,"rotation":0.0,"id":152,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":120,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":153,"width":14.0,"height":18.0,"uid":null,"order":122,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

1

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":294.4177215189873,"y":535.7139999999999,"rotation":0.0,"id":221,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":155,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":222,"width":15.0,"height":16.0,"uid":null,"order":157,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

C

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":211.48915009041588,"y":539.7139999999999,"rotation":0.0,"id":227,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":158,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":228,"width":16.0,"height":18.0,"uid":null,"order":160,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

X

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":294.4177215189873,"y":402.714,"rotation":0.0,"id":229,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":161,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":230,"width":15.0,"height":16.0,"uid":null,"order":163,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

A

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":185.57142857142856,"y":17.499999999999968,"rotation":0.0,"id":237,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":164,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":238,"width":16.0,"height":18.0,"uid":null,"order":166,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

X

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":161.5,"y":36.49999999999997,"rotation":0.0,"id":239,"width":20.0,"height":1.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":167,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":152,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":237,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-2.9177215189872925,-4.0],[6.078661844484657,-4.0],[15.075045207956606,-4.0],[24.071428571428555,-4.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":266.0,"y":202.714,"rotation":0.0,"id":250,"width":7.0,"height":413.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":172,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":79,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[3.5,-3.0],[9.5,496.99999999999994]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":35.21428571428568,"y":339.714,"rotation":0.0,"id":253,"width":187.85714285714286,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":173,"lockAspectRatio":false,"lockShape":false,"children":[{"x":77.85714285714286,"y":8.0,"rotation":0.0,"id":125,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":83,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":126,"width":110.00000000000001,"height":25.0,"uid":null,"order":86,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":127}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":127,"width":110.00000000000001,"height":25.0,"uid":null,"order":90,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":128,"width":110.00000000000001,"height":55.0,"uid":null,"order":93,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":125},{"magnitude":-1,"id":127}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":127,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":7.571428571428527,"y":0.0,"rotation":0.0,"id":122,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_right","order":95,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":123,"width":71.42857142857143,"height":50.0,"uid":null,"order":98,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":122}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":122}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_right","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":-7.142857142857139,"y":0.0,"rotation":0.0,"id":124,"width":38.0,"height":18.0,"uid":null,"order":101,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":122,"px":-0.1,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

latest

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":7.142857142857139,"y":50.0,"rotation":0.0,"id":119,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_right","order":103,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":120,"width":71.42857142857143,"height":50.0,"uid":null,"order":106,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":119}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":119}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_right","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":-7.142857142857139,"y":0.0,"rotation":0.0,"id":121,"width":26.0,"height":18.0,"uid":null,"order":109,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":119,"px":-0.1,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

2.0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":330.0,"y":10.463999999999942,"rotation":0.0,"id":261,"width":155.08307142857143,"height":168.072,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":174,"lockAspectRatio":false,"lockShape":false,"children":[{"x":85.65449999999998,"y":38.0,"rotation":0.0,"id":245,"width":28.0,"height":43.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":171,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":193,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":204,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[2.510849909584124,-13.999999999999972],[16.0465641952984,-13.999999999999972],[16.0465641952984,39.0],[29.582278481012622,39.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":null},{"x":89.65449999999998,"y":25.0,"rotation":0.0,"id":244,"width":24.0,"height":1.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":169,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":193,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":192,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-1.489150090415876,-0.9999999999999716],[7.534659433393642,-0.9999999999999716],[16.558468957203104,-0.9999999999999716],[25.582278481012622,-0.9999999999999716]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":null},{"x":115.2367784810126,"y":62.0,"rotation":0.0,"id":204,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":151,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":205,"width":15.0,"height":16.0,"uid":null,"order":154,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

C

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":null},{"x":115.2367784810126,"y":9.000000000000028,"rotation":0.0,"id":192,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":148,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":201,"width":15.0,"height":16.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

A

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":null},{"x":65.0007929475588,"y":9.000000000000028,"rotation":0.0,"id":193,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":141,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":194,"width":14.0,"height":18.0,"uid":null,"order":144,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

2

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":null},{"x":55.08307142857143,"y":0.0,"rotation":0.0,"id":195,"width":100.0,"height":133.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.speech_bubble_right","order":129,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"magnitude":1,"id":197},{"magnitude":1,"id":198}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":196,"width":100.0,"height":118.0,"uid":null,"order":132,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":195,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":195},{"magnitude":-1,"id":198}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":195}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.speech_bubble","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":197,"width":100.0,"height":29.0,"uid":null,"order":136,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":195}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":36.0,"y":117.0,"rotation":0.0,"id":198,"width":24.0,"height":15.0,"uid":null,"order":139,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":24}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":15}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":196,"px":1.0,"py":1.0,"xOffset":-64.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.speech_bubble_right","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":67.0,"rotation":0.0,"id":180,"width":67.309,"height":101.072,"uid":"com.gliffy.shape.cisco.cisco_v1.buildings.generic_building","order":126,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.buildings.generic_building","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":182,"width":56.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Company

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":184.8354430379747,"y":72.46399999999994,"rotation":0.0,"id":264,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":175,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":265,"width":21.0,"height":18.0,"uid":null,"order":177,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

N

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":162.0,"y":36.714,"rotation":0.0,"id":268,"width":18.0,"height":53.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":178,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":152,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":264,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-3.4177215189872925,-4.214000000000027],[9.708860759493689,-4.214000000000027],[9.708860759493689,50.74999999999994],[22.8354430379747,50.74999999999994]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":634.7139999999999,"rotation":0.0,"id":281,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":179,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":282,"width":71.42857142857143,"height":50.0,"uid":null,"order":181,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":281}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":281}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":283,"width":48.0,"height":18.0,"uid":null,"order":183,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":281,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

release

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":305.99999999999994,"y":595.7139999999999,"rotation":0.0,"id":277,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":184,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":278,"width":110.00000000000001,"height":25.0,"uid":null,"order":186,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":279}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":279,"width":110.00000000000001,"height":25.0,"uid":null,"order":189,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":280,"width":110.00000000000001,"height":55.0,"uid":null,"order":191,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":277},{"magnitude":-1,"id":279}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":279,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":584.7139999999999,"rotation":0.0,"id":274,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":192,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":275,"width":71.42857142857143,"height":50.0,"uid":null,"order":194,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":274}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":274}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":276,"width":26.0,"height":18.0,"uid":null,"order":196,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":274,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

7.5

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":211.48915009041588,"y":399.714,"rotation":0.0,"id":289,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":197,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":290,"width":21.0,"height":18.0,"uid":null,"order":199,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

N

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":584.0,"y":467.714,"rotation":0.0,"id":294,"width":54.0,"height":54.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.events.timer_intermediate","order":200,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.timer_intermediate.bpmn_v1","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":297,"width":88.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Timestamp Key

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":661.0,"y":476.714,"rotation":0.0,"id":298,"width":217.0,"height":42.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":203,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

A timestamp key is associated with an image repository. This is created by Docker and resides on the server.

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":316.5822784810126,"y":420.714,"rotation":0.0,"id":299,"width":30.0,"height":30.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.events.timer_intermediate","order":204,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.timer_intermediate.bpmn_v1","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":183.57142857142856,"y":421.714,"rotation":0.0,"id":301,"width":30.0,"height":30.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.events.timer_intermediate","order":205,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.timer_intermediate.bpmn_v1","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":181.41772151898738,"y":559.7139999999999,"rotation":0.0,"id":302,"width":30.0,"height":30.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.events.timer_intermediate","order":206,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.timer_intermediate.bpmn_v1","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":317.5822784810126,"y":556.7139999999999,"rotation":0.0,"id":303,"width":30.0,"height":30.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.events.timer_intermediate","order":207,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.timer_intermediate.bpmn_v1","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":472.40133544303796,"y":397.714,"rotation":0.0,"id":306,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":209,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":472.40133544303796,"y":348.714,"rotation":0.0,"id":307,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":210,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":472.40133544303796,"y":478.714,"rotation":0.0,"id":308,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":211,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_
hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":37.214285714285666,"y":483.714,"rotation":0.0,"id":309,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":212,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":40.214285714285666,"y":533.7139999999999,"rotation":0.0,"id":310,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":213,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":594.3333333333335,"y":570.7139999999999,"rotation":0.0,"id":314,"width":283.66666666666663,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":215,"lockAspectRatio":false,"lockShape":false,"children":[{"x":66.66666666666663,"y":4.0,"rotation":0.0,"id":312,"width":217.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":214,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Signed tag.

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":null},{"x":0.0,"y":0.0,"rotation":0.0,"id":304,"width":33.333333333333336,"height":20.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":208,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"}],"layers":[{"guid":"dockVlz9GmcW","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":216}],"shapeStyles":{},"lineStyles":{"global":{"strokeWidth":1,"endArrow":17}},"textStyles":{"global":{"size":"16px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.cisco.cisco_v1.buildings","com.gliffy.libraries.sitemap.sitemap_v2","com.gliffy.libraries.sitemap.sitemap_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.table.table_v2.default","com.gliffy.libraries.ui.ui_v3.navigation","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.ui.ui_v3.icon_symbols","com.gliffy.libraries.ui.ui_v2.forms_components","com.gliffy.libraries.ui.ui_v2.content","com.gliffy.libraries.ui.ui_v2.miscellaneous","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.bpmn.bpmn_v1.events","com.gliffy.libraries.bpmn.bpmn_v1.activities","com.gliffy.libraries.bpmn.bpmn_v1.data_artifacts","com.gliffy.libraries.bpmn.bpmn_v1.gateways","com.gliffy.libraries.bpmn.bpmn_v1.connectors","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.images"],"lastSerialized":1439174260766},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/docs/security/trust/images/trust_components.png b/docs/security/trust/images/trust_components.png new file mode 100644 index 00000000..039dfc8c Binary files /dev/null and b/docs/security/trust/images/trust_components.png differ diff --git a/docs/security/trust/images/trust_signing.gliffy b/docs/security/trust/images/trust_signing.gliffy new file mode 100644 index 00000000..b21fa366 --- /dev/null +++ b/docs/security/trust/images/trust_signing.gliffy @@ -0,0 +1 @@ 
+{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":881,"height":627,"nodeIndex":322,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":true,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":null,"printShrinkToFit":false,"printPortrait":false,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":10,"y":0},"max":{"x":880.0000000000001,"y":626.25}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":10.0,"y":122.25000000000006,"rotation":0.0,"id":79,"width":531.0,"height":500.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":312.25000000000006,"rotation":0.0,"id":40,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":1,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":41,"width":71.42857142857143,"height":50.0,"uid":null,"order":3,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":40}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":40}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":42,"width":26.0,"height":18.0,"uid":null,"order":5,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":40,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

1.0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":82.1785714285715,"y":17.03600000000003,"rotation":0.0,"id":0,"width":63.0,"height":82.0,"uid":"com.gliffy.shape.network.network_v4.business.female_user","order":6,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.female_user","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":1,"width":43.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Person

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":330.0,"y":142.25000000000006,"rotation":0.0,"id":2,"width":120.0,"height":80.0,"uid":"com.gliffy.shape.network.network_v4.business.user_group","order":9,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.user_group","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":3,"width":73.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Organization

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":141.0,"y":152.25000000000006,"rotation":0.0,"id":11,"width":63.0,"height":82.0,"uid":"com.gliffy.shape.network.network_v4.business.user","order":12,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.user","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":12,"width":48.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Account

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":305.99999999999994,"y":273.25000000000006,"rotation":0.0,"id":16,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":15,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":17,"width":110.00000000000001,"height":25.0,"uid":null,"order":17,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":18}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":18,"width":110.00000000000001,"height":25.0,"uid":null,"order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":19,"width":110.00000000000001,"height":55.0,"uid":null,"order":22,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":16},{"magnitude":-1,"id":18}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":18,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":262.25000000000006,"rotation":0.0,"id":37,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":35,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":38,"width":71.42857142857143,"height":50.0,"uid":null,"order":37,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":37}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":37}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":39,"width":38.0,"height":18.0,"uid":null,"order":39,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":37,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

latest

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":442.25000000000006,"rotation":0.0,"id":63,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":40,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":64,"width":71.42857142857143,"height":50.0,"uid":null,"order":42,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":63}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":63}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":65,"width":68.0,"height":18.0,"uid":null,"order":44,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":63,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

production

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":305.99999999999994,"y":403.25000000000006,"rotation":0.0,"id":58,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":45,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":59,"width":110.00000000000001,"height":25.0,"uid":null,"order":47,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":60}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":60,"width":110.00000000000001,"height":25.0,"uid":null,"order":50,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":61,"width":110.00000000000001,"height":55.0,"uid":null,"order":52,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":58},{"magnitude":-1,"id":60}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":60,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":392.25000000000006,"rotation":0.0,"id":55,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":53,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":56,"width":71.42857142857143,"height":50.0,"uid":null,"order":55,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":55}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":55}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":57,"width":28.0,"height":18.0,"uid":null,"order":57,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":55,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

test

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":10.000000000000036,"y":132.25000000000006,"rotation":0.0,"id":82,"width":108.99999999999999,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":58,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Registry

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":36.142857142857125,"y":399.25000000000006,"rotation":0.0,"id":109,"width":187.85714285714286,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":81,"lockAspectRatio":false,"lockShape":false,"children":[{"x":7.142857142857139,"y":50.0,"rotation":0.0,"id":98,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_right","order":74,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":99,"width":71.42857142857143,"height":50.0,"uid":null,"order":77,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":98}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":98}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_right","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":-7.142857142857139,"y":0.0,"rotation":0.0,"id":100,"width":50.0,"height":18.0,"uid":null,"order":80,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":98,"px":-0.1,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

working

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":7.571428571428527,"y":0.0,"rotation":0.0,"id":95,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_right","order":66,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":96,"width":71.42857142857143,"height":50.0,"uid":null,"order":69,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":95}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":95}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_right","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":-7.142857142857139,"y":0.0,"rotation":0.0,"id":97,"width":38.0,"height":18.0,"uid":null,"order":72,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":95,"px":-0.1,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

latest

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":77.85714285714286,"y":8.0,"rotation":0.0,"id":30,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":24,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":31,"width":110.00000000000001,"height":25.0,"uid":null,"order":27,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":32}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":32,"width":110.00000000000001,"height":25.0,"uid":null,"order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":33,"width":110.00000000000001,"height":55.0,"uid":null,"order":34,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":30},{"magnitude":-1,"id":32}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":32,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":330.0,"y":0.0,"rotation":0.0,"id":180,"width":67.309,"height":101.072,"uid":"com.gliffy.shape.cisco.cisco_v1.buildings.generic_building","order":126,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.buildings.generic_building","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":182,"width":56.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Company

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":266.0,"y":125.25000000000006,"rotation":0.0,"id":250,"width":7.0,"height":413.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":172,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":79,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[3.5,-3.0],[9.5,496.99999999999994]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":35.21428571428568,"y":262.25000000000006,"rotation":0.0,"id":253,"width":187.85714285714286,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":173,"lockAspectRatio":false,"lockShape":false,"children":[{"x":77.85714285714286,"y":8.0,"rotation":0.0,"id":125,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":83,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":126,"width":110.00000000000001,"height":25.0,"uid":null,"order":86,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":127}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":127,"width":110.00000000000001,"height":25.0,"uid":null,"order":90,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":128,"width":110.00000000000001,"height":55.0,"uid":null,"order":93,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":125},{"magnitude":-1,"id":127}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":127,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":7.571428571428527,"y":0.0,"rotation":0.0,"id":122,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_right","order":95,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":123,"width":71.42857142857143,"height":50.0,"uid":null,"order":98,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":122}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":122}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_right","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":-7.142857142857139,"y":0.0,"rotation":0.0,"id":124,"width":38.0,"height":18.0,"uid":null,"order":101,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":122,"px":-0.1,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

latest

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":7.142857142857139,"y":50.0,"rotation":0.0,"id":119,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_right","order":103,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":120,"width":71.42857142857143,"height":50.0,"uid":null,"order":106,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":119}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":119}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_right","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":-7.142857142857139,"y":0.0,"rotation":0.0,"id":121,"width":26.0,"height":18.0,"uid":null,"order":109,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":119,"px":-0.1,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

2.0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":557.25,"rotation":0.0,"id":281,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":179,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":282,"width":71.42857142857143,"height":50.0,"uid":null,"order":181,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":281}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":281}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":283,"width":48.0,"height":18.0,"uid":null,"order":183,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":281,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

release

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":305.99999999999994,"y":518.25,"rotation":0.0,"id":277,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":184,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":278,"width":110.00000000000001,"height":25.0,"uid":null,"order":186,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":279}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":279,"width":110.00000000000001,"height":25.0,"uid":null,"order":189,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":280,"width":110.00000000000001,"height":55.0,"uid":null,"order":191,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":277},{"magnitude":-1,"id":279}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":279,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":507.25,"rotation":0.0,"id":274,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":192,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":275,"width":71.42857142857143,"height":50.0,"uid":null,"order":194,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":274}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":274}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":276,"width":26.0,"height":18.0,"uid":null,"order":196,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":274,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

7.5

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":472.40133544303796,"y":320.25000000000006,"rotation":0.0,"id":306,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":209,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":472.40133544303796,"y":271.25000000000006,"rotation":0.0,"id":307,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":210,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":472.40133544303796,"y":401.25000000000006,"rotation":0.0,"id":308,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":211,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":37.214285714285666,"y":406.25000000000006,"rotation":0.0,"id":309,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":212,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":40.214285714285666,"y":456.25000000000006,"rotation":0.0,"id":310,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":213,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":594.3333333333335,"y":493.25000000000006,"rotation":0.0,"id":314,"width":283.66666666666663,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":215,"lockAspectRatio":false,"lockShape":false,"children":[{"x":66.66666666666663,"y":4.0,"rotation":0.0,"id":312,"width":217.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":214,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Signed tag.

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":null},{"x":0.0,"y":0.0,"rotation":0.0,"id":304,"width":33.333333333333336,"height":20.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":208,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"}],"layers":[{"guid":"dockVlz9GmcW","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":216}],"shapeStyles":{},"lineStyles":{"global":{"strokeWidth":1,"endArrow":17}},"textStyles":{"global":{"size":"16px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.cisco.cisco_v1.buildings","com.gliffy.libraries.sitemap.sitemap_v2","com.gliffy.libraries.sitemap.sitemap_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.table.table_v2.default","com.gliffy.libraries.ui.ui_v3.navigation","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.ui.ui_v3.icon_symbols","com.gliffy.libraries.ui.ui_v2.forms_components","com.gliffy.libraries.ui.ui_v2.content","com.gliffy.libraries.ui.ui_v2.miscellaneous","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.bpmn.bpmn_v1.events","com.gliffy.libraries.bpmn.bpmn_v1.activities","com.gliffy.libraries.bpmn.bpmn_v1.data_artifacts","com.gliffy.libraries.bpmn.bpmn_v1.gateways","com.gliffy.libraries.bpmn.bpmn_v1.connectors","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.images"],"lastSerialized":1439068922785},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/docs/security/trust/images/trust_signing.png b/docs/security/trust/images/trust_signing.png new file mode 100644 index 00000000..4a941be1 Binary files /dev/null and b/docs/security/trust/images/trust_signing.png differ diff --git a/docs/security/trust/images/trust_view.gliffy b/docs/security/trust/images/trust_view.gliffy new file mode 100644 index 00000000..b635e657 --- /dev/null +++ b/docs/security/trust/images/trust_view.gliffy @@ -0,0 +1 @@ 
+{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":866,"height":537,"nodeIndex":323,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":true,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":null,"printShrinkToFit":false,"printPortrait":false,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":10,"y":0},"max":{"x":865.6666666666666,"y":536.25}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":10.0,"y":122.25000000000006,"rotation":0.0,"id":79,"width":531.0,"height":409.99999999999994,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":312.25000000000006,"rotation":0.0,"id":40,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":1,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":41,"width":71.42857142857143,"height":50.0,"uid":null,"order":3,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":40}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":40}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":42,"width":26.0,"height":18.0,"uid":null,"order":5,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":40,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

1.0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":82.1785714285715,"y":17.03600000000003,"rotation":0.0,"id":0,"width":63.0,"height":82.0,"uid":"com.gliffy.shape.network.network_v4.business.female_user","order":6,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.female_user","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":1,"width":43.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Person

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":330.0,"y":142.25000000000006,"rotation":0.0,"id":2,"width":120.0,"height":80.0,"uid":"com.gliffy.shape.network.network_v4.business.user_group","order":9,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.user_group","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":3,"width":73.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Organization

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":141.0,"y":152.25000000000006,"rotation":0.0,"id":11,"width":63.0,"height":82.0,"uid":"com.gliffy.shape.network.network_v4.business.user","order":12,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.user","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":12,"width":48.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Account

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":305.99999999999994,"y":273.25000000000006,"rotation":0.0,"id":16,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":15,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":17,"width":110.00000000000001,"height":25.0,"uid":null,"order":17,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":18}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":18,"width":110.00000000000001,"height":25.0,"uid":null,"order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":19,"width":110.00000000000001,"height":55.0,"uid":null,"order":22,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":16},{"magnitude":-1,"id":18}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":18,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":262.25000000000006,"rotation":0.0,"id":37,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":35,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":38,"width":71.42857142857143,"height":50.0,"uid":null,"order":37,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":37}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":37}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":39,"width":38.0,"height":18.0,"uid":null,"order":39,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":37,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

latest

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":442.25000000000006,"rotation":0.0,"id":63,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":40,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":64,"width":71.42857142857143,"height":50.0,"uid":null,"order":42,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":63}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":63}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":65,"width":68.0,"height":18.0,"uid":null,"order":44,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":63,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

production

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":305.99999999999994,"y":403.25000000000006,"rotation":0.0,"id":58,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":45,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":59,"width":110.00000000000001,"height":25.0,"uid":null,"order":47,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":60}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":60,"width":110.00000000000001,"height":25.0,"uid":null,"order":50,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":61,"width":110.00000000000001,"height":55.0,"uid":null,"order":52,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":58},{"magnitude":-1,"id":60}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":60,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":392.25000000000006,"rotation":0.0,"id":55,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":53,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":56,"width":71.42857142857143,"height":50.0,"uid":null,"order":55,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":55}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":55}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":57,"width":28.0,"height":18.0,"uid":null,"order":57,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":55,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

test

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":10.000000000000036,"y":132.25000000000006,"rotation":0.0,"id":82,"width":108.99999999999999,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":58,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Registry

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":36.142857142857125,"y":399.25000000000006,"rotation":0.0,"id":109,"width":187.85714285714286,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":81,"lockAspectRatio":false,"lockShape":false,"children":[{"x":7.142857142857139,"y":50.0,"rotation":0.0,"id":98,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_right","order":74,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":99,"width":71.42857142857143,"height":50.0,"uid":null,"order":77,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":98}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":98}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_right","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":-7.142857142857139,"y":0.0,"rotation":0.0,"id":100,"width":50.0,"height":18.0,"uid":null,"order":80,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":98,"px":-0.1,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

working

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":7.571428571428527,"y":0.0,"rotation":0.0,"id":95,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_right","order":66,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":96,"width":71.42857142857143,"height":50.0,"uid":null,"order":69,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":95}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":95}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_right","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":-7.142857142857139,"y":0.0,"rotation":0.0,"id":97,"width":38.0,"height":18.0,"uid":null,"order":72,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":95,"px":-0.1,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

latest

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":77.85714285714286,"y":8.0,"rotation":0.0,"id":30,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":24,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":31,"width":110.00000000000001,"height":25.0,"uid":null,"order":27,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":32}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":32,"width":110.00000000000001,"height":25.0,"uid":null,"order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":33,"width":110.00000000000001,"height":55.0,"uid":null,"order":34,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":30},{"magnitude":-1,"id":32}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":32,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":330.0,"y":0.0,"rotation":0.0,"id":180,"width":67.309,"height":101.072,"uid":"com.gliffy.shape.cisco.cisco_v1.buildings.generic_building","order":126,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.buildings.generic_building","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":182,"width":56.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Company

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":266.0,"y":125.25000000000006,"rotation":0.0,"id":250,"width":7.0,"height":413.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":172,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":79,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[3.5,-3.0],[9.5,406.99999999999994]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":472.40133544303796,"y":320.25000000000006,"rotation":0.0,"id":306,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":209,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":472.40133544303796,"y":271.25000000000006,"rotation":0.0,"id":307,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":210,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":472.40133544303796,"y":401.25000000000006,"rotation":0.0,"id":308,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":211,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":37.214285714285666,"y":406.25000000000006,"rotation":0.0,"id":309,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":212,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":40.214285714285666,"y":456.25000000000006,"rotation":0.0,"id":310,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":213,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":580.0,"y":418.25000000000006,"rotation":0.0,"id":314,"width":28
3.66666666666663,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":215,"lockAspectRatio":false,"lockShape":false,"children":[{"x":66.66666666666663,"y":4.0,"rotation":0.0,"id":312,"width":217.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":214,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Signed tag.

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":null},{"x":0.0,"y":0.0,"rotation":0.0,"id":304,"width":33.333333333333336,"height":20.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":208,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"}],"layers":[{"guid":"dockVlz9GmcW","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":216}],"shapeStyles":{},"lineStyles":{"global":{"strokeWidth":1,"endArrow":17}},"textStyles":{"global":{"size":"16px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.cisco.cisco_v1.buildings","com.gliffy.libraries.sitemap.sitemap_v2","com.gliffy.libraries.sitemap.sitemap_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.table.table_v2.default","com.gliffy.libraries.ui.ui_v3.navigation","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.ui.ui_v3.icon_symbols","com.gliffy.libraries.ui.ui_v2.forms_components","com.gliffy.libraries.ui.ui_v2.content","com.gliffy.libraries.ui.ui_v2.miscellaneous","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.bpmn.bpmn_v1.events","com.gliffy.libraries.bpmn.bpmn_v1.activities","com.gliffy.libraries.bpmn.bpmn_v1.data_artifacts","com.gliffy.libraries.bpmn.bpmn_v1.gateways","com.gliffy.libraries.bpmn.bpmn_v1.connectors","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.images"],"lastSerialized":1439069097667},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/docs/security/trust/images/trust_view.png b/docs/security/trust/images/trust_view.png new file mode 100644 index 00000000..71eb26ce Binary files /dev/null and b/docs/security/trust/images/trust_view.png differ diff --git a/docs/security/trust/index.md b/docs/security/trust/index.md new file mode 100644 index 00000000..2264c5df --- /dev/null +++ b/docs/security/trust/index.md @@ -0,0 +1,21 @@ + + +# Use trusted images + +The following topics are available: + +* [Content trust in Docker](/security/trust/content_trust) +* [Manage keys for content trust](/security/trust/trust_key_mng) +* [Automation with content trust](/security/trust/trust_automation) +* [Play in a content trust sandbox](/security/trust/trust_sandbox) + diff --git a/docs/security/trust/trust_automation.md b/docs/security/trust/trust_automation.md new file mode 100644 index 00000000..0808d8ce --- /dev/null +++ b/docs/security/trust/trust_automation.md @@ -0,0 +1,79 @@ + + +# Automation with content trust + +Your automation systems that pull or build images can also work with trust. Any automation environment must set `DOCKER_TRUST_ENABLED` either manually or in in a scripted fashion before processing images. 
+
+## Bypass requests for passphrases
+
+To allow tools to wrap docker and push trusted content, there are two
+environment variables that allow you to provide the passphrases without an
+expect script, or typing them in:
+
+ - `DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE`
+ - `DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE`
+
+Docker attempts to use the contents of these environment variables as the
+passphrase for the keys. For example, an image publisher can export the
+repository `target` and `snapshot` passphrases:
+
+```bash
+$ export DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE="u7pEQcGoebUHm6LHe6"
+$ export DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE="l7pEQcTKJjUHm6Lpe4"
+```
+
+Then, when pushing a new tag, the Docker client does not request these values
+but signs automatically:
+
+```bash
+$ docker push docker/trusttest:latest
+The push refers to a repository [docker.io/docker/trusttest] (len: 1)
+a9539b34a6ab: Image already exists
+b3dbab3810fc: Image already exists
+latest: digest: sha256:d149ab53f871 size: 3355
+Signing and pushing trust metadata
+```
+
+## Building with content trust
+
+You can also build with content trust. Before running the `docker build`
+command, you should set the environment variable `DOCKER_CONTENT_TRUST` either
+manually or in a scripted fashion. Consider the simple Dockerfile below.
+
+```Dockerfile
+FROM docker/trusttest:latest
+RUN echo
+```
+
+The `FROM` tag is pulling a signed image. You cannot build an image that has a
+`FROM` that is not either present locally or signed. Given that content trust
+data exists for the tag `latest`, the following build should succeed:
+
+```bash
+$ docker build -t docker/trusttest:testing .
+Using default tag: latest
+latest: Pulling from docker/trusttest
+
+b3dbab3810fc: Pull complete
+a9539b34a6ab: Pull complete
+Digest: sha256:d149ab53f871
+```
+
+If content trust is enabled, building from a Dockerfile that relies on a tag
+without trust data causes the build command to fail:
+
+```bash
+$ docker build -t docker/trusttest:testing .
+unable to process Dockerfile: No trust data for notrust
+```
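+
+Putting the pieces together, a build pipeline can sign and push a fresh image
+without any interactive prompts. The sketch below assumes the passphrases
+arrive through CI secrets; `CI_OFFLINE_PASSPHRASE` and `CI_TAGGING_PASSPHRASE`
+are placeholder names, not Docker variables:
+
+```bash
+#!/usr/bin/env bash
+# Sign and push a freshly built image non-interactively.
+set -euo pipefail
+
+export DOCKER_CONTENT_TRUST=1
+# Placeholder secret names; substitute whatever your CI system provides.
+export DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE="$CI_OFFLINE_PASSPHRASE"
+export DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE="$CI_TAGGING_PASSPHRASE"
+
+docker build -t docker/trusttest:testing .
+docker push docker/trusttest:testing
+```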
| + +With the exception of the timestamp, all the keys are generated and stored locally +client-side. The timestamp is safely generated and stored in a signing server that +is deployed alongside the Docker registry. All keys are generated in a backend +service that isn't directly exposed to the internet and are encrypted at rest. + +## Choosing a passphrase + +The passphrases you chose for both the offline key and your tagging key should +be randomly generated and stored in a password manager. Having the tagging key +allow users to sign image tags on a repository. Passphrases are used to encrypt +your keys at rest and ensures that a lost laptop or an unintended backup doesn't +put the private key material at risk. + +## Back up your keys + +All the Docker trust keys are stored encrypted using the passphrase you provide +on creation. Even so, you should still take care of the location where you back them up. +Good practice is to create two encrypted USB keys. + +It is very important that you backup your keys to a safe, secure location. Loss +of the tagging key is recoverable; loss of the offline key is not. + +The Docker client stores the keys in the `~/.docker/trust/private` directory. +Before backing them up, you should `tar` them into an archive: + +```bash +$ tar -zcvf private_keys_backup.tar.gz ~/.docker/trust/private +$ chmod 600 private_keys_backup.tar.gz +``` + +## Lost keys + +If a publisher loses keys it means losing the ability to sign trusted content for +your repositories. If you lose a key, contact [Docker +Support](https://support.docker.com) (support@docker.com) to reset the repository +state. + +This loss also requires **manual intervention** from every consumer that pulled +the tagged image prior to the loss. Image consumers would get an error for +content that they already downloaded: + +``` +could not validate the path to a trusted root: failed to validate data with current trusted certificates +``` + +To correct this, they need to download a new image tag with that is signed with +the new key. + +## Related information + +* [Content trust in Docker](/security/trust/content_trust) +* [Automation with content trust](/security/trust/trust_automation) +* [Play in a content trust sandbox](/security/trust/trust_sandbox) diff --git a/docs/security/trust/trust_sandbox.md b/docs/security/trust/trust_sandbox.md new file mode 100644 index 00000000..68b14910 --- /dev/null +++ b/docs/security/trust/trust_sandbox.md @@ -0,0 +1,331 @@ + + +# Play in a content trust sandbox + +This page explains how to set up and use a sandbox for experimenting with trust. +The sandbox allows you to configure and try trust operations locally without +impacting your production images. + +Before working through this sandbox, you should have read through the [trust +overview](content_trust.md). + +### Prerequisites + +These instructions assume you are running in Linux or Mac OS X. You can run +this sandbox on a local machine or on a virtual machine. You will need to +have `sudo` privileges on your local machine or in the VM. + +This sandbox requires you to install two Docker tools: Docker Engine and Docker +Compose. To install the Docker Engine, choose from the [list of supported +platforms]({{< relref "installation.md" >}}). To install Docker Compose, see the +[detailed instructions here]({{< relref "compose/install" >}}). + +Finally, you'll need to have `git` installed on your local system or VM. + +## What is in the sandbox? 
+
+If you are just using trust out-of-the-box you only need your Docker Engine
+client and access to Docker's own public hub. The sandbox mimics a
+production trust environment, and requires these additional components:
+
+| Container       | Description                                                                                              |
+|-----------------|------------------------------------------------------------------------------------------------------------|
+| notarysandbox   | A container with the latest version of Docker Engine and with some preconfigured certificates. This is your sandbox, where you can use the `docker` client to test trust operations. |
+| Registry server | A local registry service.                                                                                   |
+| Notary server   | The service that does all the heavy lifting of managing trust.                                              |
+| Notary signer   | A service that ensures that your keys are secure.                                                           |
+| MySQL           | The database where all of the trust information is stored.                                                  |
+
+The sandbox uses the Docker daemon on your local system. Within the `notarysandbox`
+you interact with a local registry rather than the public Docker Hub. This means
+your everyday image repositories are not used. They are protected while you play.
+
+When you play in the sandbox, you'll also create root and tagging keys. The
+sandbox is configured to store all the keys and files inside the `notarysandbox`
+container. Since the keys you create in the sandbox are for play only,
+destroying the container destroys them as well.
+
+
+## Build the sandbox
+
+In this section, you build the Docker components for your trust sandbox. If you
+work exclusively with the Docker Hub, you would not need these components.
+They are built into the Docker Hub for you. For the sandbox, however, you must
+build your own entire mock production environment, including a registry.
+
+### Configure /etc/hosts
+
+The sandbox's `notaryserver` and `sandboxregistry` run on your local server. The
+client inside the `notarysandbox` container connects to them over your network.
+So, you'll need an entry for both servers in your local `/etc/hosts` file.
+
+1. Add an entry for the `notaryserver` to `/etc/hosts`.
+
+        $ sudo sh -c 'echo "127.0.0.1 notaryserver" >> /etc/hosts'
+
+2. Add an entry for the `sandboxregistry` to `/etc/hosts`.
+
+        $ sudo sh -c 'echo "127.0.0.1 sandboxregistry" >> /etc/hosts'
+
+
+### Build the notarytest image
+
+1. Create a `notarysandbox` directory on your system.
+
+        $ mkdir notarysandbox
+
+2. Change into your `notarysandbox` directory.
+
+        $ cd notarysandbox
+
+3. Create a `notarytest` directory, then change into that.
+
+        $ mkdir notarytest
+        $ cd notarytest
+
+4. Create a file called `Dockerfile` with your favorite editor.
+
+5. Add the following to the new file.
+
+        FROM debian:jessie
+
+        ADD https://master.dockerproject.org/linux/amd64/docker /usr/bin/docker
+        RUN chmod +x /usr/bin/docker \
+            && apt-get update \
+            && apt-get install -y \
+            tree \
+            vim \
+            git \
+            ca-certificates \
+            --no-install-recommends
+
+        WORKDIR /root
+        RUN git clone -b trust-sandbox https://github.com/docker/notary.git
+        RUN cp /root/notary/fixtures/root-ca.crt /usr/local/share/ca-certificates/root-ca.crt
+        RUN update-ca-certificates
+
+        ENTRYPOINT ["bash"]
+
+6. Save and close the file.
+
+7. Build the testing container.
+
+        $ docker build -t notarysandbox .
+        Sending build context to Docker daemon 2.048 kB
+        Step 0 : FROM debian:jessie
+        ...
+        Successfully built 5683f17e9d72
+
+
+### Build and start up the trust servers
+
+In this step, you get the source code for your notary and registry services.
+Then, you'll use Docker Compose to build and start them on your local system.
+
+1. Change back to the root of your `notarysandbox` directory.
+
+        $ cd notarysandbox
+
+2. Clone the `notary` project.
+
+        $ git clone -b trust-sandbox https://github.com/docker/notary.git
+
+3. Clone the `distribution` project.
+
+        $ git clone https://github.com/docker/distribution.git
+
+4. Change to the `notary` project directory.
+
+        $ cd notary
+
+    The directory contains a `docker-compose` file that you'll use to run a
+    notary server together with a notary signer and the corresponding MySQL
+    databases. The databases store the trust information for an image.
+
+5. Build the server images.
+
+        $ docker-compose build
+
+    The first time you run this, the build takes some time.
+
+6. Run the server containers on your local system.
+
+        $ docker-compose up -d
+
+    Once the trust services are up, you'll set up a local version of the Docker
+    Registry v2.
+
+7. Change to the `notarysandbox/distribution` directory.
+
+8. Build the `sandboxregistry` server.
+
+        $ docker build -t sandboxregistry .
+
+9. Start the `sandboxregistry` server running.
+
+        $ docker run -p 5000:5000 --name sandboxregistry sandboxregistry &
+
+## Playing in the sandbox
+
+Now that everything is set up, you can go into your `notarysandbox` container and
+start testing Docker content trust.
+
+
+### Start the notarysandbox container
+
+In this procedure, you start the `notarysandbox` and link it to the running
+`notary_notaryserver_1` and `sandboxregistry` containers. The links allow
+communication among the containers.
+
+```
+$ docker run -it -v /var/run/docker.sock:/var/run/docker.sock --link notary_notaryserver_1:notaryserver --link sandboxregistry:sandboxregistry notarysandbox
+root@0710762bb59a:/#
+```
+
+Mounting the `docker.sock` gives the `notarysandbox` access to the `docker`
+daemon on your host, while storing all the keys and files inside the sandbox
+container. When you destroy the container, you destroy the "play" keys.
+
+### Test some trust operations
+
+Now, you'll pull some images.
+
+1. Download a `docker` image to test with.
+
+        # docker pull docker/trusttest
+        Using default tag: latest
+        latest: Pulling from docker/trusttest
+
+        b3dbab3810fc: Pull complete
+        a9539b34a6ab: Pull complete
+        Digest: sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a
+        Status: Downloaded newer image for docker/trusttest:latest
+
+2. Tag it to be pushed to our sandbox registry:
+
+        # docker tag docker/trusttest sandboxregistry:5000/test/trusttest:latest
+
+3. Enable content trust.
+
+        # export DOCKER_CONTENT_TRUST=1
+
+4. Identify the trust server.
+
+        # export DOCKER_CONTENT_TRUST_SERVER=https://notaryserver:4443
+
+    This step is only necessary because the sandbox is using its own server.
+    If you are using the public Docker Hub, this step isn't necessary.
+
+5. Pull the test image.
+
+        # docker pull sandboxregistry:5000/test/trusttest
+        Using default tag: latest
+        no trust data available
+
+    You see an error, because this content doesn't exist on the `sandboxregistry` yet.
+
+6. Push the trusted image.
+
+        # docker push sandboxregistry:5000/test/trusttest:latest
+        The push refers to a repository [sandboxregistry:5000/test/trusttest] (len: 1)
+        a9539b34a6ab: Image successfully pushed
+        b3dbab3810fc: Image successfully pushed
+        latest: digest: sha256:1d871dcb16805f0604f10d31260e79c22070b35abc71a3d1e7ee54f1042c8c7c size: 3348
+        Signing and pushing trust metadata
+        You are about to create a new root signing key passphrase. This passphrase
+        will be used to protect the most sensitive key in your signing system. Please
+        choose a long, complex passphrase and be careful to keep the password and the
+        key file itself secure and backed up. It is highly recommended that you use a
+        password manager to generate the passphrase and keep it safe. There will be no
+        way to recover this key. You can find the key in your config directory.
+        Enter passphrase for new offline key with id 8c69e04:
+        Repeat passphrase for new offline key with id 8c69e04:
+        Enter passphrase for new tagging key with id sandboxregistry:5000/test/trusttest (93c362a):
+        Repeat passphrase for new tagging key with id sandboxregistry:5000/test/trusttest (93c362a):
+        Finished initializing "sandboxregistry:5000/test/trusttest"
+        latest: digest: sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a size: 3355
+        Signing and pushing trust metadata
+
+7. Try pulling the image you just pushed:
+
+        # docker pull sandboxregistry:5000/test/trusttest
+        Using default tag: latest
+        Pull (1 of 1): sandboxregistry:5000/test/trusttest:latest@sha256:1d871dcb16805f0604f10d31260e79c22070b35abc71a3d1e7ee54f1042c8c7c
+        sha256:1d871dcb16805f0604f10d31260e79c22070b35abc71a3d1e7ee54f1042c8c7c: Pulling from test/trusttest
+        b3dbab3810fc: Already exists
+        a9539b34a6ab: Already exists
+        Digest: sha256:1d871dcb16805f0604f10d31260e79c22070b35abc71a3d1e7ee54f1042c8c7c
+        Status: Downloaded newer image for sandboxregistry:5000/test/trusttest@sha256:1d871dcb16805f0604f10d31260e79c22070b35abc71a3d1e7ee54f1042c8c7c
+        Tagging sandboxregistry:5000/test/trusttest@sha256:1d871dcb16805f0604f10d31260e79c22070b35abc71a3d1e7ee54f1042c8c7c as sandboxregistry:5000/test/trusttest:latest
+
+
+### Test with malicious images
+
+What happens when data is corrupted and you try to pull it when trust is
+enabled? In this section, you go into the `sandboxregistry` and tamper with some
+data. Then, you try to pull it.
+
+1. Leave the sandbox container running.
+
+2. Open a new bash terminal from your host into the `sandboxregistry`.
+
+        $ docker exec -it sandboxregistry bash
+        296db6068327#
+
+3. Change into the registry storage.
+
+    You'll need to provide the `sha` you received when you pushed the image.
+
+        # cd /var/lib/registry/docker/registry/v2/blobs/sha256/aa/aac0c133338db2b18ff054943cee3267fe50c75cdee969aed88b1992539ed042
+
+4. Add malicious data to one of the trusttest layers:
+
+        # echo "Malicious data" > data
+
+5. Go back to your sandbox terminal.
+
+6. List the trusttest images.
+
+        # docker images | grep trusttest
+        docker/trusttest                      latest   a9539b34a6ab   7 weeks ago   5.025 MB
+        sandboxregistry:5000/test/trusttest   latest   a9539b34a6ab   7 weeks ago   5.025 MB
+        sandboxregistry:5000/test/trusttest   <none>   a9539b34a6ab   7 weeks ago   5.025 MB
+
+7. Remove the `trusttest:latest` image.
+
+        # docker rmi -f a9539b34a6ab
+        Untagged: docker/trusttest:latest
+        Untagged: sandboxregistry:5000/test/trusttest:latest
+        Untagged: sandboxregistry:5000/test/trusttest@sha256:1d871dcb16805f0604f10d31260e79c22070b35abc71a3d1e7ee54f1042c8c7c
+        Deleted: a9539b34a6aba01d3942605dfe09ab821cd66abf3cf07755b0681f25ad81f675
+        Deleted: b3dbab3810fc299c21f0894d39a7952b363f14520c2f3d13443c669b63b6aa20
+
+8. Pull the image again.
+
+        # docker pull sandboxregistry:5000/test/trusttest
+        Using default tag: latest
+        ...
+        b3dbab3810fc: Verifying Checksum
+        a9539b34a6ab: Pulling fs layer
+        filesystem layer verification failed for digest sha256:aac0c133338db2b18ff054943cee3267fe50c75cdee969aed88b1992539ed042
+
+    You'll see that the pull did not complete because the trust system was
+    unable to verify the image.
+
+## More play in the sandbox
+
+Now that you have a full Docker content trust sandbox on your local system,
+feel free to play with it and see how it behaves. If you find any security
+issues with Docker, feel free to send us an email at .
+
+
+
\ No newline at end of file
diff --git a/docs/static_files/README.md b/docs/static_files/README.md
new file mode 100644
index 00000000..f09a25af
--- /dev/null
+++ b/docs/static_files/README.md
@@ -0,0 +1,11 @@
+Static files dir
+================
+
+Files you put in /static_files/ will be copied to the web-visible /_static/
+
+Be careful not to override pre-existing static files from the template.
+
+Generally, layout-related files should go in the /theme directory.
+
+If you want to add images to your particular documentation page, just put them next to
+your .rst source file and reference them relatively.
\ No newline at end of file
diff --git a/docs/static_files/contributors.png b/docs/static_files/contributors.png
new file mode 100644
index 00000000..63c0a0c0
Binary files /dev/null and b/docs/static_files/contributors.png differ
diff --git a/docs/static_files/docker-logo-compressed.png b/docs/static_files/docker-logo-compressed.png
new file mode 100644
index 00000000..717d09d7
Binary files /dev/null and b/docs/static_files/docker-logo-compressed.png differ
diff --git a/docs/static_files/docker_pull_chart.png b/docs/static_files/docker_pull_chart.png
new file mode 100644
index 00000000..57d3f68d
Binary files /dev/null and b/docs/static_files/docker_pull_chart.png differ
diff --git a/docs/static_files/docker_push_chart.png b/docs/static_files/docker_push_chart.png
new file mode 100644
index 00000000..34b37073
Binary files /dev/null and b/docs/static_files/docker_push_chart.png differ
diff --git a/docs/static_files/dockerlogo-v.png b/docs/static_files/dockerlogo-v.png
new file mode 100644
index 00000000..69ae6851
Binary files /dev/null and b/docs/static_files/dockerlogo-v.png differ
diff --git a/docs/terms/container.md b/docs/terms/container.md
new file mode 100644
index 00000000..f52f728d
--- /dev/null
+++ b/docs/terms/container.md
@@ -0,0 +1,51 @@
+
+# Container
+
+## Introduction
+
+![](/terms/images/docker-filesystems-busyboxrw.png)
+
+Once you start a process in Docker from an [*Image*](/terms/image), Docker
+fetches the image and its [*Parent Image*](/terms/image), and repeats the
+process until it reaches the [*Base Image*](/terms/image/#base-image-def). Then
+the [*Union File System*](/terms/layer) adds a read-write layer on top. That
+read-write layer, plus the information about its [*Parent Image*](/terms/image)
+and some additional information like its unique id, networking
+configuration, and resource limits, is called a **container**.
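+
+You can see both halves of this definition on a live system. A brief sketch
+(the container name `demo` is arbitrary, and any small image will do; the
+`docker diff` output is abbreviated):
+
+    $ docker run --name demo ubuntu:14.04 touch /tmp/hello
+    $ docker diff demo      # contents of the read-write layer (A = added)
+    A /tmp/hello
+    $ docker inspect demo   # the metadata: unique id, networking, limits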
+
+## Container state
+
+Containers can change, and so they have state. A container may be
+**running** or **exited**.
+
+When a container is running, the idea of a "container" also includes a
+tree of processes running on the CPU, isolated from the other processes
+running on the host.
+
+When the container is exited, the state of the file system and its exit
+value is preserved. You can start, stop, and restart a container. The
+processes restart from scratch (their memory state is **not** preserved
+in a container), but the file system is just as it was when the
+container was stopped.
+
+You can promote a container to an [*Image*](/terms/image) with `docker commit`.
+Once a container is an image, you can use it as a parent for new containers.
+
+## Container IDs
+
+All containers are identified by a 64-character hexadecimal string
+(internally a 256-bit value). To simplify their use, a short ID of the
+first 12 characters can be used on the command line. There is a small
+possibility of short ID collisions, so the docker server will always
+return the long ID.
diff --git a/docs/terms/filesystem.md b/docs/terms/filesystem.md
new file mode 100644
index 00000000..d0e0e36f
--- /dev/null
+++ b/docs/terms/filesystem.md
@@ -0,0 +1,42 @@
+
+# File system
+
+## Introduction
+
+![](/terms/images/docker-filesystems-generic.png)
+
+In order for a Linux system to run, it typically needs two [file
+systems](http://en.wikipedia.org/wiki/Filesystem):
+
+1. boot file system (bootfs)
+2. root file system (rootfs)
+
+The **boot file system** contains the bootloader and the kernel. The
+user never makes any changes to the boot file system. In fact, soon
+after the boot process is complete, the entire kernel is in memory, and
+the boot file system is unmounted to free up the RAM associated with the
+initrd disk image.
+
+The **root file system** includes the typical directory structure we
+associate with Unix-like operating systems:
+`/dev, /proc, /bin, /etc, /lib, /usr,` and `/tmp` plus all the configuration
+files, binaries and libraries required to run user applications (like bash,
+ls, and so forth).
+
+While there can be important kernel differences between different Linux
+distributions, the contents and organization of the root file system are
+usually what make your software packages dependent on one distribution
+versus another. Docker can help solve this problem by running multiple
+distributions at the same time.
+
+![](/terms/images/docker-filesystems-multiroot.png)
diff --git a/docs/terms/image.md b/docs/terms/image.md
new file mode 100644
index 00000000..a902882b
--- /dev/null
+++ b/docs/terms/image.md
@@ -0,0 +1,47 @@
+
+# Image
+
+## Introduction
+
+![](/terms/images/docker-filesystems-debian.png)
+
+In Docker terminology, a read-only [*Layer*](/terms/layer/#layer) is
+called an **image**. An image never changes.
+
+Since Docker uses a [*Union File System*](/terms/layer/#union-file-system), the
+processes think the whole file system is mounted read-write. But all the
+changes go to the top-most writeable layer, and underneath, the original
+file in the read-only image is unchanged. Since images don't change,
+images do not have state.
+
+![](/terms/images/docker-filesystems-debianrw.png)
+
+## Parent image
+
+![](/terms/images/docker-filesystems-multilayer.png)
+
+Each image may depend on one other image, which forms the layer beneath
+it. We sometimes say that the lower image is the **parent** of the upper
+image.
+
+## Base image
+
+An image that has no parent is a **base image**.
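+
+On your own host you can walk an image's chain of parents down to its base
+image with the `docker history` command, which lists an image's layers from
+the top down (a quick sketch; the IDs and sizes will differ on your machine):
+
+    $ docker history ubuntu:14.04      # every layer, top-most first
+    $ docker history -q ubuntu:14.04   # just the layer IDs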
+
+## Image IDs
+
+All images are identified by a 64-character hexadecimal string (internally a
+256-bit value). To simplify their use, a short ID of the first 12
+characters can be used on the command line. There is a small possibility
+of short ID collisions, so the docker server will always return the long
+ID.
diff --git a/docs/terms/images/docker-filesystems-busyboxrw.png b/docs/terms/images/docker-filesystems-busyboxrw.png
new file mode 100644
index 00000000..5f046cbc
Binary files /dev/null and b/docs/terms/images/docker-filesystems-busyboxrw.png differ
diff --git a/docs/terms/images/docker-filesystems-debian.png b/docs/terms/images/docker-filesystems-debian.png
new file mode 100644
index 00000000..3502b384
Binary files /dev/null and b/docs/terms/images/docker-filesystems-debian.png differ
diff --git a/docs/terms/images/docker-filesystems-debianrw.png b/docs/terms/images/docker-filesystems-debianrw.png
new file mode 100644
index 00000000..387c9eb7
Binary files /dev/null and b/docs/terms/images/docker-filesystems-debianrw.png differ
diff --git a/docs/terms/images/docker-filesystems-generic.png b/docs/terms/images/docker-filesystems-generic.png
new file mode 100644
index 00000000..e24dce63
Binary files /dev/null and b/docs/terms/images/docker-filesystems-generic.png differ
diff --git a/docs/terms/images/docker-filesystems-multilayer.png b/docs/terms/images/docker-filesystems-multilayer.png
new file mode 100644
index 00000000..632aa41c
Binary files /dev/null and b/docs/terms/images/docker-filesystems-multilayer.png differ
diff --git a/docs/terms/images/docker-filesystems-multiroot.png b/docs/terms/images/docker-filesystems-multiroot.png
new file mode 100644
index 00000000..f9bcc38f
Binary files /dev/null and b/docs/terms/images/docker-filesystems-multiroot.png differ
diff --git a/docs/terms/images/docker-filesystems.svg b/docs/terms/images/docker-filesystems.svg
new file mode 100644
index 00000000..054402db
--- /dev/null
+++ b/docs/terms/images/docker-filesystems.svg
@@ -0,0 +1,1536 @@
+[1,536 lines of SVG markup; only the text labels "image/svg+xml" and "references parent image" survive extraction.]
diff --git a/docs/terms/layer.md b/docs/terms/layer.md
new file mode 100644
index 00000000..1b0ebad5
--- /dev/null
+++ b/docs/terms/layer.md
@@ -0,0 +1,42 @@
+
+# Layers
+
+## Introduction
+
+In a traditional Linux boot, the kernel first mounts the root [*File
+System*](/terms/filesystem) as read-only, checks its
+integrity, and then switches the whole rootfs volume to read-write mode.
+
+## Layer
+
+When Docker mounts the rootfs, it starts read-only, as in a traditional
+Linux boot, but then, instead of changing the file system to read-write
+mode, it takes advantage of a [union
+mount](http://en.wikipedia.org/wiki/Union_mount) to add a read-write
+file system *over* the read-only file system. In fact there may be
+multiple read-only file systems stacked on top of each other. We think
+of each one of these file systems as a **layer**.
+
+![](/terms/images/docker-filesystems-multilayer.png)
+
+At first, the top read-write layer has nothing in it, but any time a
+process creates a file, this happens in the top layer.
+And if something needs to update an existing file in a lower layer, then the
+file gets copied to the upper layer and changes go into the copy. The version
+of the file on the lower layer cannot be seen by the applications anymore,
+but it is there, unchanged.
+
+## Union File System
+
+We call the union of the read-write layer and all the read-only layers a
+**union file system**.
diff --git a/docs/terms/registry.md b/docs/terms/registry.md
new file mode 100644
index 00000000..d1b268aa
--- /dev/null
+++ b/docs/terms/registry.md
@@ -0,0 +1,27 @@
+
+# Registry
+
+## Introduction
+
+A Registry is a hosted service containing
+[*repositories*](/terms/repository/#repository-def) of
+[*images*](/terms/image/#image-def) which responds to the Registry API.
+
+The default registry can be accessed using a browser at
+[Docker Hub](https://hub.docker.com) or using the
+`docker search` command.
+
+## Further reading
+
+For more information, see [*Working with
+Repositories*](/userguide/dockerrepos/#working-with-the-repository).
diff --git a/docs/terms/repository.md b/docs/terms/repository.md
new file mode 100644
index 00000000..5bf429cd
--- /dev/null
+++ b/docs/terms/repository.md
@@ -0,0 +1,42 @@
+
+# Repository
+
+## Introduction
+
+A repository is a set of images either on your local Docker server, or
+shared, by pushing them to a [*Registry*](/terms/registry/#registry-def)
+server.
+
+Images can be associated with a repository (or multiple) by giving them
+an image name using one of three different commands:
+
+1. At build time (e.g., `docker build -t IMAGENAME`),
+2. When committing a container (e.g.,
+   `docker commit CONTAINERID IMAGENAME`) or
+3. When tagging an image id with an image name (e.g.,
+   `docker tag IMAGEID IMAGENAME`).
+
+A Fully Qualified Image Name (FQIN) can be made up of 3 parts:
+
+`[registry_hostname[:port]/][user_name/](repository_name:version_tag)`
+
+`user_name` and `registry_hostname` default to an empty string. When
+`registry_hostname` is an empty string, then `docker push` will push to
+`index.docker.io:80`.
+
+If you create a new repository which you want to share, you will need to
+set at least the `user_name`, as the `default` blank `user_name` prefix is
+reserved for [Official Repositories](/docker-hub/official_repos).
+
+For more information, see [*Working with
+Repositories*](/userguide/dockerrepos/#working-with-the-repository).
diff --git a/docs/touch-up.sh b/docs/touch-up.sh
new file mode 100755
index 00000000..1dd0b1dc
--- /dev/null
+++ b/docs/touch-up.sh
@@ -0,0 +1,20 @@
+#!/bin/bash -e
+
+# Sed to process GitHub Markdown
+# 1-2 Remove comment code from metadata block
+#
+for i in /docs/content/*
+  do # Line breaks are important
+    if [ -d $i ] # Spaces are important
+    then
+      y=${i##*/}
+      find $i -type f -name "*.md" -exec sed -i.old \
+        -e '/^/g' \
+        -e '/^/g' {} \;
+    fi
+done
+
diff --git a/docs/userguide/dockerhub.md b/docs/userguide/dockerhub.md
new file mode 100644
index 00000000..57d376fe
--- /dev/null
+++ b/docs/userguide/dockerhub.md
@@ -0,0 +1,78 @@
+
+# Getting started with Docker Hub
+
+
+This section provides a quick introduction to the [Docker Hub](https://hub.docker.com),
+including how to create an account.
+
+The [Docker Hub](https://hub.docker.com) is a centralized resource for working with
+Docker and its components. Docker Hub helps you collaborate with colleagues and get the
+most out of Docker. To do this, it provides services such as:
+
+* Docker image hosting.
+* User authentication.
+* Automated image builds and work-flow tools such as build triggers and web
+  hooks.
+* Integration with GitHub and Bitbucket.
+
+In order to use Docker Hub, you will first need to register and create an account. Don't
+worry, creating an account is simple and free.
+
+## Creating a Docker Hub account
+
+There are two ways for you to register and create an account:
+
+1. Via the web, or
+2. Via the command line.
+
+### Register via the web
+
+Fill in the [sign-up form](https://hub.docker.com/account/signup/) by
+choosing your user name and password and entering a valid email address. You can also
+sign up for the Docker Weekly mailing list, which has lots of information about what's
+going on in the world of Docker.
+
+![Register using the sign-up page](/userguide/register-web.png)
+
+### Register via the command line
+
+You can also create a Docker Hub account via the command line with the
+`docker login` command.
+
+    $ docker login
+
+### Confirm your email
+
+Once you've filled in the form, check your email for a welcome message asking for
+confirmation so we can activate your account.
+
+
+### Login
+
+After you complete the confirmation process, you can log in using the web console:
+
+![Login using the web console](/userguide/login-web.png)
+
+Or via the command line with the `docker login` command:
+
+    $ docker login
+
+Your Docker Hub account is now active and ready to use.
+
+## Next steps
+
+Next, let's start learning how to Dockerize applications with our "Hello world"
+exercise.
+
+Go to [Dockerizing Applications](/userguide/dockerizing).
+
diff --git a/docs/userguide/dockerimages.md b/docs/userguide/dockerimages.md
new file mode 100644
index 00000000..795fff81
--- /dev/null
+++ b/docs/userguide/dockerimages.md
@@ -0,0 +1,582 @@
+
+# Get started with images
+
+In the [introduction](/introduction/understanding-docker/) we've discovered that Docker
+images are the basis of containers. In the
+[previous](/userguide/dockerizing/) [sections](/userguide/usingdocker/)
+we've used Docker images that already exist, for example the `ubuntu`
+image and the `training/webapp` image.
+
+We've also discovered that Docker stores downloaded images on the Docker
+host. If an image isn't already present on the host then it'll be
+downloaded from a registry: by default the
+[Docker Hub Registry](https://registry.hub.docker.com).
+
+In this section we're going to explore Docker images a bit more,
+including:
+
+* Managing and working with images locally on your Docker host;
+* Creating basic images;
+* Uploading images to [Docker Hub Registry](https://registry.hub.docker.com).
+
+## Listing images on the host
+
+Let's start with listing the images we have locally on our host.
You can +do this using the `docker images` command like so: + + $ docker images + REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + training/webapp latest fc77f57ad303 3 weeks ago 280.5 MB + ubuntu 13.10 5e019ab7bf6d 4 weeks ago 180 MB + ubuntu saucy 5e019ab7bf6d 4 weeks ago 180 MB + ubuntu 12.04 74fe38d11401 4 weeks ago 209.6 MB + ubuntu precise 74fe38d11401 4 weeks ago 209.6 MB + ubuntu 12.10 a7cf8ae4e998 4 weeks ago 171.3 MB + ubuntu quantal a7cf8ae4e998 4 weeks ago 171.3 MB + ubuntu 14.04 99ec81b80c55 4 weeks ago 266 MB + ubuntu latest 99ec81b80c55 4 weeks ago 266 MB + ubuntu trusty 99ec81b80c55 4 weeks ago 266 MB + ubuntu 13.04 316b678ddf48 4 weeks ago 169.4 MB + ubuntu raring 316b678ddf48 4 weeks ago 169.4 MB + ubuntu 10.04 3db9c44f4520 4 weeks ago 183 MB + ubuntu lucid 3db9c44f4520 4 weeks ago 183 MB + +We can see the images we've previously used in our [user guide](/userguide/). +Each has been downloaded from [Docker Hub](https://hub.docker.com) when we +launched a container using that image. + +We can see three crucial pieces of information about our images in the listing. + +* What repository they came from, for example `ubuntu`. +* The tags for each image, for example `14.04`. +* The image ID of each image. + +> **Note:** +> Previously, the `docker images` command supported the `--tree` and `--dot` +> arguments, which displayed different visualizations of the image data. Docker +> core removed this functionality in the 1.7 version. If you liked this +> functionality, you can still find it in +> [the third-party dockviz tool](https://github.com/justone/dockviz). + +A repository potentially holds multiple variants of an image. In the case of +our `ubuntu` image we can see multiple variants covering Ubuntu 10.04, 12.04, +12.10, 13.04, 13.10 and 14.04. Each variant is identified by a tag and you can +refer to a tagged image like so: + + ubuntu:14.04 + +So when we run a container we refer to a tagged image like so: + + $ docker run -t -i ubuntu:14.04 /bin/bash + +If instead we wanted to run an Ubuntu 12.04 image we'd use: + + $ docker run -t -i ubuntu:12.04 /bin/bash + +If you don't specify a variant, for example you just use `ubuntu`, then Docker +will default to using the `ubuntu:latest` image. + +> **Tip:** +> We recommend you always use a specific tagged image, for example +> `ubuntu:12.04`. That way you always know exactly what variant of an image is +> being used. + +## Getting a new image + +So how do we get new images? Well Docker will automatically download any image +we use that isn't already present on the Docker host. But this can potentially +add some time to the launch of a container. If we want to pre-load an image we +can download it using the `docker pull` command. Let's say we'd like to +download the `centos` image. + + $ docker pull centos + Pulling repository centos + b7de3133ff98: Pulling dependent layers + 5cc9e91966f7: Pulling fs layer + 511136ea3c5a: Download complete + ef52fb1fe610: Download complete + . . . + + Status: Downloaded newer image for centos + +We can see that each layer of the image has been pulled down and now we +can run a container from this image and we won't have to wait to +download the image. + + $ docker run -t -i centos /bin/bash + bash-4.1# + +## Finding images + +One of the features of Docker is that a lot of people have created Docker +images for a variety of purposes. Many of these have been uploaded to +[Docker Hub](https://hub.docker.com). We can search these images on the +[Docker Hub](https://hub.docker.com) website. 
+ +![indexsearch](/userguide/search.png) + +We can also search for images on the command line using the `docker search` +command. Let's say our team wants an image with Ruby and Sinatra installed on +which to do our web application development. We can search for a suitable image +by using the `docker search` command to find all the images that contain the +term `sinatra`. + + $ docker search sinatra + NAME DESCRIPTION STARS OFFICIAL AUTOMATED + training/sinatra Sinatra training image 0 [OK] + marceldegraaf/sinatra Sinatra test app 0 + mattwarren/docker-sinatra-demo 0 [OK] + luisbebop/docker-sinatra-hello-world 0 [OK] + bmorearty/handson-sinatra handson-ruby + Sinatra for Hands on with D... 0 + subwiz/sinatra 0 + bmorearty/sinatra 0 + . . . + +We can see we've returned a lot of images that use the term `sinatra`. We've +returned a list of image names, descriptions, Stars (which measure the social +popularity of images - if a user likes an image then they can "star" it), and +the Official and Automated build statuses. +[Official Repositories](/docker-hub/official_repos) are a carefully curated set +of Docker repositories supported by Docker, Inc. Automated repositories are +[Automated Builds](/userguide/dockerrepos/#automated-builds) that allow you to +validate the source and content of an image. + +We've reviewed the images available to use and we decided to use the +`training/sinatra` image. So far we've seen two types of images repositories, +images like `ubuntu`, which are called base or root images. These base images +are provided by Docker Inc and are built, validated and supported. These can be +identified by their single word names. + +We've also seen user images, for example the `training/sinatra` image we've +chosen. A user image belongs to a member of the Docker community and is built +and maintained by them. You can identify user images as they are always +prefixed with the user name, here `training`, of the user that created them. + +## Pulling our image + +We've identified a suitable image, `training/sinatra`, and now we can download it using the `docker pull` command. + + $ docker pull training/sinatra + +The team can now use this image by running their own containers. + + $ docker run -t -i training/sinatra /bin/bash + root@a8cb6ce02d85:/# + +## Creating our own images + +The team has found the `training/sinatra` image pretty useful but it's not quite what +they need and we need to make some changes to it. There are two ways we can +update and create images. + +1. We can update a container created from an image and commit the results to an image. +2. We can use a `Dockerfile` to specify instructions to create an image. + + +### Updating and committing an image + +To update an image we first need to create a container from the image +we'd like to update. + + $ docker run -t -i training/sinatra /bin/bash + root@0b2616b0e5a8:/# + +> **Note:** +> Take note of the container ID that has been created, `0b2616b0e5a8`, as we'll +> need it in a moment. + +Inside our running container let's add the `json` gem. + + root@0b2616b0e5a8:/# gem install json + +Once this has completed let's exit our container using the `exit` +command. + +Now we have a container with the change we want to make. We can then +commit a copy of this container to an image using the `docker commit` +command. + + $ docker commit -m "Added json gem" -a "Kate Smith" \ + 0b2616b0e5a8 ouruser/sinatra:v2 + 4f177bd27a9ff0f6dc2a830403925b5360bfe0b93d476f7fc3231110e7f71b1c + +Here we've used the `docker commit` command. 
We've specified two flags: `-m` +and `-a`. The `-m` flag allows us to specify a commit message, much like you +would with a commit on a version control system. The `-a` flag allows us to +specify an author for our update. + +We've also specified the container we want to create this new image from, +`0b2616b0e5a8` (the ID we recorded earlier) and we've specified a target for +the image: + + ouruser/sinatra:v2 + +Let's break this target down. It consists of a new user, `ouruser`, that we're +writing this image to. We've also specified the name of the image, here we're +keeping the original image name `sinatra`. Finally we're specifying a tag for +the image: `v2`. + +We can then look at our new `ouruser/sinatra` image using the `docker images` +command. + + $ docker images + REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + training/sinatra latest 5bc342fa0b91 10 hours ago 446.7 MB + ouruser/sinatra v2 3c59e02ddd1a 10 hours ago 446.7 MB + ouruser/sinatra latest 5db5f8471261 10 hours ago 446.7 MB + +To use our new image to create a container we can then: + + $ docker run -t -i ouruser/sinatra:v2 /bin/bash + root@78e82f680994:/# + +### Building an image from a `Dockerfile` + +Using the `docker commit` command is a pretty simple way of extending an image +but it's a bit cumbersome and it's not easy to share a development process for +images amongst a team. Instead we can use a new command, `docker build`, to +build new images from scratch. + +To do this we create a `Dockerfile` that contains a set of instructions that +tell Docker how to build our image. + +Let's create a directory and a `Dockerfile` first. + + $ mkdir sinatra + $ cd sinatra + $ touch Dockerfile + +If you are using Docker Machine on Windows, you may access your host +directory by `cd` to `/c/Users/your_user_name`. + +Each instruction creates a new layer of the image. Let's look at a simple +example now for building our own Sinatra image for our development team. + + # This is a comment + FROM ubuntu:14.04 + MAINTAINER Kate Smith + RUN apt-get update && apt-get install -y ruby ruby-dev + RUN gem install sinatra + +Let's look at what our `Dockerfile` does. Each instruction prefixes a statement and is capitalized. + + INSTRUCTION statement + +> **Note:** +> We use `#` to indicate a comment + +The first instruction `FROM` tells Docker what the source of our image is, in +this case we're basing our new image on an Ubuntu 14.04 image. + +Next we use the `MAINTAINER` instruction to specify who maintains our new image. + +Lastly, we've specified two `RUN` instructions. A `RUN` instruction executes +a command inside the image, for example installing a package. Here we're +updating our APT cache, installing Ruby and RubyGems and then installing the +Sinatra gem. + +> **Note:** +> There are [a lot more instructions available to us in a Dockerfile](/reference/builder). + +Now let's take our `Dockerfile` and use the `docker build` command to build an image. + + $ docker build -t ouruser/sinatra:v2 . + Sending build context to Docker daemon 2.048 kB + Sending build context to Docker daemon + Step 0 : FROM ubuntu:14.04 + ---> e54ca5efa2e9 + Step 1 : MAINTAINER Kate Smith + ---> Using cache + ---> 851baf55332b + Step 2 : RUN apt-get update && apt-get install -y ruby ruby-dev + ---> Running in 3a2558904e9b + Selecting previously unselected package libasan0:amd64. + (Reading database ... 11518 files and directories currently installed.) + Preparing to unpack .../libasan0_4.8.2-19ubuntu1_amd64.deb ... + Unpacking libasan0:amd64 (4.8.2-19ubuntu1) ... 
+ Selecting previously unselected package libatomic1:amd64. + Preparing to unpack .../libatomic1_4.8.2-19ubuntu1_amd64.deb ... + Unpacking libatomic1:amd64 (4.8.2-19ubuntu1) ... + Selecting previously unselected package libgmp10:amd64. + Preparing to unpack .../libgmp10_2%3a5.1.3+dfsg-1ubuntu1_amd64.deb ... + Unpacking libgmp10:amd64 (2:5.1.3+dfsg-1ubuntu1) ... + Selecting previously unselected package libisl10:amd64. + Preparing to unpack .../libisl10_0.12.2-1_amd64.deb ... + Unpacking libisl10:amd64 (0.12.2-1) ... + Selecting previously unselected package libcloog-isl4:amd64. + Preparing to unpack .../libcloog-isl4_0.18.2-1_amd64.deb ... + Unpacking libcloog-isl4:amd64 (0.18.2-1) ... + Selecting previously unselected package libgomp1:amd64. + Preparing to unpack .../libgomp1_4.8.2-19ubuntu1_amd64.deb ... + Unpacking libgomp1:amd64 (4.8.2-19ubuntu1) ... + Selecting previously unselected package libitm1:amd64. + Preparing to unpack .../libitm1_4.8.2-19ubuntu1_amd64.deb ... + Unpacking libitm1:amd64 (4.8.2-19ubuntu1) ... + Selecting previously unselected package libmpfr4:amd64. + Preparing to unpack .../libmpfr4_3.1.2-1_amd64.deb ... + Unpacking libmpfr4:amd64 (3.1.2-1) ... + Selecting previously unselected package libquadmath0:amd64. + Preparing to unpack .../libquadmath0_4.8.2-19ubuntu1_amd64.deb ... + Unpacking libquadmath0:amd64 (4.8.2-19ubuntu1) ... + Selecting previously unselected package libtsan0:amd64. + Preparing to unpack .../libtsan0_4.8.2-19ubuntu1_amd64.deb ... + Unpacking libtsan0:amd64 (4.8.2-19ubuntu1) ... + Selecting previously unselected package libyaml-0-2:amd64. + Preparing to unpack .../libyaml-0-2_0.1.4-3ubuntu3_amd64.deb ... + Unpacking libyaml-0-2:amd64 (0.1.4-3ubuntu3) ... + Selecting previously unselected package libmpc3:amd64. + Preparing to unpack .../libmpc3_1.0.1-1ubuntu1_amd64.deb ... + Unpacking libmpc3:amd64 (1.0.1-1ubuntu1) ... + Selecting previously unselected package openssl. + Preparing to unpack .../openssl_1.0.1f-1ubuntu2.4_amd64.deb ... + Unpacking openssl (1.0.1f-1ubuntu2.4) ... + Selecting previously unselected package ca-certificates. + Preparing to unpack .../ca-certificates_20130906ubuntu2_all.deb ... + Unpacking ca-certificates (20130906ubuntu2) ... + Selecting previously unselected package manpages. + Preparing to unpack .../manpages_3.54-1ubuntu1_all.deb ... + Unpacking manpages (3.54-1ubuntu1) ... + Selecting previously unselected package binutils. + Preparing to unpack .../binutils_2.24-5ubuntu3_amd64.deb ... + Unpacking binutils (2.24-5ubuntu3) ... + Selecting previously unselected package cpp-4.8. + Preparing to unpack .../cpp-4.8_4.8.2-19ubuntu1_amd64.deb ... + Unpacking cpp-4.8 (4.8.2-19ubuntu1) ... + Selecting previously unselected package cpp. + Preparing to unpack .../cpp_4%3a4.8.2-1ubuntu6_amd64.deb ... + Unpacking cpp (4:4.8.2-1ubuntu6) ... + Selecting previously unselected package libgcc-4.8-dev:amd64. + Preparing to unpack .../libgcc-4.8-dev_4.8.2-19ubuntu1_amd64.deb ... + Unpacking libgcc-4.8-dev:amd64 (4.8.2-19ubuntu1) ... + Selecting previously unselected package gcc-4.8. + Preparing to unpack .../gcc-4.8_4.8.2-19ubuntu1_amd64.deb ... + Unpacking gcc-4.8 (4.8.2-19ubuntu1) ... + Selecting previously unselected package gcc. + Preparing to unpack .../gcc_4%3a4.8.2-1ubuntu6_amd64.deb ... + Unpacking gcc (4:4.8.2-1ubuntu6) ... + Selecting previously unselected package libc-dev-bin. + Preparing to unpack .../libc-dev-bin_2.19-0ubuntu6_amd64.deb ... + Unpacking libc-dev-bin (2.19-0ubuntu6) ... 
+ Selecting previously unselected package linux-libc-dev:amd64. + Preparing to unpack .../linux-libc-dev_3.13.0-30.55_amd64.deb ... + Unpacking linux-libc-dev:amd64 (3.13.0-30.55) ... + Selecting previously unselected package libc6-dev:amd64. + Preparing to unpack .../libc6-dev_2.19-0ubuntu6_amd64.deb ... + Unpacking libc6-dev:amd64 (2.19-0ubuntu6) ... + Selecting previously unselected package ruby. + Preparing to unpack .../ruby_1%3a1.9.3.4_all.deb ... + Unpacking ruby (1:1.9.3.4) ... + Selecting previously unselected package ruby1.9.1. + Preparing to unpack .../ruby1.9.1_1.9.3.484-2ubuntu1_amd64.deb ... + Unpacking ruby1.9.1 (1.9.3.484-2ubuntu1) ... + Selecting previously unselected package libruby1.9.1. + Preparing to unpack .../libruby1.9.1_1.9.3.484-2ubuntu1_amd64.deb ... + Unpacking libruby1.9.1 (1.9.3.484-2ubuntu1) ... + Selecting previously unselected package manpages-dev. + Preparing to unpack .../manpages-dev_3.54-1ubuntu1_all.deb ... + Unpacking manpages-dev (3.54-1ubuntu1) ... + Selecting previously unselected package ruby1.9.1-dev. + Preparing to unpack .../ruby1.9.1-dev_1.9.3.484-2ubuntu1_amd64.deb ... + Unpacking ruby1.9.1-dev (1.9.3.484-2ubuntu1) ... + Selecting previously unselected package ruby-dev. + Preparing to unpack .../ruby-dev_1%3a1.9.3.4_all.deb ... + Unpacking ruby-dev (1:1.9.3.4) ... + Setting up libasan0:amd64 (4.8.2-19ubuntu1) ... + Setting up libatomic1:amd64 (4.8.2-19ubuntu1) ... + Setting up libgmp10:amd64 (2:5.1.3+dfsg-1ubuntu1) ... + Setting up libisl10:amd64 (0.12.2-1) ... + Setting up libcloog-isl4:amd64 (0.18.2-1) ... + Setting up libgomp1:amd64 (4.8.2-19ubuntu1) ... + Setting up libitm1:amd64 (4.8.2-19ubuntu1) ... + Setting up libmpfr4:amd64 (3.1.2-1) ... + Setting up libquadmath0:amd64 (4.8.2-19ubuntu1) ... + Setting up libtsan0:amd64 (4.8.2-19ubuntu1) ... + Setting up libyaml-0-2:amd64 (0.1.4-3ubuntu3) ... + Setting up libmpc3:amd64 (1.0.1-1ubuntu1) ... + Setting up openssl (1.0.1f-1ubuntu2.4) ... + Setting up ca-certificates (20130906ubuntu2) ... + debconf: unable to initialize frontend: Dialog + debconf: (TERM is not set, so the dialog frontend is not usable.) + debconf: falling back to frontend: Readline + debconf: unable to initialize frontend: Readline + debconf: (This frontend requires a controlling tty.) + debconf: falling back to frontend: Teletype + Setting up manpages (3.54-1ubuntu1) ... + Setting up binutils (2.24-5ubuntu3) ... + Setting up cpp-4.8 (4.8.2-19ubuntu1) ... + Setting up cpp (4:4.8.2-1ubuntu6) ... + Setting up libgcc-4.8-dev:amd64 (4.8.2-19ubuntu1) ... + Setting up gcc-4.8 (4.8.2-19ubuntu1) ... + Setting up gcc (4:4.8.2-1ubuntu6) ... + Setting up libc-dev-bin (2.19-0ubuntu6) ... + Setting up linux-libc-dev:amd64 (3.13.0-30.55) ... + Setting up libc6-dev:amd64 (2.19-0ubuntu6) ... + Setting up manpages-dev (3.54-1ubuntu1) ... + Setting up libruby1.9.1 (1.9.3.484-2ubuntu1) ... + Setting up ruby1.9.1-dev (1.9.3.484-2ubuntu1) ... + Setting up ruby-dev (1:1.9.3.4) ... + Setting up ruby (1:1.9.3.4) ... + Setting up ruby1.9.1 (1.9.3.484-2ubuntu1) ... + Processing triggers for libc-bin (2.19-0ubuntu6) ... + Processing triggers for ca-certificates (20130906ubuntu2) ... + Updating certificates in /etc/ssl/certs... 164 added, 0 removed; done. + Running hooks in /etc/ca-certificates/update.d....done. 
+ ---> c55c31703134 + Removing intermediate container 3a2558904e9b + Step 3 : RUN gem install sinatra + ---> Running in 6b81cb6313e5 + unable to convert "\xC3" to UTF-8 in conversion from ASCII-8BIT to UTF-8 to US-ASCII for README.rdoc, skipping + unable to convert "\xC3" to UTF-8 in conversion from ASCII-8BIT to UTF-8 to US-ASCII for README.rdoc, skipping + Successfully installed rack-1.5.2 + Successfully installed tilt-1.4.1 + Successfully installed rack-protection-1.5.3 + Successfully installed sinatra-1.4.5 + 4 gems installed + Installing ri documentation for rack-1.5.2... + Installing ri documentation for tilt-1.4.1... + Installing ri documentation for rack-protection-1.5.3... + Installing ri documentation for sinatra-1.4.5... + Installing RDoc documentation for rack-1.5.2... + Installing RDoc documentation for tilt-1.4.1... + Installing RDoc documentation for rack-protection-1.5.3... + Installing RDoc documentation for sinatra-1.4.5... + ---> 97feabe5d2ed + Removing intermediate container 6b81cb6313e5 + Successfully built 97feabe5d2ed + +We've specified our `docker build` command and used the `-t` flag to identify +our new image as belonging to the user `ouruser`, the repository name `sinatra` +and given it the tag `v2`. + +We've also specified the location of our `Dockerfile` using the `.` to +indicate a `Dockerfile` in the current directory. + +> **Note:** +> You can also specify a path to a `Dockerfile`. + +Now we can see the build process at work. The first thing Docker does is +upload the build context: basically the contents of the directory you're +building in. This is done because the Docker daemon does the actual +build of the image and it needs the local context to do it. + +Next we can see each instruction in the `Dockerfile` being executed +step-by-step. We can see that each step creates a new container, runs +the instruction inside that container and then commits that change - +just like the `docker commit` work flow we saw earlier. When all the +instructions have executed we're left with the `97feabe5d2ed` image +(also helpfully tagged as `ouruser/sinatra:v2`) and all intermediate +containers will get removed to clean things up. + +> **Note:** +> An image can't have more than 127 layers regardless of the storage driver. +> This limitation is set globally to encourage optimization of the overall +> size of images. + +We can then create a container from our new image. + + $ docker run -t -i ouruser/sinatra:v2 /bin/bash + root@8196968dac35:/# + +> **Note:** +> This is just a brief introduction to creating images. We've +> skipped a whole bunch of other instructions that you can use. We'll see more of +> those instructions in later sections of the Guide or you can refer to the +> [`Dockerfile`](/reference/builder/) reference for a +> detailed description and examples of every instruction. +> To help you write a clear, readable, maintainable `Dockerfile`, we've also +> written a [`Dockerfile` Best Practices guide](/articles/dockerfile_best-practices). + +### More + +To learn more, check out the [Dockerfile tutorial](/userguide/level1). + +## Setting tags on an image + +You can also add a tag to an existing image after you commit or build it. We +can do this using the `docker tag` command. Let's add a new tag to our +`ouruser/sinatra` image. + + $ docker tag 5db5f8471261 ouruser/sinatra:devel + +The `docker tag` command takes the ID of the image, here `5db5f8471261`, and our +user name, the repository name and the new tag. 
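+
+The source image can equally be referred to by name and tag instead of by ID.
+A small sketch reusing the names from this example; both forms produce the
+same `devel` tag:
+
+    $ docker tag ouruser/sinatra:latest ouruser/sinatra:devel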
+
+Let's see our new tag using the `docker images` command.
+
+    $ docker images ouruser/sinatra
+    REPOSITORY          TAG     IMAGE ID      CREATED        VIRTUAL SIZE
+    ouruser/sinatra     latest  5db5f8471261  11 hours ago   446.7 MB
+    ouruser/sinatra     devel   5db5f8471261  11 hours ago   446.7 MB
+    ouruser/sinatra     v2      5db5f8471261  11 hours ago   446.7 MB
+
+## Image Digests
+
+Images that use the v2 or later format have a content-addressable identifier
+called a `digest`. As long as the input used to generate the image is
+unchanged, the digest value is predictable. To list image digest values, use
+the `--digests` flag:
+
+    $ docker images --digests | head
+    REPOSITORY        TAG      DIGEST                                                                    IMAGE ID      CREATED       VIRTUAL SIZE
+    ouruser/sinatra   latest   sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf   5db5f8471261  11 hours ago  446.7 MB
+
+When pushing or pulling to a 2.0 registry, the `push` or `pull` command
+output includes the image digest. You can `pull` using a digest value.
+
+    $ docker pull ouruser/sinatra@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf
+
+You can also reference by digest in `create`, `run`, and `rmi` commands, as well as the
+`FROM` image reference in a Dockerfile.
+
+## Push an image to Docker Hub
+
+Once you've built or created a new image you can push it to [Docker
+Hub](https://hub.docker.com) using the `docker push` command. This
+allows you to share it with others, either publicly, or push it into [a
+private repository](https://registry.hub.docker.com/plans/).
+
+    $ docker push ouruser/sinatra
+    The push refers to a repository [ouruser/sinatra] (len: 1)
+    Sending image list
+    Pushing repository ouruser/sinatra (3 tags)
+    . . .
+
+## Remove an image from the host
+
+You can also remove images on your Docker host in a way [similar to
+containers](/userguide/usingdocker) using the `docker rmi` command.
+
+Let's delete the `training/sinatra` image as we don't need it anymore.
+
+    $ docker rmi training/sinatra
+    Untagged: training/sinatra:latest
+    Deleted: 5bc342fa0b91cabf65246837015197eecfa24b2213ed6a51a8974ae250fedd8d
+    Deleted: ed0fffdcdae5eb2c3a55549857a8be7fc8bc4241fb19ad714364cbfd7a56b22f
+    Deleted: 5c58979d73ae448df5af1d8142436d81116187a7633082650549c52c3a2418f0
+
+> **Note:** In order to remove an image from the host, please make sure
+> that there are no containers actively based on it.
+
+# Next steps
+
+Until now we've seen how to build individual applications inside Docker
+containers. Now learn how to build whole application stacks with Docker
+by linking together multiple Docker containers.
+
+Test your Dockerfile knowledge with the
+[Dockerfile tutorial](/userguide/level1).
+
+Go to [Linking Containers Together](/userguide/dockerlinks).
+
+
diff --git a/docs/userguide/dockerizing.md b/docs/userguide/dockerizing.md
new file mode 100644
index 00000000..3f9c730d
--- /dev/null
+++ b/docs/userguide/dockerizing.md
@@ -0,0 +1,203 @@
+
+# Dockerizing applications: A "Hello world"
+
+*So what's this Docker thing all about?*
+
+Docker allows you to run applications inside containers. Running an
+application inside a container takes a single command: `docker run`.
+
+>**Note**: Depending on your Docker system configuration, you may be required to
+>preface each `docker` command on this page with `sudo`. To avoid this behavior,
+>your system administrator can create a Unix group called `docker` and add users
+>to it.
+
+## Hello world
+
+Let's try it now.
+
+    $ docker run ubuntu:14.04 /bin/echo 'Hello world'
+    Hello world
+
+And you just launched your first container!
+ +So what just happened? Let's step through what the `docker run` command +did. + +First we specified the `docker` binary and the command we wanted to +execute, `run`. The `docker run` combination *runs* containers. + +Next we specified an image: `ubuntu:14.04`. This is the source of the container +we ran. Docker calls this an image. In this case we used an Ubuntu 14.04 +operating system image. + +When you specify an image, Docker looks first for the image on your +Docker host. If it can't find it then it downloads the image from the public +image registry: [Docker Hub](https://hub.docker.com). + +Next we told Docker what command to run inside our new container: + + /bin/echo 'Hello world' + +When our container was launched Docker created a new Ubuntu 14.04 +environment and then executed the `/bin/echo` command inside it. We saw +the result on the command line: + + Hello world + +So what happened to our container after that? Well Docker containers +only run as long as the command you specify is active. Here, as soon as +`Hello world` was echoed, the container stopped. + +## An interactive container + +Let's try the `docker run` command again, this time specifying a new +command to run in our container. + + $ docker run -t -i ubuntu:14.04 /bin/bash + root@af8bae53bdd3:/# + +Here we've again specified the `docker run` command and launched an +`ubuntu:14.04` image. But we've also passed in two flags: `-t` and `-i`. +The `-t` flag assigns a pseudo-tty or terminal inside our new container +and the `-i` flag allows us to make an interactive connection by +grabbing the standard in (`STDIN`) of the container. + +We've also specified a new command for our container to run: +`/bin/bash`. This will launch a Bash shell inside our container. + +So now when our container is launched we can see that we've got a +command prompt inside it: + + root@af8bae53bdd3:/# + +Let's try running some commands inside our container: + + root@af8bae53bdd3:/# pwd + / + root@af8bae53bdd3:/# ls + bin boot dev etc home lib lib64 media mnt opt proc root run sbin srv sys tmp usr var + +You can see we've run the `pwd` to show our current directory and can +see we're in the `/` root directory. We've also done a directory listing +of the root directory which shows us what looks like a typical Linux +file system. + +You can play around inside this container and when you're done you can +use the `exit` command or enter Ctrl-D to finish. + + root@af8bae53bdd3:/# exit + +As with our previous container, once the Bash shell process has +finished, the container is stopped. + +## A daemonized Hello world + +Now a container that runs a command and then exits has some uses but +it's not overly helpful. Let's create a container that runs as a daemon, +like most of the applications we're probably going to run with Docker. + +Again we can do this with the `docker run` command: + + $ docker run -d ubuntu:14.04 /bin/sh -c "while true; do echo hello world; sleep 1; done" + 1e5535038e285177d5214659a068137486f96ee5c2e85a4ac52dc83f2ebe4147 + +Wait, what? Where's our "hello world" output? Let's look at what we've run here. +It should look pretty familiar. We ran `docker run` but this time we +specified a flag: `-d`. The `-d` flag tells Docker to run the container +and put it in the background, to daemonize it. + +We also specified the same image: `ubuntu:14.04`. 
Finally, we specified a command to run:

    /bin/sh -c "while true; do echo hello world; sleep 1; done"

This is the (hello) world's silliest daemon: a shell script that echoes
`hello world` forever.

So why aren't we seeing any `hello world`'s? Instead Docker has returned
a really long string:

    1e5535038e285177d5214659a068137486f96ee5c2e85a4ac52dc83f2ebe4147

This really long string is called a *container ID*. It uniquely
identifies a container so we can work with it.

> **Note:**
> The container ID is a bit long and unwieldy and a bit later
> on we'll see a shorter ID and some ways to name our containers to make
> working with them easier.

We can use this container ID to see what's happening with our `hello world` daemon.

Firstly, let's make sure our container is running. We can
do that with the `docker ps` command. The `docker ps` command queries
the Docker daemon for information about all the containers it knows
about.

    $ docker ps
    CONTAINER ID  IMAGE         COMMAND               CREATED        STATUS       PORTS  NAMES
    1e5535038e28  ubuntu:14.04  /bin/sh -c 'while tr  2 minutes ago  Up 1 minute         insane_babbage

Here we can see our daemonized container. The `docker ps` command has returned
some useful information about it, starting with a shorter variant of its
container ID: `1e5535038e28`.

We can also see the image we used to build it, `ubuntu:14.04`, the command it
is running, its status and an automatically assigned name,
`insane_babbage`.

> **Note:**
> Docker automatically names any containers you start, a
> little later on we'll see how you can specify your own names.

Okay, so we now know it's running. But is it doing what we asked it to do? To see this
we're going to look inside the container using the `docker logs`
command. Let's use the container name Docker assigned.

    $ docker logs insane_babbage
    hello world
    hello world
    hello world
    . . .

The `docker logs` command looks inside the container and returns its standard
output: in this case the output of our command `hello world`.

Awesome! Our daemon is working and we've just created our first
Dockerized application!

Now we've established we can create our own containers, let's tidy up
after ourselves and stop our daemonized container. To do this we use the
`docker stop` command.

    $ docker stop insane_babbage
    insane_babbage

The `docker stop` command tells Docker to politely stop the running
container. If it succeeds it will return the name of the container it
has just stopped.

Let's check it worked with the `docker ps` command.

    $ docker ps
    CONTAINER ID  IMAGE  COMMAND  CREATED  STATUS  PORTS  NAMES

Excellent. Our container has been stopped.

# Next steps

Now we've seen how simple it is to get started with Docker. Let's learn how to
do some more advanced tasks.

Go to [Working With Containers](/userguide/usingdocker).

diff --git a/docs/userguide/dockerlinks.md b/docs/userguide/dockerlinks.md
new file mode 100644
index 00000000..228e46a3
--- /dev/null
+++ b/docs/userguide/dockerlinks.md
@@ -0,0 +1,345 @@

# Linking containers together

In [the Using Docker section](/userguide/usingdocker), you saw how you can
connect to a service running inside a Docker container via a network
port. But a port connection is only one way you can interact with services and
applications running inside Docker containers. In this section, we'll briefly revisit
connecting via a network port and then we'll introduce you to another method of access:
container linking.
## Connect using network port mapping

In [the Using Docker section](/userguide/usingdocker), you created a
container that ran a Python Flask application:

    $ docker run -d -P training/webapp python app.py

> **Note:**
> Containers have an internal network and an IP address
> (as we saw when we used the `docker inspect` command to show the container's
> IP address in the [Using Docker](/userguide/usingdocker/) section).
> Docker can have a variety of network configurations. You can see more
> information on Docker networking [here](/articles/networking/).

When that container was created, the `-P` flag was used to automatically map
any network port inside it to a random high port within an *ephemeral port
range* on your Docker host. Next, when `docker ps` was run, you saw that port
5000 in the container was bound to port 49155 on the host.

    $ docker ps
    CONTAINER ID  IMAGE                   COMMAND        CREATED        STATUS        PORTS                    NAMES
    bc533791f3f5  training/webapp:latest  python app.py  5 seconds ago  Up 2 seconds  0.0.0.0:49155->5000/tcp  nostalgic_morse

You also saw how you can bind a container's ports to a specific port using
the `-p` flag. Here port 80 of the host is mapped to port 5000 of the
container:

    $ docker run -d -p 80:5000 training/webapp python app.py

And you saw why this isn't such a great idea because it constrains you to
only one container on that specific port.

There are also a few other ways you can configure the `-p` flag. By
default the `-p` flag will bind the specified port to all interfaces on
the host machine. But you can also specify a binding to a specific
interface, for example only to the `localhost`.

    $ docker run -d -p 127.0.0.1:80:5000 training/webapp python app.py

This would bind port 5000 inside the container to port 80 on the
`localhost` or `127.0.0.1` interface on the host machine.

Or, to bind port 5000 of the container to a dynamic port but only on the
`localhost`, you could use:

    $ docker run -d -p 127.0.0.1::5000 training/webapp python app.py

You can also bind UDP ports by adding a trailing `/udp`. For example:

    $ docker run -d -p 127.0.0.1:80:5000/udp training/webapp python app.py

You also learned about the useful `docker port` shortcut which showed us the
current port bindings. This is also useful for showing you specific port
configurations. For example, if you've bound the container port to the
`localhost` on the host machine, then the `docker port` output will reflect that.

    $ docker port nostalgic_morse 5000
    127.0.0.1:49155

> **Note:**
> The `-p` flag can be used multiple times to configure multiple ports.

## Connect with the linking system

Network port mappings are not the only way Docker containers can connect
to one another. Docker also has a linking system that allows you to link
multiple containers together and send connection information from one to another.
When containers are linked, information about a source container can be sent to a
recipient container. This allows the recipient to see selected data describing
aspects of the source container.

### The importance of naming

To establish links, Docker relies on the names of your containers.
You've already seen that each container you create has an automatically
created name; indeed you've become familiar with our old friend
`nostalgic_morse` during this guide. You can also name containers
yourself. This naming provides two useful functions:

1. It can be useful to name containers that do specific functions in a way
   that makes it easier for you to remember them, for example naming a
   container containing a web application `web`.

2. It provides Docker with a reference point that allows it to refer to other
   containers, for example, you can specify to link the container `web` to container `db`.

You can name your container by using the `--name` flag, for example:

    $ docker run -d -P --name web training/webapp python app.py

This launches a new container and uses the `--name` flag to
name the container `web`. You can see the container's name using the
`docker ps` command.

    $ docker ps -l
    CONTAINER ID  IMAGE                   COMMAND        CREATED       STATUS        PORTS                    NAMES
    aed84ee21bde  training/webapp:latest  python app.py  12 hours ago  Up 2 seconds  0.0.0.0:49154->5000/tcp  web

You can also use `docker inspect` to return the container's name.

> **Note:**
> Container names have to be unique. That means you can only call
> one container `web`. If you want to re-use a container name you must delete
> the old container (with `docker rm`) before you can create a new
> container with the same name. As an alternative you can use the `--rm`
> flag with the `docker run` command. This will delete the container
> immediately after it is stopped.

## Communication across links

Links allow containers to discover each other and securely transfer information about one
container to another container. When you set up a link, you create a conduit between a
source container and a recipient container. The recipient can then access select data
about the source. To create a link, you use the `--link` flag. First, create a new
container, this time one containing a database.

    $ docker run -d --name db training/postgres

This creates a new container called `db` from the `training/postgres`
image, which contains a PostgreSQL database.

Now, you need to delete the `web` container you created previously so you can replace it
with a linked one:

    $ docker rm -f web

Now, create a new `web` container and link it with your `db` container.

    $ docker run -d -P --name web --link db:db training/webapp python app.py

This will link the new `web` container with the `db` container you created
earlier. The `--link` flag takes the form:

    --link <name>:<alias>

Where `name` is the name of the container we're linking to and `alias` is an
alias for the link name. You'll see how that alias gets used shortly.
The `--link` flag also takes the form:

    --link <name>

In which case the alias will match the name. You could have written the previous
example as:

    $ docker run -d -P --name web --link db training/webapp python app.py

Next, inspect your linked containers with `docker inspect`:

    $ docker inspect -f "{{ .HostConfig.Links }}" web
    [/db:/web/db]

You can see that the `web` container is now linked to the `db` container
as `web/db`, which allows it to access information about the `db` container.

So what does linking the containers actually do? You've learned that a link allows a
source container to provide information about itself to a recipient container. In
our example, the recipient, `web`, can access information about the source `db`. To do
this, Docker creates a secure tunnel between the containers that doesn't need to
expose any ports externally on the container; you'll note when we started the
`db` container we did not use either the `-P` or `-p` flags.
That's a big benefit of
linking: we don't need to expose the source container, here the PostgreSQL database, to
the network.

Docker exposes connectivity information for the source container to the
recipient container in two ways:

* Environment variables,
* Updating the `/etc/hosts` file.

### Environment variables

Docker creates several environment variables when you link containers. Docker
automatically creates environment variables in the target container based on
the `--link` parameters. It will also expose all environment variables
originating from Docker from the source container. These include variables from:

* the `ENV` commands in the source container's Dockerfile
* the `-e`, `--env` and `--env-file` options on the `docker run`
command when the source container is started

These environment variables enable programmatic discovery from within the
target container of information related to the source container.

> **Warning**:
> It is important to understand that *all* environment variables originating
> from Docker within a container are made available to *any* container
> that links to it. This could have serious security implications if sensitive
> data is stored in them.

Docker sets an `<alias>_NAME` environment variable for each target container
listed in the `--link` parameter. For example, if a new container called
`web` is linked to a database container called `db` via `--link db:webdb`,
then Docker creates a `WEBDB_NAME=/web/webdb` variable in the `web` container.

Docker also defines a set of environment variables for each port exposed by the
source container. Each variable has a unique prefix in the form:

`<name>_PORT_<port>_<protocol>`

The components in this prefix are:

* the alias `<name>` specified in the `--link` parameter (for example, `webdb`)
* the `<port>` number exposed
* a `<protocol>` which is either TCP or UDP

Docker uses this prefix format to define three distinct environment variables:

* The `prefix_ADDR` variable contains the IP Address from the URL, for
example `WEBDB_PORT_8080_TCP_ADDR=172.17.0.82`.
* The `prefix_PORT` variable contains just the port number from the URL, for
example `WEBDB_PORT_8080_TCP_PORT=8080`.
* The `prefix_PROTO` variable contains just the protocol from the URL, for
example `WEBDB_PORT_8080_TCP_PROTO=tcp`.

If the container exposes multiple ports, an environment variable set is
defined for each one. This means, for example, if a container exposes 4 ports,
Docker creates 12 environment variables, 3 for each port.

Additionally, Docker creates an environment variable called `<alias>_PORT`.
This variable contains the URL of the source container's first exposed port.
The 'first' port is defined as the exposed port with the lowest number.
For example, consider the `WEBDB_PORT=tcp://172.17.0.82:8080` variable. If
that port is used for both tcp and udp, then the tcp one is specified.

Finally, Docker also exposes each Docker originated environment variable
from the source container as an environment variable in the target. For each
variable Docker creates an `<alias>_ENV_<name>` variable in the target
container. The variable's value is set to the value Docker used when it
started the source container.

Returning to our database example, you can run the `env`
command to list the specified container's environment variables.

```
 $ docker run --rm --name web2 --link db:db training/webapp env
 . . .
 DB_NAME=/web2/db
 DB_PORT=tcp://172.17.0.5:5432
 DB_PORT_5432_TCP=tcp://172.17.0.5:5432
 DB_PORT_5432_TCP_PROTO=tcp
 DB_PORT_5432_TCP_PORT=5432
 DB_PORT_5432_TCP_ADDR=172.17.0.5
 . . .
```

You can see that Docker has created a series of environment variables with
useful information about the source `db` container. Each variable is prefixed
with `DB_`, which is populated from the `alias` you specified above. If the `alias`
were `db1`, the variables would be prefixed with `DB1_`. You can use these
environment variables to configure your applications to connect to the database
on the `db` container. The connection will be secure and private; only the
linked `web` container will be able to talk to the `db` container.

### Important notes on Docker environment variables

Unlike host entries in the [`/etc/hosts` file](#updating-the-etchosts-file),
IP addresses stored in the environment variables are not automatically updated
if the source container is restarted. We recommend using the host entries in
`/etc/hosts` to resolve the IP address of linked containers.

These environment variables are only set for the first process in the
container. Some daemons, such as `sshd`, will scrub them when spawning shells
for connection.

### Updating the `/etc/hosts` file

In addition to the environment variables, Docker adds a host entry for the
source container to the `/etc/hosts` file. Here's an entry for the `web`
container:

    $ docker run -t -i --rm --link db:webdb training/webapp /bin/bash
    root@aed84ee21bde:/opt/webapp# cat /etc/hosts
    172.17.0.7  aed84ee21bde
    . . .
    172.17.0.5  webdb 6e5cdeb2d300 db

You can see two relevant host entries. The first is an entry for the `web`
container that uses the Container ID as a host name. The second entry uses the
link alias to reference the IP address of the `db` container. In addition to
the alias you provide, the linked container's name--if unique from the alias
provided to the `--link` parameter--and the linked container's hostname will
also be added to `/etc/hosts` for the linked container's IP address. You can ping
that host now via any of these entries:

    root@aed84ee21bde:/opt/webapp# apt-get install -yqq inetutils-ping
    root@aed84ee21bde:/opt/webapp# ping webdb
    PING webdb (172.17.0.5): 48 data bytes
    56 bytes from 172.17.0.5: icmp_seq=0 ttl=64 time=0.267 ms
    56 bytes from 172.17.0.5: icmp_seq=1 ttl=64 time=0.250 ms
    56 bytes from 172.17.0.5: icmp_seq=2 ttl=64 time=0.256 ms

> **Note:**
> In the example, you'll note you had to install `ping` because it was not included
> in the container initially.

Here, you used the `ping` command to ping the `db` container using its host entry,
which resolves to `172.17.0.5`. You can use this host entry to configure an application
to make use of your `db` container.

> **Note:**
> You can link multiple recipient containers to a single source. For
> example, you could have multiple (differently named) web containers attached to your
> `db` container.

If you restart the source container, the linked containers' `/etc/hosts` files
will be automatically updated with the source container's new IP address,
allowing linked communication to continue.

    $ docker restart db
    db
    $ docker run -t -i --rm --link db:db training/webapp /bin/bash
    root@aed84ee21bde:/opt/webapp# cat /etc/hosts
    172.17.0.7  aed84ee21bde
    . . .
    172.17.0.9  db

# Next step

Now that you know how to link Docker containers together, the next step is
learning how to manage data, volumes and mounts inside your containers.

Go to [Managing Data in Containers](/userguide/dockervolumes).

diff --git a/docs/userguide/dockerrepos.md b/docs/userguide/dockerrepos.md
new file mode 100644
index 00000000..e06894ef
--- /dev/null
+++ b/docs/userguide/dockerrepos.md
@@ -0,0 +1,175 @@

# Get started with Docker Hub

So far you've learned how to use the command line to run Docker on your local host.
You've learned how to [pull down images](/userguide/usingdocker/) to build containers
from existing images and you've learned how to [create your own images](/userguide/dockerimages).

Next, you're going to learn how to use the [Docker Hub](https://hub.docker.com) to
simplify and enhance your Docker workflows.

The [Docker Hub](https://hub.docker.com) is a public registry maintained by Docker,
Inc. It contains over 15,000 images you can download and use to build containers. It also
provides authentication, work group structure, workflow tools like webhooks and build
triggers, and privacy tools like private repositories for storing images you don't want
to share publicly.

## Docker commands and Docker Hub

Docker itself provides access to Docker Hub services via the `docker search`,
`pull`, `login`, and `push` commands. This page will show you how these commands work.

### Account creation and login

Typically, you'll want to start by creating an account on Docker Hub (if you haven't
already) and logging in. You can create your account directly on
[Docker Hub](https://hub.docker.com/account/signup/), or by running:

    $ docker login

This will prompt you for a user name, which will become the public namespace for your
public repositories.
If your user name is available, Docker will prompt you to enter a password and your
e-mail address. It will then automatically log you in. You can now commit and
push your own images up to your repos on Docker Hub.

> **Note:**
> Your authentication credentials will be stored in the `~/.docker/config.json`
> authentication file in your home directory.

## Searching for images

You can search the [Docker Hub](https://hub.docker.com) registry via its search
interface or by using the command line interface. Searching can find images by image
name, user name, or description:

    $ docker search centos
    NAME            DESCRIPTION                                     STARS  OFFICIAL  TRUSTED
    centos          Official CentOS 6 Image as of 12 April 2014    88
    tianon/centos   CentOS 5 and 6, created using rinse instea...   21
    ...

There you can see two example results: `centos` and `tianon/centos`. The second
result shows that it comes from the public repository of a user named
`tianon`, while the first result, `centos`, doesn't explicitly list a
repository which means that it comes from the trusted top-level namespace for
[Official Repositories](/docker-hub/official_repos). The `/` character separates
a user's repository from the image name.

Once you've found the image you want, you can download it with `docker pull <imagename>`:

    $ docker pull centos
    Pulling repository centos
    0b443ba03958: Download complete
    539c0211cd76: Download complete
    511136ea3c5a: Download complete
    7064731afe90: Download complete

    Status: Downloaded newer image for centos

You now have an image from which you can run containers.
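For example (a minimal sketch, reusing the `centos` image pulled above), you could start a throwaway container to confirm the image works:

    # start an interactive shell; --rm removes the container again on exit
    $ docker run --rm -it centos /bin/bash

Exiting the shell cleans the container up, leaving only the downloaded image behind.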
## Contributing to Docker Hub

Anyone can pull public images from the [Docker Hub](https://hub.docker.com)
registry, but if you would like to share your own images, then you must
register first, as we saw in the [first section of the Docker User
Guide](/userguide/dockerhub/).

## Pushing a repository to Docker Hub

In order to push a repository to its registry, you need to have named an image
or committed your container to a named image as we saw
[here](/userguide/dockerimages).

Now you can push this repository to the registry designated by its name or tag.

    $ docker push yourname/newimage

The image will then be uploaded and available for use by your team-mates and/or the
community.

## Features of Docker Hub

Let's take a closer look at some of the features of Docker Hub. You can find more
information [here](https://docs.docker.com/docker-hub/).

* Private repositories
* Organizations and teams
* Automated Builds
* Webhooks

### Private repositories

Sometimes you have images you don't want to make public and share with
everyone. So Docker Hub allows you to have private repositories. You can
sign up for a plan [here](https://registry.hub.docker.com/plans/).

### Organizations and teams

One of the useful aspects of private repositories is that you can share
them only with members of your organization or team. Docker Hub lets you
create organizations where you can collaborate with your colleagues and
manage private repositories. You can learn how to create and manage an organization
[here](https://registry.hub.docker.com/account/organizations/).

### Automated Builds

Automated Builds automate the building and updating of images from
[GitHub](https://www.github.com) or [Bitbucket](http://bitbucket.com), directly on Docker
Hub. It works by adding a commit hook to your selected GitHub or Bitbucket repository,
triggering a build and update when you push a commit.

#### To set up an Automated Build

1. Create a [Docker Hub account](https://hub.docker.com/) and log in.
2. Link your GitHub or Bitbucket account through the ["Link Accounts"](https://registry.hub.docker.com/account/accounts/) menu.
3. [Configure an Automated Build](https://registry.hub.docker.com/builds/add/).
4. Pick a GitHub or Bitbucket project that has a `Dockerfile` that you want to build.
5. Pick the branch you want to build (the default is the `master` branch).
6. Give the Automated Build a name.
7. Assign an optional Docker tag to the Build.
8. Specify where the `Dockerfile` is located. The default is `/`.

Once the Automated Build is configured, it will automatically trigger a
build and, in a few minutes, you should see your new Automated Build on the [Docker Hub](https://hub.docker.com)
Registry. It will stay in sync with your GitHub or Bitbucket repository until you
deactivate the Automated Build.

To check the output and status of your Automated Build repositories, click on a repository name within the ["Your Repositories" page](https://registry.hub.docker.com/repos/). Automated Builds are indicated by a check-mark icon next to the repository name. Within the repository details page, you may click on the "Build Details" tab to view the status and output of all builds triggered by the Docker Hub.

Once you've created an Automated Build you can deactivate or delete it. You
cannot, however, push to an Automated Build with the `docker push` command.
You can only manage it by committing code to your GitHub or Bitbucket
repository.
You can create multiple Automated Builds per repository and configure them
to point to specific `Dockerfile`s or Git branches.

#### Build triggers

Automated Builds can also be triggered via a URL on Docker Hub. This
allows you to rebuild an Automated Build image on demand.

### Webhooks

Webhooks are attached to your repositories and allow you to trigger an
event when an image or updated image is pushed to the repository. With
a webhook you can specify a target URL and a JSON payload that will be
delivered when the image is pushed.

See the Docker Hub documentation for [more information on
webhooks](https://docs.docker.com/docker-hub/repos/#webhooks).

## Next steps

Go and use Docker!

diff --git a/docs/userguide/dockervolumes.md b/docs/userguide/dockervolumes.md
new file mode 100644
index 00000000..aa624a67
--- /dev/null
+++ b/docs/userguide/dockervolumes.md
@@ -0,0 +1,241 @@

# Managing data in containers

So far we've been introduced to some [basic Docker
concepts](/userguide/usingdocker/), seen how to work with [Docker
images](/userguide/dockerimages/) as well as learned about [networking
and links between containers](/userguide/dockerlinks/). In this section
we're going to discuss how you can manage data inside and between your
Docker containers.

We're going to look at the two primary ways you can manage data in
Docker.

* Data volumes, and
* Data volume containers.

## Data volumes

A *data volume* is a specially-designated directory within one or more
containers that bypasses the [*Union File
System*](/terms/layer/#union-file-system). Data volumes provide several
useful features for persistent or shared data:

- Volumes are initialized when a container is created. If the container's
  base image contains data at the specified mount point, that existing data is
  copied into the new volume upon volume initialization.
- Data volumes can be shared and reused among containers.
- Changes to a data volume are made directly.
- Changes to a data volume will not be included when you update an image.
- Data volumes persist even if the container itself is deleted.

Data volumes are designed to persist data, independent of the container's life
cycle. Docker therefore *never* automatically deletes volumes when you remove
a container, nor will it "garbage collect" volumes that are no longer
referenced by a container.

### Adding a data volume

You can add a data volume to a container using the `-v` flag with the
`docker create` and `docker run` commands. You can use the `-v` flag multiple times
to mount multiple data volumes. Let's mount a single volume now in our web
application container.

    $ docker run -d -P --name web -v /webapp training/webapp python app.py

This will create a new volume inside a container at `/webapp`.

> **Note:**
> You can also use the `VOLUME` instruction in a `Dockerfile` to add one or
> more new volumes to any container created from that image.

Docker volumes default to mount in read-write mode, but you can also set them
to be mounted read-only.

    $ docker run -d -P --name web -v /opt/webapp:ro training/webapp python app.py

### Locating a volume

You can locate the volume on the host by utilizing the `docker inspect` command.

    $ docker inspect web

The output will provide details on the container configurations including the
volumes. The output should look something like the following:

    ...
    "Mounts": [
        {
            "Name": "fac362...80535",
            "Source": "/var/lib/docker/volumes/fac362...80535/_data",
            "Destination": "/webapp",
            "Driver": "local",
            "Mode": "",
            "RW": true
        }
    ]
    ...

You will notice in the above that `Source` specifies the location on the host and
`Destination` specifies the volume location inside the container. `RW` shows
if the volume is read/write.

### Mount a host directory as a data volume

In addition to creating a volume using the `-v` flag you can also mount a
directory from your Docker daemon's host into a container.

>**Note**: If you are using Docker Machine on Mac or Windows, your Docker daemon
>only has limited access to your OS X/Windows filesystem. Docker Machine tries
>to auto-share your `/Users` (OS X) or `C:\Users` (Windows) directory - and so
>you can mount files or directories using `docker run -v
>/Users/<path>:/<containerPath> ...` (OS X) or `docker run -v
>/c/Users/<path>:/<containerPath> ...` (Windows). All other paths come from your
>virtual machine's filesystem.

    $ docker run -d -P --name web -v /src/webapp:/opt/webapp training/webapp python app.py

This will mount the host directory, `/src/webapp`, into the container at
`/opt/webapp`.

> **Note:**
> If the path `/opt/webapp` already exists inside the container's image, its
> contents will be replaced by the contents of `/src/webapp` on the host to stay
> consistent with the expected behavior of `mount`.
>
> When using Boot2Docker on Windows through git bash, there might be an issue with the
> way the source directory name is parsed. You can fix it by using a double slash at
> the beginning of the source directory name as explained in [issue #12751](https://github.com/docker/docker/issues/12751).

This is very useful for testing; for example, we can
mount our source code inside the container and see our application at work as
we change the source code. The directory on the host must be specified as an
absolute path and if the directory doesn't exist Docker will automatically
create it for you.

> **Note:**
> This is not available from a `Dockerfile` due to the portability
> and sharing purpose of built images. The host directory is, by its nature,
> host-dependent, so a host directory specified in a `Dockerfile` probably
> wouldn't work on all hosts.

Docker volumes default to mount in read-write mode, but you can also set them
to be mounted read-only.

    $ docker run -d -P --name web -v /src/webapp:/opt/webapp:ro training/webapp python app.py

Here we've mounted the same `/src/webapp` directory but we've added the `ro`
option to specify that the mount should be read-only.

### Mount a host file as a data volume

The `-v` flag can also be used to mount a single file - instead of *just*
directories - from the host machine.

    $ docker run --rm -it -v ~/.bash_history:/.bash_history ubuntu /bin/bash

This will drop you into a bash shell in a new container, you will have your bash
history from the host and when you exit the container, the host will have the
history of the commands typed while in the container.

> **Note:**
> Many tools used to edit files, including `vi` and `sed --in-place`, may result
> in an inode change. Since Docker v1.1.0, this will produce an error such as
> "*sed: cannot rename ./sedKdJ9Dy: Device or resource busy*". In the case where
> you want to edit the mounted file, it is often easiest to instead mount the
> parent directory.
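As a quick sketch of that workaround, assuming the file you want to edit lives in a hypothetical `~/myconfig` directory on the host:

    # mount the parent directory instead of the single file (~/myconfig is a hypothetical path)
    $ docker run --rm -it -v ~/myconfig:/myconfig ubuntu /bin/bash

Because the whole directory is bind-mounted, an editor inside the container can rename or replace files within it without hitting the busy-inode error.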
## Creating and mounting a data volume container

If you have some persistent data that you want to share between
containers, or want to use from non-persistent containers, it's best to
create a named Data Volume Container, and then to mount the data from
it.

Let's create a new named container with a volume to share.
While this container doesn't run an application, it reuses the `training/postgres`
image so that all containers are using layers in common, saving disk space.

    $ docker create -v /dbdata --name dbdata training/postgres /bin/true

You can then use the `--volumes-from` flag to mount the `/dbdata` volume in another container.

    $ docker run -d --volumes-from dbdata --name db1 training/postgres

And another:

    $ docker run -d --volumes-from dbdata --name db2 training/postgres

In this case, if the `postgres` image contained a directory called `/dbdata`
then mounting the volumes from the `dbdata` container hides the
`/dbdata` files from the `postgres` image. The result is only the files
from the `dbdata` container are visible.

You can use multiple `--volumes-from` parameters to bring together multiple data
volumes from multiple containers.

You can also extend the chain by mounting the volume that came from the
`dbdata` container in yet another container via the `db1` or `db2` containers.

    $ docker run -d --name db3 --volumes-from db1 training/postgres

If you remove containers that mount volumes, including the initial `dbdata`
container, or the subsequent containers `db1` and `db2`, the volumes will not
be deleted. To delete the volume from disk, you must explicitly call
`docker rm -v` against the last container with a reference to the volume. This
allows you to upgrade, or effectively migrate data volumes between containers.

> **Note:** Docker will not warn you when removing a container *without*
> providing the `-v` option to delete its volumes. If you remove containers
> without using the `-v` option, you may end up with "dangling" volumes;
> volumes that are no longer referenced by a container.
> Dangling volumes are difficult to get rid of and can take up a large amount
> of disk space. We're working on improving volume management and you can check
> progress on this in [pull request #14214](https://github.com/docker/docker/pull/14214).

## Backup, restore, or migrate data volumes

Another useful function we can perform with volumes is to use them for
backups, restores or migrations. We do this by using the
`--volumes-from` flag to create a new container that mounts that volume,
like so:

    $ docker run --volumes-from dbdata -v $(pwd):/backup ubuntu tar cvf /backup/backup.tar /dbdata

Here we've launched a new container and mounted the volume from the
`dbdata` container. We've then mounted a local host directory as
`/backup`. Finally, we've passed a command that uses `tar` to back up the
contents of the `dbdata` volume to a `backup.tar` file inside our
`/backup` directory. When the command completes and the container stops
we'll be left with a backup of our `dbdata` volume.

You could then restore it to the same container, or another that you've made
elsewhere. Create a new container.

    $ docker run -v /dbdata --name dbdata2 ubuntu /bin/bash

Then un-tar the backup file in the new container's data volume.
    $ docker run --volumes-from dbdata2 -v $(pwd):/backup ubuntu bash -c "cd /dbdata && tar xvf /backup/backup.tar"

You can use the techniques above to automate backup, migration and
restore testing using your preferred tools.

# Next steps

Now that we've learned a bit more about how to use Docker, we're going to see how to
combine Docker with the services available on
[Docker Hub](https://hub.docker.com) including Automated Builds and private
repositories.

Go to [Working with Docker Hub](/userguide/dockerrepos).

diff --git a/docs/userguide/image_management.md b/docs/userguide/image_management.md
new file mode 100644
index 00000000..28fef6c0
--- /dev/null
+++ b/docs/userguide/image_management.md
@@ -0,0 +1,53 @@

# Image management

The Docker Engine provides a client which you can use to create images on the command line or through a build process. You can run these images in a container or publish them for others to use. Storing the images you create, searching for images you might want, or publishing images others might use are all elements of image management.

This section provides an overview of the major features and products Docker provides for image management.

## Docker Hub

The [Docker Hub](https://docs.docker.com/docker-hub/) is responsible for centralizing information about user accounts, images, and public name spaces. It has different components:

 - Web UI
 - Meta-data store (comments, stars, list public repositories)
 - Authentication service
 - Tokenization

There is only one instance of the Docker Hub, run and managed by Docker Inc. This public Hub is useful for most individuals and smaller companies.

## Docker Registry and the Docker Trusted Registry

The Docker Registry is a component of Docker's ecosystem. A registry is a
storage and content delivery system, holding named Docker images, available in
different tagged versions. For example, the image `distribution/registry`, with
tags `2.0` and `latest`. Users interact with a registry by using `docker push` and
`docker pull` commands. For example, `docker pull myregistry.com/stevvooe/batman:voice`.

The Docker Hub has its own registry which, like the Hub itself, is run and managed by Docker. There are other ways to obtain a registry. You can purchase the [Docker Trusted Registry](https://docs.docker.com/docker-trusted-registry) product to run on your company's network. Alternatively, you can use the Docker Registry component to build a private registry. For information about using a registry, see the overview for the [Docker Registry](https://docs.docker.com/registry).

## Content Trust

When transferring data among networked systems, *trust* is a central concern. In
particular, when communicating over an untrusted medium such as the internet, it
is critical to ensure the integrity and publisher of all the data a system
operates on. You use Docker to push and pull images (data) to a registry.
Content trust gives you the ability to both verify the integrity and the
publisher of all the data received from a registry over any channel.

[Content trust](/security/trust) is currently only available for users of the
public Docker Hub. It is currently not available for the Docker Trusted Registry
or for private registries.
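As a brief sketch (assuming a shell session pulling from the public Docker Hub), content trust is switched on per shell with the `DOCKER_CONTENT_TRUST` environment variable:

    # with content trust enabled, pulls are rejected unless signed trust data exists
    $ export DOCKER_CONTENT_TRUST=1
    $ docker pull ubuntu:14.04

While the variable is set, `docker pull` verifies the image's signed metadata before accepting it.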
\ No newline at end of file
diff --git a/docs/userguide/index.md b/docs/userguide/index.md
new file mode 100644
index 00000000..0e5819db
--- /dev/null
+++ b/docs/userguide/index.md
@@ -0,0 +1,129 @@

# Welcome to the Docker user guide

In the [Introduction](/) you got a taste of what Docker is and how it
works. In this guide we're going to take you through the fundamentals of
using Docker and integrating it into your environment.

We'll teach you how to use Docker to:

* Dockerize your applications.
* Run your own containers.
* Build Docker images.
* Share your Docker images with others.
* And a whole lot more!

We've broken this guide into major sections that take you through
the Docker life cycle:

## Getting started with Docker Hub

*How do I use Docker Hub?*

Docker Hub is the central hub for Docker. It hosts public Docker images
and provides services to help you build and manage your Docker
environment. To learn more:

Go to [Using Docker Hub](/userguide/dockerhub).

## Dockerizing applications: A "Hello world"

*How do I run applications inside containers?*

Docker offers a *container-based* virtualization platform to power your
applications. To learn how to Dockerize applications and run them:

Go to [Dockerizing Applications](/userguide/dockerizing).

## Working with containers

*How do I manage my containers?*

Once you get a grip on running your applications in Docker containers
we're going to show you how to manage those containers. To find out
about how to inspect, monitor and manage containers:

Go to [Working With Containers](/userguide/usingdocker).

## Working with Docker images

*How can I access, share and build my own images?*

Once you've learnt how to use Docker it's time to take the next step and
learn how to build your own application images with Docker.

Go to [Working with Docker Images](/userguide/dockerimages).

## Linking containers together

Until now we've seen how to build individual applications inside Docker
containers. Now learn how to build whole application stacks with Docker
by linking together multiple Docker containers.

Go to [Linking Containers Together](/userguide/dockerlinks).

## Managing data in containers

Now we know how to link Docker containers together, the next step is
learning how to manage data, volumes and mounts inside our containers.

Go to [Managing Data in Containers](/userguide/dockervolumes).

## Working with Docker Hub

Now we've learned a bit more about how to use Docker we're going to see
how to combine Docker with the services available on Docker Hub including
Automated Builds and private repositories.

Go to [Working with Docker Hub](/userguide/dockerrepos).

## Docker Compose

Docker Compose allows you to define an application's components -- their containers,
configuration, links and volumes -- in a single file. Then a single command
will set everything up and start your application running.

Go to [Docker Compose user guide](/compose/).

## Docker Machine

Docker Machine helps you get Docker Engines up and running quickly. Machine
can set up hosts for Docker Engines on your computer, on cloud providers,
and/or in your data center, and then configure your Docker client to securely
talk to them.

Go to [Docker Machine user guide](/machine/).

## Docker Swarm

Docker Swarm pools several Docker Engines together and exposes them as a single
virtual Docker Engine.
It serves the standard Docker API, so any tool that already
works with Docker can now transparently scale up to multiple hosts.

Go to [Docker Swarm user guide](/swarm/).

## Getting help

* [Docker homepage](http://www.docker.com/)
* [Docker Hub](https://hub.docker.com)
* [Docker blog](http://blog.docker.com/)
* [Docker documentation](https://docs.docker.com/)
* [Docker Getting Started Guide](http://www.docker.com/gettingstarted/)
* [Docker code on GitHub](https://github.com/docker/docker)
* [Docker mailing list](https://groups.google.com/forum/#!forum/docker-user)
* Docker on IRC: irc.freenode.net and channel #docker
* [Docker on Twitter](http://twitter.com/docker)
* Get [Docker help](http://stackoverflow.com/search?q=docker) on StackOverflow
* [Docker.com](http://www.docker.com/)

diff --git a/docs/userguide/labels-custom-metadata.md b/docs/userguide/labels-custom-metadata.md
new file mode 100644
index 00000000..2be7f859
--- /dev/null
+++ b/docs/userguide/labels-custom-metadata.md
@@ -0,0 +1,201 @@

# Apply custom metadata

You can apply metadata to your images, containers, or daemons via
labels. Metadata can serve a wide range of uses. Use labels to add notes or
licensing information to an image or to identify a host.

A label is a `<key>` / `<value>` pair. Docker stores the label values as
*strings*. You can specify multiple labels but each `<key>` / `<value>` must be
unique to avoid overwriting. If you specify the same `key` several times but with
different values, newer labels overwrite previous labels. Docker uses
the last `key=value` you supply.

>**Note:** Support for daemon-labels was added in Docker 1.4.1. Labels on
>containers and images are new in Docker 1.6.0.

## Label keys (namespaces)

Docker puts no hard restrictions on the label `key` you use. However, labels with
simple keys can conflict. For example, you can categorize your images by chip
architecture using an "architecture" label:

    LABEL architecture="amd64"

    LABEL architecture="ARMv7"

But another user might label images by the architectural style of buildings:

    LABEL architecture="Art Nouveau"

To prevent naming conflicts, Docker namespaces label keys using a reverse domain
notation. Use the following guidelines to name your keys:

- All (third-party) tools should prefix their keys with the
  reverse DNS notation of a domain controlled by the author. For
  example, `com.example.some-label`.

- The `com.docker.*`, `io.docker.*` and `org.dockerproject.*` namespaces are
  reserved for Docker's internal use.

- Keys should only consist of lower-cased alphanumeric characters,
  dots and dashes (for example, `[a-z0-9-.]`)

- Keys should start *and* end with an alphanumeric character

- Keys may not contain consecutive dots or dashes.

- Keys *without* namespace (dots) are reserved for CLI use. This allows end-users
  to add metadata to their containers and images without having to type
  cumbersome namespaces on the command-line.

These are guidelines and Docker does not *enforce* them. Failing to follow these
guidelines can result in conflicting labels. If you're building a tool that uses
labels, you *should* use namespaces for your label keys.

## Store structured data in labels

Label values can contain any data type that can be stored as a string.
For +example, consider this JSON: + + + { + "Description": "A containerized foobar", + "Usage": "docker run --rm example/foobar [args]", + "License": "GPL", + "Version": "0.0.1-beta", + "aBoolean": true, + "aNumber" : 0.01234, + "aNestedArray": ["a", "b", "c"] + } + +You can store this struct in a label by serializing it to a string first: + + LABEL com.example.image-specs="{\"Description\":\"A containerized foobar\",\"Usage\":\"docker run --rm example\\/foobar [args]\",\"License\":\"GPL\",\"Version\":\"0.0.1-beta\",\"aBoolean\":true,\"aNumber\":0.01234,\"aNestedArray\":[\"a\",\"b\",\"c\"]}" + +While it is *possible* to store structured data in label values, Docker treats +this data as a 'regular' string. This means that Docker doesn't offer ways to +query (filter) based on nested properties. If your tool needs to filter on +nested properties, the tool itself should implement this. + + +## Add labels to images; the `LABEL` instruction + +Adding labels to an image: + + + LABEL [.][=] ... + +The `LABEL` instruction adds a label to your image, optionally setting its value. +Use surrounding quotes or backslashes for labels that contain +white space character: + + LABEL vendor=ACME\ Incorporated + LABEL com.example.version.is-beta + LABEL com.example.version="0.0.1-beta" + LABEL com.example.release-date="2015-02-12" + +The `LABEL` instruction supports setting multiple labels in a single instruction +using this notation: + + LABEL com.example.version="0.0.1-beta" com.example.release-date="2015-02-12" + +Wrapping is allowed by using a backslash (`\`) as continuation marker: + + LABEL vendor=ACME\ Incorporated \ + com.example.is-beta \ + com.example.version="0.0.1-beta" \ + com.example.release-date="2015-02-12" + +Docker recommends you add multiple labels in a single `LABEL` instruction. Using +individual instructions for each label can result in an inefficient image. This +is because each `LABEL` instruction in a Dockerfile produces a new IMAGE layer. + +You can view the labels via the `docker inspect` command: + + $ docker inspect 4fa6e0f0c678 + + ... + "Labels": { + "vendor": "ACME Incorporated", + "com.example.is-beta": "", + "com.example.version": "0.0.1-beta", + "com.example.release-date": "2015-02-12" + } + ... + + # Inspect labels on container + $ docker inspect -f "{{json .Config.Labels }}" 4fa6e0f0c678 + + {"Vendor":"ACME Incorporated","com.example.is-beta":"","com.example.version":"0.0.1-beta","com.example.release-date":"2015-02-12"} + + # Inspect labels on images + $ docker inspect -f "{{json .ContainerConfig.Labels }}" myimage + + +## Query labels + +Besides storing metadata, you can filter images and containers by label. 
To list all
running containers that have the `com.example.is-beta` label:

    # List all running containers that have a `com.example.is-beta` label
    $ docker ps --filter "label=com.example.is-beta"

List all running containers with a `color` label of `blue`:

    $ docker ps --filter "label=color=blue"

List all images with `vendor` `ACME`:

    $ docker images --filter "label=vendor=ACME"

## Daemon labels

    docker daemon \
      --dns 8.8.8.8 \
      --dns 8.8.4.4 \
      -H unix:///var/run/docker.sock \
      --label com.example.environment="production" \
      --label com.example.storage="ssd"

These labels appear as part of the `docker info` output for the daemon:

    docker -D info
    Containers: 12
    Images: 672
    Storage Driver: aufs
     Root Dir: /var/lib/docker/aufs
     Backing Filesystem: extfs
     Dirs: 697
    Execution Driver: native-0.2
    Logging Driver: json-file
    Kernel Version: 3.13.0-32-generic
    Operating System: Ubuntu 14.04.1 LTS
    CPUs: 1
    Total Memory: 994.1 MiB
    Name: docker.example.com
    ID: RC3P:JTCT:32YS:XYSB:YUBG:VFED:AAJZ:W3YW:76XO:D7NN:TEVU:UCRW
    Debug mode (server): false
    Debug mode (client): true
    File Descriptors: 11
    Goroutines: 14
    EventsListeners: 0
    Init Path: /usr/bin/docker
    Docker Root Dir: /var/lib/docker
    WARNING: No swap limit support
    Labels:
     com.example.environment=production
     com.example.storage=ssd

diff --git a/docs/userguide/level1.md b/docs/userguide/level1.md
new file mode 100644
index 00000000..74a81d79
--- /dev/null
+++ b/docs/userguide/level1.md
@@ -0,0 +1,76 @@

Back

# Dockerfile tutorial

## Test your Dockerfile knowledge - Level 1

### Questions
+ What is the Dockerfile instruction to specify the base image?
+ + +
+ What is the Dockerfile instruction to execute any commands on the current image and commit the results?
+ + +
+ What is the Dockerfile instruction to specify the maintainer of the Dockerfile?
+ + +
+ What is the character used to add comments in Dockerfiles?
+ + +

+

+ + +

+ +
+ +### Fill the Dockerfile +Your best friend Eric Bardin sent you a Dockerfile, but some parts were lost in the ocean. Can you find the missing parts? +
+
+# This is a Dockerfile to create an image with Memcached and Emacs installed. 
+# VERSION 1.0
+# use the ubuntu base image provided by dotCloud + ub
+ E B, eric.bardin@dotcloud.com
+# make sure the package repository is up to date + echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list + apt-get update
+# install memcached +RUN apt-get install -y
+# install emacs + apt-get install -y emacs23 +
+
+ + + +
+

+ +## What's next? + +

In the next level, we will go into more detail about how to specify which command should be executed when the container starts,
which user to use, and how to expose a particular port.

+
Back
Go to the next level
diff --git a/docs/userguide/level2.md b/docs/userguide/level2.md
new file mode 100644
index 00000000..5f640d40
--- /dev/null
+++ b/docs/userguide/level2.md
@@ -0,0 +1,100 @@

Back

# Dockerfile tutorial

## Test your Dockerfile knowledge - Level 2

### Questions:
+What is the Dockerfile instruction to specify the base image?
+ +
+ Which Dockerfile instruction sets the default command for your image?
+ +
+ What is the character used to add comments in Dockerfiles?
+ +
+ Which Dockerfile instruction sets the username to use when running the image?
+ +
+ What is the Dockerfile instruction to execute any command on the current image and commit the results?
+ +
+ Which Dockerfile instruction sets ports to be exposed when running the image?
+ +
+ What is the Dockerfile instruction to specify the maintainer of the Dockerfile?
+ +
+ Which Dockerfile instruction lets you trigger a command as soon as the container starts?
+ +
+

+ +

+ + +

+ +
+ +### Fill the Dockerfile +
+Your best friend Roberto Hashioka sent you a Dockerfile, but some parts were lost in the ocean. Can you find the missing parts? +
+
+# Redis
+#
+# VERSION       0.42
+#
+# use the ubuntu base image provided by dotCloud
+  ub
+MAINT Ro Ha roberto.hashioka@dotcloud.com
+# make sure the package repository is up to date + echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list + apt-get update
+# install wget (required for redis installation) + apt-get install -y wget
+# install make (required for redis installation) + apt-get install -y make
+# install gcc (required for redis installation) +RUN apt-get install -y
+# install redis + wget http://download.redis.io/redis-stable.tar.gz +tar xvzf redis-stable.tar.gz +cd redis-stable && make && make install
+# launch redis when starting the image + ["redis-server"]
+# run as user daemon + daemon
+# expose port 6379 + 6379 +
+ + +
+

+
+ +## What's next? +

Thanks for going through our tutorial! We will be posting Level 3 in the future.

To improve your Dockerfile writing skills even further, visit the Dockerfile best practices page.

Back to the Docs!

diff --git a/docs/userguide/login-web.png b/docs/userguide/login-web.png
new file mode 100644
index 00000000..353705ba
Binary files /dev/null and b/docs/userguide/login-web.png differ
diff --git a/docs/userguide/register-web.png b/docs/userguide/register-web.png
new file mode 100644
index 00000000..709badc6
Binary files /dev/null and b/docs/userguide/register-web.png differ
diff --git a/docs/userguide/search.png b/docs/userguide/search.png
new file mode 100644
index 00000000..187b8d9e
Binary files /dev/null and b/docs/userguide/search.png differ
diff --git a/docs/userguide/usingdocker.md b/docs/userguide/usingdocker.md
new file mode 100644
index 00000000..cdf5f254
--- /dev/null
+++ b/docs/userguide/usingdocker.md
@@ -0,0 +1,309 @@

# Working with containers

In the [last section of the Docker User Guide](/userguide/dockerizing)
we launched our first containers. We launched two containers using the
`docker run` command.

* One container we ran interactively in the foreground.
* One container we ran daemonized in the background.

In the process we learned about several Docker commands:

* `docker ps` - Lists containers.
* `docker logs` - Shows us the standard output of a container.
* `docker stop` - Stops running containers.

> **Tip:**
> Another way to learn about `docker` commands is our
> [interactive tutorial](https://www.docker.com/tryit/).

The `docker` client is pretty simple. Each action you can take
with Docker is a command and each command can take a series of
flags and arguments.

    # Usage: [sudo] docker [command] [flags] [arguments] ..
    # Example:
    $ docker run -i -t ubuntu /bin/bash

Let's see this in action by using the `docker version` command to return
version information on the currently installed Docker client and daemon.

    $ docker version

This command will not only provide you the version of the Docker client and
daemon you are using, but also the version of Go (the programming
language powering Docker).

    Client version: 0.8.0
    Go version (client): go1.2
    Git commit (client): cc3a8c8
    Server version: 0.8.0
    Git commit (server): cc3a8c8
    Go version (server): go1.2
    Last stable version: 0.8.0

## Get Docker command help

You can display the help for specific Docker commands. The help details the
options and their usage. To see a list of all the possible commands, use the
following:

    $ docker --help

To see usage for a specific command, specify the command with the `--help` flag:

    $ docker attach --help

    Usage: docker attach [OPTIONS] CONTAINER

    Attach to a running container

      --help=false        Print usage
      --no-stdin=false    Do not attach stdin
      --sig-proxy=true    Proxy all received signals to the process

> **Note:**
> For further details and examples of each command, see the
> [command reference](/reference/commandline/cli/) in this guide.

## Running a web application in Docker

So now we've learnt a bit more about the `docker` client, let's move on to
the important stuff: running more containers. So far none of the
containers we've run did anything particularly useful, so let's
change that by running an example web application in Docker.

For our web application we're going to run a Python Flask application.
Let's start with a `docker run` command.
    $ docker run -d -P training/webapp python app.py

Let's review what our command did. We've specified two flags: `-d` and
`-P`. We've already seen the `-d` flag which tells Docker to run the
container in the background. The `-P` flag is new and tells Docker to
map any required network ports inside our container to our host. This
lets us view our web application.

We've specified an image: `training/webapp`. This image is a
pre-built image we've created that contains a simple Python Flask web
application.

Lastly, we've specified a command for our container to run: `python app.py`. This launches our web application.

> **Note:**
> You can see more detail on the `docker run` command in the [command
> reference](/reference/commandline/cli/#run) and the [Docker Run
> Reference](/reference/run/).

## Viewing our web application container

Now let's see our running container using the `docker ps` command.

    $ docker ps -l
    CONTAINER ID  IMAGE                   COMMAND        CREATED        STATUS        PORTS                    NAMES
    bc533791f3f5  training/webapp:latest  python app.py  5 seconds ago  Up 2 seconds  0.0.0.0:49155->5000/tcp  nostalgic_morse

You can see we've specified a new flag, `-l`, for the `docker ps`
command. This tells the `docker ps` command to return the details of the
*last* container started.

> **Note:**
> By default, the `docker ps` command only shows information about running
> containers. If you want to see stopped containers too use the `-a` flag.

We can see the same details we saw [when we first Dockerized a
container](/userguide/dockerizing) with one important addition in the `PORTS`
column.

    PORTS
    0.0.0.0:49155->5000/tcp

When we passed the `-P` flag to the `docker run` command Docker mapped any
ports exposed in our image to our host.

> **Note:**
> We'll learn more about how to expose ports in Docker images when
> [we learn how to build images](/userguide/dockerimages).

In this case Docker has exposed port 5000 (the default Python Flask
port) on port 49155.

Network port bindings are very configurable in Docker. In our last example the
`-P` flag is a shortcut for `-p 5000` that maps port 5000 inside the container
to a high port (from the *ephemeral port range* which typically ranges from 32768
to 61000) on the local Docker host. We can also bind Docker containers to
specific ports using the `-p` flag, for example:

    $ docker run -d -p 80:5000 training/webapp python app.py

This would map port 5000 inside our container to port 80 on our local
host. You might be asking about now: why wouldn't we just want to always
use 1:1 port mappings in Docker containers rather than mapping to high
ports? Well 1:1 mappings have the constraint of only being able to map
one of each port on your local host. Let's say you want to test two
Python applications: both bound to port 5000 inside their own containers.
Without Docker's port mapping you could only access one at a time on the
Docker host.

So let's now browse to port 49155 in a web browser to
see the application.

![Viewing the web application](/userguide/webapp1.png)

Our Python application is live!

> **Note:**
> If you have used the `boot2docker` virtual machine on OS X, Windows or Linux,
> you'll need to get the IP of the virtual host instead of using localhost.
> You can do this by running the following outside of the `boot2docker` shell
> (i.e., from your command line or terminal application).
+> **Note:**
+> If you have used the `boot2docker` virtual machine on OS X, Windows or Linux,
+> you'll need to get the IP of the virtual host instead of using localhost.
+> You can do this by running the following outside of the `boot2docker` shell
+> (i.e., from your command line or terminal application):
+>
+>     $ boot2docker ip
+>     The VM's Host only interface IP address is: 192.168.59.103
+>
+> In this case you'd browse to http://192.168.59.103:49155 for the above example.
+
+## A network port shortcut
+
+Using the `docker ps` command to return the mapped port is a bit clumsy, so
+Docker has a useful shortcut we can use: `docker port`. To use `docker port` we
+specify the ID or name of our container and then the port for which we need the
+corresponding public-facing port.
+
+    $ docker port nostalgic_morse 5000
+    0.0.0.0:49155
+
+In this case we've looked up what port is mapped externally to port 5000 inside
+the container.
+
+## Viewing the web application's logs
+
+Let's also find out a bit more about what's happening with our application and
+use another of the commands we've learnt, `docker logs`.
+
+    $ docker logs -f nostalgic_morse
+    * Running on http://0.0.0.0:5000/
+    10.0.2.2 - - [23/May/2014 20:16:31] "GET / HTTP/1.1" 200 -
+    10.0.2.2 - - [23/May/2014 20:16:31] "GET /favicon.ico HTTP/1.1" 404 -
+
+This time though we've added a new flag, `-f`. This causes the `docker
+logs` command to act like the `tail -f` command and watch the
+container's standard output. We can see here the logs from Flask showing
+the application running on port 5000 and the access log entries for it.
+
+## Looking at our web application container's processes
+
+In addition to the container's logs we can also examine the processes
+running inside it using the `docker top` command.
+
+    $ docker top nostalgic_morse
+    PID                 USER                COMMAND
+    854                 root                python app.py
+
+Here we can see our `python app.py` command is the only process running inside
+the container.
+
+## Inspecting our web application container
+
+Lastly, we can take a low-level dive into our Docker container using the
+`docker inspect` command. It returns a JSON hash of useful configuration
+and status information about Docker containers.
+
+    $ docker inspect nostalgic_morse
+
+Let's see a sample of that JSON output.
+
+    [{
+        "ID": "bc533791f3f500b280a9626688bc79e342e3ea0d528efe3a86a51ecb28ea20",
+        "Created": "2014-05-26T05:52:40.808952951Z",
+        "Path": "python",
+        "Args": [
+           "app.py"
+        ],
+        "Config": {
+           "Hostname": "bc533791f3f5",
+           "Domainname": "",
+           "User": "",
+    . . .
+
+We can also narrow down the information we want to return by requesting a
+specific element, for example to return the container's IP address we would:
+
+    $ docker inspect -f '{{ .NetworkSettings.IPAddress }}' nostalgic_morse
+    172.17.0.5
+
+## Stopping our web application container
+
+Okay, we've seen our web application working. Now let's stop it using the
+`docker stop` command and the name of our container: `nostalgic_morse`.
+
+    $ docker stop nostalgic_morse
+    nostalgic_morse
+
+We can now use the `docker ps` command to check if the container has
+been stopped.
+
+    $ docker ps -l
+
+## Restarting our web application container
+
+Oops! Just after you stopped the container you get a call to say another
+developer needs the container back. From here you have two choices: you
+can create a new container or restart the old one. Let's look at
+starting our previous container back up.
+
+    $ docker start nostalgic_morse
+    nostalgic_morse
+
+Now quickly run `docker ps -l` again to see the running container is
+back up, or browse to the container's URL to see if the application
+responds.
+
+> **Note:**
+> Also available is the `docker restart` command that runs a stop and
+> then start on the container.
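+For example, a quick sketch of the equivalent single command (reusing our
+container name from above):
+
+    $ docker restart nostalgic_morse
+    nostalgic_morse
+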
+
+## Removing our web application container
+
+Your colleague has let you know that they've now finished with the container
+and won't need it again. So let's remove it using the `docker rm` command.
+
+    $ docker rm nostalgic_morse
+    Error: Impossible to remove a running container, please stop it first or use -f
+    2014/05/24 08:12:56 Error: failed to remove one or more containers
+
+What happened? We can't actually remove a running container. This protects
+you from accidentally removing a running container you might need. Let's try
+this again by stopping the container first.
+
+    $ docker stop nostalgic_morse
+    nostalgic_morse
+    $ docker rm nostalgic_morse
+    nostalgic_morse
+
+And now our container is stopped and deleted.
+
+> **Note:**
+> Always remember that deleting a container is final!
+
+# Next steps
+
+Until now we've only used images that we've downloaded from
+[Docker Hub](https://hub.docker.com). Next, let's get introduced to
+building and sharing our own images.
+
+Go to [Working with Docker Images](/userguide/dockerimages).
+
diff --git a/docs/userguide/webapp1.png b/docs/userguide/webapp1.png
new file mode 100644
index 00000000..b92cc87d
Binary files /dev/null and b/docs/userguide/webapp1.png differ
diff --git a/experimental/README.md b/experimental/README.md
new file mode 100644
index 00000000..6c9b2168
--- /dev/null
+++ b/experimental/README.md
@@ -0,0 +1,72 @@
+# Docker Experimental Features
+
+This page contains a list of features in the Docker engine which are
+experimental. Experimental features are **not** ready for production. They are
+provided for test and evaluation in your sandbox environments.
+
+The information below describes each feature and the GitHub pull requests and
+issues associated with it. If necessary, links are provided to additional
+documentation on an issue. As an active Docker user and community member,
+please feel free to provide any feedback on these features you wish.
+
+## Install Docker experimental
+
+Unlike the regular Docker binary, the experimental channel is built and updated nightly on TO.BE.ANNOUNCED. From one day to the next, new features may appear, while existing experimental features may be refined or entirely removed.
+
+1. Verify that you have `curl` installed.
+
+        $ which curl
+
+    If `curl` isn't installed, install it after updating your package manager:
+
+        $ sudo apt-get update
+        $ sudo apt-get install curl
+
+2. Get the latest Docker package.
+
+        $ curl -sSL https://experimental.docker.com/ | sh
+
+    The system prompts you for your `sudo` password. Then, it downloads and
+    installs Docker and its dependencies.
+
+    >**Note**: If your company is behind a filtering proxy, you may find that the
+    >`apt-key`
+    >command fails for the Docker repo during installation. To work around this,
+    >add the key directly using the following:
+    >
+    >       $ curl -sSL https://experimental.docker.com/gpg | sudo apt-key add -
+
+3. Verify `docker` is installed correctly.
+
+        $ sudo docker run hello-world
+
+    This command downloads a test image and runs it in a container.
+
+### Get the Linux binary
+To download the latest experimental `docker` binary for Linux,
+use the following URLs:
+
+    https://experimental.docker.com/builds/Linux/i386/docker-latest
+
+    https://experimental.docker.com/builds/Linux/x86_64/docker-latest
+
+After downloading the appropriate binary, you can follow the instructions
+[here](https://docs.docker.com/installation/binaries/#get-the-docker-binary) to run the `docker` daemon.
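+For example, a minimal sketch of fetching and starting the x86_64 binary by
+hand (the file name and daemon flag follow the examples elsewhere in these
+docs; adjust paths to taste):
+
+    $ wget https://experimental.docker.com/builds/Linux/x86_64/docker-latest
+    $ chmod +x docker-latest
+    $ sudo ./docker-latest -d
+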
+
+> **Note**
+>
+> 1) You can get the MD5 and SHA256 hashes by appending .md5 and .sha256 to the URLs respectively
+>
+> 2) You can get the compressed binaries by appending .tgz to the URLs
+
+## Current experimental features
+
+* [Network plugins](plugins_network.md)
+* [Native Multi-host networking](networking.md)
+* [Compose, Swarm and networking integration](compose_swarm_networking.md)
+
+## How to comment on an experimental feature
+
+Each feature's documentation includes a list of proposal pull requests or PRs associated with the feature. If you want to comment on or suggest a change to a feature, please add it to the existing feature PR.
+
+Issues or problems with a feature? Ask for help on the `#docker` IRC channel or on the [Docker Google group](https://groups.google.com/forum/#!forum/docker-user).
diff --git a/experimental/compose_swarm_networking.md b/experimental/compose_swarm_networking.md
new file mode 100644
index 00000000..570f0be7
--- /dev/null
+++ b/experimental/compose_swarm_networking.md
@@ -0,0 +1,237 @@
+# Experimental: Compose, Swarm and Multi-Host Networking
+
+The [experimental build of Docker](https://github.com/docker/docker/tree/master/experimental) has an entirely new networking system, which enables secure communication between containers on multiple hosts. In combination with Docker Swarm and Docker Compose, you can now run multi-container apps on multi-host clusters with the same tooling and configuration format you use to develop them locally.
+
+> Note: This functionality is in the experimental stage, and contains some hacks and workarounds which will be removed as it matures.
+
+## Prerequisites
+
+Before you start, you’ll need to install the experimental build of Docker, and the latest versions of Machine and Compose.
+
+- To install the experimental Docker build on a Linux machine, follow the instructions [here](https://github.com/docker/docker/tree/master/experimental#install-docker-experimental).
+
+- To install the experimental Docker build on a Mac, run these commands:
+
+        $ curl -L https://experimental.docker.com/builds/Darwin/x86_64/docker-latest > /usr/local/bin/docker
+        $ chmod +x /usr/local/bin/docker
+
+- To install Machine, follow the instructions [here](http://docs.docker.com/machine/).
+
+- To install Compose, follow the instructions [here](http://docs.docker.com/compose/install/).
+
+You’ll also need a [Docker Hub](https://hub.docker.com/account/signup/) account and a [Digital Ocean](https://www.digitalocean.com/) account.
+
+## Set up a swarm with multi-host networking
+
+Set the `DIGITALOCEAN_ACCESS_TOKEN` environment variable to a valid Digital Ocean API token, which you can generate in the [API panel](https://cloud.digitalocean.com/settings/applications).
+
+    export DIGITALOCEAN_ACCESS_TOKEN=abc12345
+
+Start a consul server:
+
+    docker-machine --debug create \
+        -d digitalocean \
+        --engine-install-url="https://experimental.docker.com" \
+        consul
+
+    docker $(docker-machine config consul) run -d \
+        -p "8500:8500" \
+        -h "consul" \
+        progrium/consul -server -bootstrap
+
+(In a real world setting you’d set up a distributed consul, but that’s beyond the scope of this guide!)
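+Before moving on, you can check that the consul container came up (a quick
+sketch; `docker-machine config` supplies the connection flags, as above):
+
+    docker $(docker-machine config consul) ps
+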
+
+Create a Swarm token:
+
+    export SWARM_TOKEN=$(docker run swarm create)
+
+Next, you create a Swarm master with Machine:
+
+    docker-machine --debug create \
+        -d digitalocean \
+        --digitalocean-image="ubuntu-14-10-x64" \
+        --engine-install-url="https://experimental.docker.com" \
+        --engine-opt="default-network=overlay:multihost" \
+        --engine-opt="kv-store=consul:$(docker-machine ip consul):8500" \
+        --engine-label="com.docker.network.driver.overlay.bind_interface=eth0" \
+        swarm-0
+
+Usually Machine can create Swarms for you, but it doesn't yet fully support multi-host networks, so you'll have to start up the Swarm manually:
+
+    docker $(docker-machine config swarm-0) run -d \
+        --restart="always" \
+        --net="bridge" \
+        swarm:latest join \
+        --addr "$(docker-machine ip swarm-0):2376" \
+        "token://$SWARM_TOKEN"
+
+    docker $(docker-machine config swarm-0) run -d \
+        --restart="always" \
+        --net="bridge" \
+        -p "3376:3376" \
+        -v "/etc/docker:/etc/docker" \
+        swarm:latest manage \
+        --tlsverify \
+        --tlscacert="/etc/docker/ca.pem" \
+        --tlscert="/etc/docker/server.pem" \
+        --tlskey="/etc/docker/server-key.pem" \
+        -H "tcp://0.0.0.0:3376" \
+        --strategy spread \
+        "token://$SWARM_TOKEN"
+
+Create a Swarm node:
+
+    docker-machine --debug create \
+        -d digitalocean \
+        --digitalocean-image="ubuntu-14-10-x64" \
+        --engine-install-url="https://experimental.docker.com" \
+        --engine-opt="default-network=overlay:multihost" \
+        --engine-opt="kv-store=consul:$(docker-machine ip consul):8500" \
+        --engine-label="com.docker.network.driver.overlay.bind_interface=eth0" \
+        --engine-label="com.docker.network.driver.overlay.neighbor_ip=$(docker-machine ip swarm-0)" \
+        swarm-1
+
+    docker $(docker-machine config swarm-1) run -d \
+        --restart="always" \
+        --net="bridge" \
+        swarm:latest join \
+        --addr "$(docker-machine ip swarm-1):2376" \
+        "token://$SWARM_TOKEN"
+
+You can create more Swarm nodes if you want - it’s best to give them sensible names (swarm-2, swarm-3, etc.).
+
+Finally, point Docker at your swarm:
+
+    export DOCKER_HOST=tcp://"$(docker-machine ip swarm-0):3376"
+    export DOCKER_TLS_VERIFY=1
+    export DOCKER_CERT_PATH="$HOME/.docker/machine/machines/swarm-0"
+
+## Run containers and get them communicating
+
+Now that you’ve got a swarm up and running, you can create containers on it just like a single Docker instance:
+
+    $ docker run busybox echo hello world
+    hello world
+
+If you run `docker ps -a`, you can see what node that container was started on by looking at its name (here it’s swarm-3):
+
+    $ docker ps -a
+    CONTAINER ID        IMAGE               COMMAND              CREATED             STATUS                      PORTS               NAMES
+    41f59749737b        busybox             "echo hello world"   15 seconds ago      Exited (0) 13 seconds ago                       swarm-3/trusting_leakey
+
+As you start more containers, they’ll be placed on different nodes across the cluster, thanks to Swarm’s default “spread” scheduling strategy.
+
+Every container started on this swarm will use the “overlay:multihost” network by default, meaning they can all intercommunicate. Each container gets an IP address on that network, and an `/etc/hosts` file which will be updated on-the-fly with every other container’s IP address and name. That means that if you have a running container named ‘foo’, other containers can access it at the hostname ‘foo’.
+
+Let’s verify that multi-host networking is functioning.
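+As a first sanity check (a sketch; the `docker network` UI is described in
+[networking.md](networking.md)), the `multihost` overlay network should show
+up when you list networks:
+
+    $ docker network ls
+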
+Start a long-running container:
+
+    $ docker run -d --name long-running busybox top
+
+If you start a new container and inspect its `/etc/hosts` file, you’ll see the long-running container in there:
+
+    $ docker run busybox cat /etc/hosts
+    ...
+    172.21.0.6    long-running
+
+Verify that connectivity works between containers:
+
+    $ docker run busybox ping long-running
+    PING long-running (172.21.0.6): 56 data bytes
+    64 bytes from 172.21.0.6: seq=0 ttl=64 time=7.975 ms
+    64 bytes from 172.21.0.6: seq=1 ttl=64 time=1.378 ms
+    64 bytes from 172.21.0.6: seq=2 ttl=64 time=1.348 ms
+    ^C
+    --- long-running ping statistics ---
+    3 packets transmitted, 3 packets received, 0% packet loss
+    round-trip min/avg/max = 1.140/2.099/7.975 ms
+
+## Run a Compose application
+
+Here’s an example of a simple Python + Redis app using multi-host networking on a swarm.
+
+Create a directory for the app:
+
+    $ mkdir composetest
+    $ cd composetest
+
+Inside this directory, create 2 files.
+
+First, create `app.py` - a simple web app that uses the Flask framework and increments a value in Redis:
+
+    from flask import Flask
+    from redis import Redis
+    import os
+    app = Flask(__name__)
+    redis = Redis(host='composetest_redis_1', port=6379)
+
+    @app.route('/')
+    def hello():
+        redis.incr('hits')
+        return 'Hello World! I have been seen %s times.' % redis.get('hits')
+
+    if __name__ == "__main__":
+        app.run(host="0.0.0.0", debug=True)
+
+Note that we’re connecting to a host called `composetest_redis_1` - this is the name of the Redis container that Compose will start.
+
+Second, create a Dockerfile for the app container:
+
+    FROM python:2.7
+    RUN pip install flask redis
+    ADD . /code
+    WORKDIR /code
+    CMD ["python", "app.py"]
+
+Build the Docker image and push it to the Hub (you’ll need a Hub account). Replace `<username>` with your Docker Hub username:
+
+    $ docker build -t <username>/counter .
+    $ docker push <username>/counter
+
+Next, create a `docker-compose.yml`, which defines the configuration for the web and redis containers. Once again, replace `<username>` with your Hub username:
+
+    web:
+      image: <username>/counter
+      ports:
+        - "80:5000"
+    redis:
+      image: redis
+
+Now start the app:
+
+    $ docker-compose up -d
+    Pulling web (username/counter:latest)...
+    swarm-0: Pulling username/counter:latest... : downloaded
+    swarm-2: Pulling username/counter:latest... : downloaded
+    swarm-1: Pulling username/counter:latest... : downloaded
+    swarm-3: Pulling username/counter:latest... : downloaded
+    swarm-4: Pulling username/counter:latest... : downloaded
+    Creating composetest_web_1...
+    Pulling redis (redis:latest)...
+    swarm-2: Pulling redis:latest... : downloaded
+    swarm-1: Pulling redis:latest... : downloaded
+    swarm-3: Pulling redis:latest... : downloaded
+    swarm-4: Pulling redis:latest... : downloaded
+    swarm-0: Pulling redis:latest... : downloaded
+    Creating composetest_redis_1...
+
+Swarm has created containers for both web and redis, and placed them on different nodes, which you can check with `docker ps`:
+
+    $ docker ps
+    CONTAINER ID        IMAGE                   COMMAND                CREATED             STATUS              PORTS                    NAMES
+    92faad2135c9        redis                   "/entrypoint.sh redi   43 seconds ago      Up 42 seconds                                swarm-2/composetest_redis_1
+    adb809e5cdac        username/counter        "/bin/sh -c 'python    55 seconds ago      Up 54 seconds       45.67.8.9:80->5000/tcp   swarm-1/composetest_web_1
+
+You can also see that the web container has exposed port 80 on its swarm node. If you curl that IP, you’ll get a response from the container:
+
+    $ curl http://45.67.8.9
+    Hello World! I have been seen 1 times.
+
+If you hit it repeatedly, the counter will increment, demonstrating that the web and redis containers are communicating:
+
+    $ curl http://45.67.8.9
+    Hello World! I have been seen 2 times.
+    $ curl http://45.67.8.9
+    Hello World! I have been seen 3 times.
+    $ curl http://45.67.8.9
+    Hello World! I have been seen 4 times.
diff --git a/experimental/networking.md b/experimental/networking.md
new file mode 100644
index 00000000..34517812
--- /dev/null
+++ b/experimental/networking.md
@@ -0,0 +1,135 @@
+# Experimental: Networking and Services
+
+In this feature:
+
+- `network` and `service` become first class objects in the Docker UI
+  - one can now create networks, publish services on a network and attach containers to the services
+- Native multi-host networking
+  - `network` and `service` objects are globally significant and provide multi-host container connectivity natively
+- Inbuilt simple Service Discovery
+  - With multi-host networking and the top-level `service` object, Docker now provides out of the box simple Service Discovery for containers running in a network
+- Batteries included but removable
+  - Docker provides inbuilt native multi-host networking by default, which can be swapped out for any remote driver provided by external plugins.
+
+This is an experimental feature. For information on installing and using experimental features, see [the experimental feature overview](README.md).
+
+## Using Networks
+
+    Usage: docker network [OPTIONS] COMMAND [OPTIONS] [arg...]
+
+    Commands:
+      create                   Create a network
+      rm                       Remove a network
+      ls                       List all networks
+      info                     Display information of a network
+
+    Run 'docker network COMMAND --help' for more information on a command.
+
+      --help=false             Print usage
+
+The `docker network` command is used to manage networks.
+
+To create a network, use `docker network create foo`. You can also specify a driver
+if you have loaded a networking plugin, e.g. `docker network create -d <driver> foo`:
+
+    $ docker network create foo
+    aae601f43744bc1f57c515a16c8c7c4989a2cad577978a32e6910b799a6bccf6
+    $ docker network create -d overlay bar
+    d9989793e2f5fe400a58ef77f706d03f668219688ee989ea68ea78b990fa2406
+
+`docker network ls` is used to display the currently configured networks:
+
+    $ docker network ls
+    NETWORK ID          NAME                TYPE
+    d367e613ff7f        none                null
+    bd61375b6993        host                host
+    cc455abccfeb        bridge              bridge
+    aae601f43744        foo                 bridge
+    d9989793e2f5        bar                 overlay
+
+To get detailed information on a network, you can use the `docker network info`
+command:
+
+    $ docker network info foo
+    Network Id: aae601f43744bc1f57c515a16c8c7c4989a2cad577978a32e6910b799a6bccf6
+    Name: foo
+    Type: null
+
+If you no longer have need of a network, you can delete it with `docker network rm`:
+
+    $ docker network rm bar
+    bar
+    $ docker network ls
+    NETWORK ID          NAME                TYPE
+    aae601f43744        foo                 bridge
+    d367e613ff7f        none                null
+    bd61375b6993        host                host
+    cc455abccfeb        bridge              bridge
+
+## User-defined default network
+
+The Docker daemon supports a configuration flag `--default-network`, which takes a configuration value of the format `DRIVER:NETWORK`, where
+`DRIVER` represents the in-built drivers such as bridge, overlay, container, host and none, or remote drivers via Network Plugins, and
+`NETWORK` is the name of a network created using the `docker network create` command.
+When a container is created and the network mode (`--net`) is not specified, this default network is used to connect
+the container. If `--default-network` is not specified, the default network is the `bridge` network.
+Example: `docker -d --default-network=overlay:multihost`
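+With that flag in place, containers started without `--net` land on the `multihost` network. A sketch of overriding the default for a single container (assuming the network `foo` created earlier):
+
+    $ docker run -itd --net=foo busybox top
+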
+
+## Using Services
+
+    Usage: docker service COMMAND [OPTIONS] [arg...]
+
+    Commands:
+      publish                   Publish a service
+      unpublish                 Remove a service
+      attach                    Attach a backend (container) to the service
+      detach                    Detach the backend from the service
+      ls                        Lists all services
+      info                      Display information about a service
+
+    Run 'docker service COMMAND --help' for more information on a command.
+
+      --help=false              Print usage
+
+Assuming we want to publish a service from container `a0ebc12d3e48` on network `foo` as `my-service`, we would use the following commands:
+
+    $ docker service publish my-service.foo
+    ec56fd74717d00f968c26675c9a77707e49ae64b8e54832ebf78888eb116e428
+    $ docker service attach a0ebc12d3e48 my-service.foo
+
+This would make the container `a0ebc12d3e48` accessible as `my-service` on network `foo`. Any other container in network `foo` can use DNS to resolve the address of `my-service`.
+
+This can also be achieved by using the `--publish-service` flag for `docker run`:
+
+    docker run -itd --publish-service db.foo postgres
+
+`db.foo` in this instance means "place the container on network `foo`, and allow other hosts on `foo` to discover it under the name `db`".
+
+We can see the current services using the `docker service ls` command:
+
+    $ docker service ls
+    SERVICE ID          NAME                NETWORK             PROVIDER
+    ec56fd74717d        my-service          foo                 a0ebc12d3e48
+
+To remove a service:
+
+    $ docker service detach a0ebc12d3e48 my-service.foo
+    $ docker service unpublish my-service.foo
+
+## Native Multi-host networking
+
+There is a lot to say about native multi-host networking and the `overlay` driver that makes it happen. The technical details are documented under https://github.com/docker/libnetwork/blob/master/docs/overlay.md.
+Using the above experimental UI (`docker network`, `docker service` and `--publish-service`), the user can exercise the power of multi-host networking.
+
+Since `network` and `service` objects are globally significant, this feature requires distributed state provided by the `libkv` project.
+Using `libkv`, the user can plug in any of the supported key-value stores (such as consul, etcd or zookeeper).
+The user can specify the key-value store of choice using the `--kv-store` daemon flag, which takes a configuration value of the format `PROVIDER:URL`, where
+`PROVIDER` is the name of the key-value store (such as consul, etcd or zookeeper) and
+`URL` is the URL used to reach the key-value store.
+Example: `docker -d --kv-store=consul:localhost:8500`
+
+Send us feedback and comments on [#14083](https://github.com/docker/docker/issues/14083),
+or on the usual Google Groups (docker-user, docker-dev) and IRC channels.
+
diff --git a/experimental/networking_api.md b/experimental/networking_api.md
new file mode 100644
index 00000000..829c1587
--- /dev/null
+++ b/experimental/networking_api.md
@@ -0,0 +1,489 @@
+# Networking API
+
+### List networks
+
+`GET /networks`
+
+List networks
+
+**Example request**:
+
+    GET /networks HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+      {
+        "name": "none",
+        "id": "8e4e55c6863ef4241c548c1c6fc77289045e9e5d5b5e4875401a675326981898",
+        "type": "null",
+        "endpoints": []
+      },
+      {
+        "name": "host",
+        "id": "062b6d9ea7913fde549e2d186ff0402770658f8c4e769958e1b943ff4e675011",
+        "type": "host",
+        "endpoints": []
+      },
+      {
+        "name": "bridge",
+        "id": "a87dd9a9d58f030962df1c15fb3fa142fbd9261339de458bc89be1895cef2c70",
+        "type": "bridge",
+        "endpoints": []
+      }
+    ]
+
+Query Parameters:
+
+- **name** – Filter results with the given name
+- **partial-id** – Filter results using the partial network ID
+
+Status Codes:
+
+- **200** – no error
+- **400** – bad parameter
+- **500** – server error
+
+### Create a Network
+
+`POST /networks`
+
+**Example request**
+
+    POST /networks HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "name": "foo",
+      "network_type": "",
+      "options": {}
+    }
+
+**Example Response**
+
+    HTTP/1.1 200 OK
+
+    "32fbf63200e2897f5de72cb2a4b653e4b1a523b15116e96e3d73f7849e583653"
+
+Status Codes:
+
+- **200** – no error
+- **400** – bad request
+- **500** – server error
+
+### Get a network
+
+`GET /networks/<network_id>`
+
+Get a network
+
+**Example request**:
+
+    GET /networks/32fbf63200e2897f5de72cb2a4b653e4b1a523b15116e96e3d73f7849e583653 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "name": "foo",
+      "id": "32fbf63200e2897f5de72cb2a4b653e4b1a523b15116e96e3d73f7849e583653",
+      "type": "bridge",
+      "endpoints": []
+    }
+
+Status Codes:
+
+- **200** – no error
+- **404** – not found
+- **500** – server error
+
+### List a network's endpoints
+
+`GET /networks/<network_id>/endpoints`
+
+**Example request**
+
+    GET /networks/32fbf63200e2897f5de72cb2a4b653e4b1a523b15116e96e3d73f7849e583653/endpoints HTTP/1.1
+
+**Example Response**
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+      {
+        "id": "7e0c116b882ee489a8a5345a2638c0129099aa47f4ba114edde34e75c1e4ae0d",
+        "name": "/lonely_pasteur",
+        "network": "foo"
+      }
+    ]
+
+Query Parameters:
+
+- **name** – Filter results with the given name
+- **partial-id** – Filter results using the partial endpoint ID
+
+Status Codes:
+
+- **200** – no error
+- **400** – bad parameter
+- **500** – server error
+
+### Create an endpoint on a network
+
+`POST /networks/<network_id>/endpoints`
+
+**Example request**
+
+    POST /networks/32fbf63200e2897f5de72cb2a4b653e4b1a523b15116e96e3d73f7849e583653/endpoints HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "name": "baz",
+      "exposed_ports": [
+        {
+          "proto": 6,
+          "port": 8080
+        }
+      ],
+      "port_mapping": null
+    }
+
+**Example Response**
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    "b18b795af8bad85cdd691ff24ffa2b08c02219d51992309dd120322689d2ab5a"
+
+Status Codes:
+
+- **200** – no error
+- **400** – bad parameter
+- **500** – server error
+
+### Get an endpoint
+
+`GET /networks/<network_id>/endpoints/<endpoint_id>`
+
+**Example request**
+
+    GET /networks/32fbf63200e2897f5de72cb2a4b653e4b1a523b15116e96e3d73f7849e583653/endpoints/b18b795af8bad85cdd691ff24ffa2b08c02219d51992309dd120322689d2ab5a HTTP/1.1
+
+**Example Response**
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "id": "b18b795af8bad85cdd691ff24ffa2b08c02219d51992309dd120322689d2ab5a",
+      "name": "baz",
+      "network": "foo"
+    }
+
+Status Codes:
+
+- **200** – no error
+- **404** - not found
+- **500** – server error
+
+### Join an endpoint to a container
+
+`POST /networks/<network_id>/endpoints/<endpoint_id>/containers`
+
+**Example request**
+
+    POST /networks/32fbf63200e2897f5de72cb2a4b653e4b1a523b15116e96e3d73f7849e583653/endpoints/b18b795af8bad85cdd691ff24ffa2b08c02219d51992309dd120322689d2ab5a/containers HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "container_id": "e76f406417031bd24c17aeb9bb2f5968b628b9fb6067da264b234544754bf857",
+      "host_name": null,
+      "domain_name": null,
+      "hosts_path": null,
+      "resolv_conf_path": null,
+      "dns": null,
+      "extra_hosts": null,
+      "parent_updates": null,
+      "use_default_sandbox": true
+    }
+
+**Example response**
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    "/var/run/docker/netns/e76f40641703"
+
+Status Codes:
+
+- **200** – no error
+- **400** – bad parameter
+- **404** - not found
+- **500** – server error
+
+### Detach an endpoint from a container
+
+`DELETE /networks/<network_id>/endpoints/<endpoint_id>/containers/<container_id>`
+
+**Example request**
+
+    DELETE /networks/32fbf63200e2897f5de72cb2a4b653e4b1a523b15116e96e3d73f7849e583653/endpoints/b18b795af8bad85cdd691ff24ffa2b08c02219d51992309dd120322689d2ab5a/containers/e76f406417031bd24c17aeb9bb2f5968b628b9fb6067da264b234544754bf857 HTTP/1.1
+    Content-Type: application/json
+
+**Example response**
+
+    HTTP/1.1 200 OK
+
+Status Codes:
+
+- **200** – no error
+- **400** – bad parameter
+- **404** - not found
+- **500** – server error
+
+### Delete an endpoint
+
+`DELETE /networks/<network_id>/endpoints/<endpoint_id>`
+
+**Example request**
+
+    DELETE /networks/32fbf63200e2897f5de72cb2a4b653e4b1a523b15116e96e3d73f7849e583653/endpoints/b18b795af8bad85cdd691ff24ffa2b08c02219d51992309dd120322689d2ab5a HTTP/1.1
+
+**Example Response**
+
+    HTTP/1.1 200 OK
+
+Status Codes:
+
+- **200** – no error
+- **404** - not found
+- **500** – server error
+
+### Delete a network
+
+`DELETE /networks/<network_id>`
+
+Delete a network
+
+**Example request**:
+
+    DELETE /networks/0984d158bd8ae108e4d6bc8fcabedf51da9a174b32cc777026d4a29045654951 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+Status Codes:
+
+- **200** – no error
+- **404** – not found
+- **500** – server error
+
+# Services API
+
+### Publish a Service
+
+`POST /services`
+
+Publish a service
+
+**Example Request**
+
+    POST /services HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "name": "bar",
+      "network_name": "foo",
+      "exposed_ports": null,
+      "port_mapping": null
+    }
+
+**Example Response**
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    "0aee0899e6c5e903cf3ef2bdc28a1c9aaf639c8c8c331fa4ae26344d9e32c1ff"
+
+Status Codes:
+
+- **200** – no error
+- **400** – bad parameter
+- **500** – server error
+
+### Get a Service
+
+`GET /services/0aee0899e6c5e903cf3ef2bdc28a1c9aaf639c8c8c331fa4ae26344d9e32c1ff`
+
+Get a service
+
+**Example Request**:
+
+    GET /services/0aee0899e6c5e903cf3ef2bdc28a1c9aaf639c8c8c331fa4ae26344d9e32c1ff HTTP/1.1
+
+**Example Response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "name": "bar",
+      "id": "0aee0899e6c5e903cf3ef2bdc28a1c9aaf639c8c8c331fa4ae26344d9e32c1ff",
+      "network": "foo"
+    }
+
+Status Codes:
+
+- **200** – no error
+- **400** – bad parameter
+- **404** - not found
+- **500** – server error
+
+### Attach a backend to a service
+
+`POST /services/0aee0899e6c5e903cf3ef2bdc28a1c9aaf639c8c8c331fa4ae26344d9e32c1ff/backend`
+
+Attach a backend to a service
+
+**Example Request**:
POST /services/0aee0899e6c5e903cf3ef2bdc28a1c9aaf639c8c8c331fa4ae26344d9e32c1ff/backend HTTP/1.1 + Content-Type: application/json + + { + "container_id": "98c5241f9475e9efc17e7198e931fb48166010b80f96d48df204e251378ca547", + "host_name": "", + "domain_name": "", + "hosts_path": "", + "resolv_conf_path": "", + "dns": null, + "extra_hosts": null, + "parent_updates": null, + "use_default_sandbox": false + } + +**Example Response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + "/var/run/docker/netns/98c5241f9475" + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### Get Backends for a Service + +Get all backends for a given service + +**Example Request** + + GET /services/0aee0899e6c5e903cf3ef2bdc28a1c9aaf639c8c8c331fa4ae26344d9e32c1ff/backend HTTP/1.1 + +**Example Response** + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "id": "98c5241f9475e9efc17e7198e931fb48166010b80f96d48df204e251378ca547" + } + ] + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### List Services + +`GET /services` + +List services + +**Example request**: + + GET /services HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "name": "/stupefied_stallman", + "id": "c826b26bf736fb4a77db33f83562e59f9a770724e259ab9c3d50d948f8233ae4", + "network": "bridge" + }, + { + "name": "bar", + "id": "0aee0899e6c5e903cf3ef2bdc28a1c9aaf639c8c8c331fa4ae26344d9e32c1ff", + "network": "foo" + } + ] + +Query Parameters: + +- **name** – Filter results with the given name +- **partial-id** – Filter results using the partial network ID +- **network** - Filter results by the given network + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### Detach a Backend from a Service + +`DELETE /services/0aee0899e6c5e903cf3ef2bdc28a1c9aaf639c8c8c331fa4ae26344d9e32c1ff/backend/98c5241f9475e9efc17e7198e931fb48166010b80f96d48df204e251378ca547` + +Detach a backend from a service + +**Example Request** + + DELETE /services/0aee0899e6c5e903cf3ef2bdc28a1c9aaf639c8c8c331fa4ae26344d9e32c1ff/backend/98c5241f9475e9efc17e7198e931fb48166010b80f96d48df204e251378ca547 HTTP/1.1 + +**Example Response** + + HTTP/1.1 200 OK + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### Un-Publish a Service + +`DELETE /services/0aee0899e6c5e903cf3ef2bdc28a1c9aaf639c8c8c331fa4ae26344d9e32c1ff` + +Unpublish a service + +**Example Request** + + DELETE /services/0aee0899e6c5e903cf3ef2bdc28a1c9aaf639c8c8c331fa4ae26344d9e32c1ff HTTP/1.1 + +**Example Response** + + HTTP/1.1 200 OK + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error diff --git a/experimental/plugins_network.md b/experimental/plugins_network.md new file mode 100644 index 00000000..0902bee4 --- /dev/null +++ b/experimental/plugins_network.md @@ -0,0 +1,45 @@ +# Experimental: Docker network driver plugins + +Docker supports network driver plugins via +[LibNetwork](https://github.com/docker/libnetwork). Network driver plugins are +implemented as "remote drivers" for LibNetwork, which shares plugin +infrastructure with Docker. In effect this means that network driver plugins +are activated in the same way as other plugins, and use the same kind of +protocol. + +## Using network driver plugins + +The means of installing and running a network driver plugin will depend on the +particular plugin. 
+
+Once running, however, network driver plugins are used just like the built-in
+network drivers: by being mentioned as a driver in network-oriented Docker
+commands. For example,
+
+    docker network create -d weave mynet
+
+Some network driver plugins are listed in [plugins.md](/docs/extend/plugins.md).
+
+The network thus created is owned by the plugin, so subsequent commands
+referring to that network will also be run through the plugin.
+
+## Network driver plugin protocol
+
+The network driver protocol, in addition to the plugin activation call, is
+documented as part of LibNetwork:
+[https://github.com/docker/libnetwork/blob/master/docs/remote.md](https://github.com/docker/libnetwork/blob/master/docs/remote.md).
+
+# Related GitHub PRs and issues
+
+Please record your feedback in the following issue, on the usual
+Google Groups, or the IRC channel #docker-network.
+
+ - [#14083](https://github.com/docker/docker/issues/14083) Feedback on
+   experimental networking features
+
+Other pertinent issues:
+
+ - [#13977](https://github.com/docker/docker/issues/13977) UI for using networks
+ - [#14023](https://github.com/docker/docker/pull/14023) --default-network option
+ - [#14051](https://github.com/docker/docker/pull/14051) --publish-service option
+ - [#13441](https://github.com/docker/docker/pull/13441) (Deprecated) Networks API & UI
diff --git a/graph/export.go b/graph/export.go
new file mode 100644
index 00000000..5c7fbcf1
--- /dev/null
+++ b/graph/export.go
@@ -0,0 +1,166 @@
+package graph
+
+import (
+	"encoding/json"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/registry"
+)
+
+// ImageExportConfig holds the configuration for an image export:
+// Names is the set of tags to export, and Outstream is the writer
+// the images are written to.
+type ImageExportConfig struct {
+	Names     []string
+	Outstream io.Writer
+}
+
+// ImageExport exports all images with the given tags. All versions
+// containing the same tag are exported. The resulting output is an
+// uncompressed tar ball.
+func (s *TagStore) ImageExport(imageExportConfig *ImageExportConfig) error {
+
+	// stage the export in a temporary directory
+	tempdir, err := ioutil.TempDir("", "docker-export-")
+	if err != nil {
+		return err
+	}
+	defer os.RemoveAll(tempdir)
+
+	rootRepoMap := map[string]Repository{}
+	addKey := func(name string, tag string, id string) {
+		logrus.Debugf("add key [%s:%s]", name, tag)
+		if repo, ok := rootRepoMap[name]; !ok {
+			rootRepoMap[name] = Repository{tag: id}
+		} else {
+			repo[tag] = id
+		}
+	}
+	for _, name := range imageExportConfig.Names {
+		name = registry.NormalizeLocalName(name)
+		logrus.Debugf("Serializing %s", name)
+		rootRepo := s.Repositories[name]
+		if rootRepo != nil {
+			// this is a base repo name, like 'busybox'
+			for tag, id := range rootRepo {
+				addKey(name, tag, id)
+				if err := s.exportImage(id, tempdir); err != nil {
+					return err
+				}
+			}
+		} else {
+			img, err := s.LookupImage(name)
+			if err != nil {
+				return err
+			}
+
+			if img != nil {
+				// This is a named image like 'busybox:latest'
+				repoName, repoTag := parsers.ParseRepositoryTag(name)
+
+				// check this length, because a lookup of a truncated hash will not have a tag
+				// and will not need to be added to this map
+				if len(repoTag) > 0 {
+					addKey(repoName, repoTag, img.ID)
+				}
+				if err := s.exportImage(img.ID, tempdir); err != nil {
+					return err
+				}
+
+			} else {
+				// otherwise the name must be an image ID; export it directly
+				if err := s.exportImage(name, tempdir); err != nil {
+					return err
+				}
+			}
+		}
+		logrus.Debugf("End Serializing %s", name)
+	}
+	// write repositories, if there is something to write
+	if len(rootRepoMap) > 0 {
+		f, err := os.OpenFile(filepath.Join(tempdir, "repositories"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
+		if err != nil {
+			return err
+		}
+		if err := json.NewEncoder(f).Encode(rootRepoMap); err != nil {
+			f.Close()
+			return err
+		}
+		if err := f.Close(); err != nil {
+			return err
+		}
+	} else {
+		logrus.Debugf("There were no repositories to write")
+	}
+
+	fs, err := archive.Tar(tempdir, archive.Uncompressed)
+	if err != nil {
+		return err
+	}
+	defer fs.Close()
+
+	if _, err := io.Copy(imageExportConfig.Outstream, fs); err != nil {
+		return err
+	}
+	logrus.Debugf("End export image")
+	return nil
+}
+
+// FIXME: this should be a top-level function, not a class method
+func (s *TagStore) exportImage(name, tempdir string) error {
+	for n := name; n != ""; {
+		img, err := s.LookupImage(n)
+		if err != nil {
+			return err
+		}
+
+		// temporary directory
+		tmpImageDir := filepath.Join(tempdir, n)
+		if err := os.Mkdir(tmpImageDir, os.FileMode(0755)); err != nil {
+			if os.IsExist(err) {
+				// this image (and therefore its ancestors) has already been exported
+				return nil
+			}
+			return err
+		}
+
+		var version = "1.0"
+		var versionBuf = []byte(version)
+
+		if err := ioutil.WriteFile(filepath.Join(tmpImageDir, "VERSION"), versionBuf, os.FileMode(0644)); err != nil {
+			return err
+		}
+
+		imageInspectRaw, err := json.Marshal(img)
+		if err != nil {
+			return err
+		}
+
+		// serialize json
+		jsonFile, err := os.Create(filepath.Join(tmpImageDir, "json"))
+		if err != nil {
+			return err
+		}
+
+		written, err := jsonFile.Write(imageInspectRaw)
+		if err != nil {
+			jsonFile.Close()
+			return err
+		}
+		if written != len(imageInspectRaw) {
+			logrus.Warnf("%d bytes should have been written instead %d have been written", len(imageInspectRaw), written)
+		}
+		if err := jsonFile.Close(); err != nil {
+			return err
+		}
+
+		// serialize filesystem
+		fsTar, err := os.Create(filepath.Join(tmpImageDir, "layer.tar"))
+		if err != nil {
+			return err
+		}
+		if err := s.ImageTarLayer(n, fsTar); err != nil {
+			fsTar.Close()
+			return err
+		}
+		if err := fsTar.Close(); err != nil {
+			return err
+		}
+
+		n = img.Parent
+	}
+	return nil
+}
diff --git a/graph/fixtures/validate_manifest/bad_manifest b/graph/fixtures/validate_manifest/bad_manifest
new file mode 100644
index 00000000..a1f02a62
--- /dev/null
+++ b/graph/fixtures/validate_manifest/bad_manifest
@@ -0,0 +1,38 @@
+{
+   "schemaVersion": 2,
+   "name": "library/hello-world",
+   "tag": "latest",
+   "architecture": "amd64",
+   "fsLayers": [
+      {
+         "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
+      },
+      {
+         "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"
+      }
+   ],
+   "history": [
+      {
+         "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD 
[\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" + } + ], + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", + "kty": "EC", + "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", + "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" + }, + "alg": "ES256" + }, + "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", + "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" + } + ] +} diff --git a/graph/fixtures/validate_manifest/extra_data_manifest b/graph/fixtures/validate_manifest/extra_data_manifest new file mode 100644 index 00000000..beec19a8 --- /dev/null +++ b/graph/fixtures/validate_manifest/extra_data_manifest @@ -0,0 +1,46 @@ +{ + "schemaVersion": 1, + "name": "library/hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "history": [ + { + "v1Compatibility": 
"{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" + } + ], + "fsLayers": [ + { + "blobSum": "sha256:ffff95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:ffff658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", + "kty": "EC", + "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", + "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" + }, + "alg": "ES256" + }, + "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", + "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" + } + ] +} diff --git 
a/graph/fixtures/validate_manifest/good_manifest b/graph/fixtures/validate_manifest/good_manifest new file mode 100644 index 00000000..b107de32 --- /dev/null +++ b/graph/fixtures/validate_manifest/good_manifest @@ -0,0 +1,38 @@ +{ + "schemaVersion": 1, + "name": "library/hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "history": [ + { + "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" + } + ], + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", + "kty": "EC", + "x": 
"Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", + "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" + }, + "alg": "ES256" + }, + "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", + "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" + } + ] +} \ No newline at end of file diff --git a/graph/fixtures/validate_manifest/no_signature_manifest b/graph/fixtures/validate_manifest/no_signature_manifest new file mode 100644 index 00000000..7a79540a --- /dev/null +++ b/graph/fixtures/validate_manifest/no_signature_manifest @@ -0,0 +1,22 @@ +{ + "schemaVersion": 1, + "name": "library/hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "history": [ + { + "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in 
/\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" + } + ] +} diff --git a/graph/graph.go b/graph/graph.go new file mode 100644 index 00000000..fd73bfc3 --- /dev/null +++ b/graph/graph.go @@ -0,0 +1,752 @@ +package graph + +import ( + "compress/gzip" + "crypto/sha256" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/autogen/dockerversion" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/progressreader" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/truncindex" + "github.com/docker/docker/runconfig" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +// v1ImageDescriptor is a non-content-addressable image descriptor +type v1ImageDescriptor struct { + img *image.Image +} + +// ID returns the image ID specified in the image structure. +func (img v1ImageDescriptor) ID() string { + return img.img.ID +} + +// Parent returns the parent ID specified in the image structure. +func (img v1ImageDescriptor) Parent() string { + return img.img.Parent +} + +// MarshalConfig renders the image structure into JSON. +func (img v1ImageDescriptor) MarshalConfig() ([]byte, error) { + return json.Marshal(img.img) +} + +// The type is used to protect pulling or building related image +// layers from deleteing when filtered by dangling=true +// The key of layers is the images ID which is pulling or building +// The value of layers is a slice which hold layer IDs referenced to +// pulling or building images +type retainedLayers struct { + layerHolders map[string]map[string]struct{} // map[layerID]map[sessionID] + sync.Mutex +} + +func (r *retainedLayers) Add(sessionID string, layerIDs []string) { + r.Lock() + defer r.Unlock() + for _, layerID := range layerIDs { + if r.layerHolders[layerID] == nil { + r.layerHolders[layerID] = map[string]struct{}{} + } + r.layerHolders[layerID][sessionID] = struct{}{} + } +} + +func (r *retainedLayers) Delete(sessionID string, layerIDs []string) { + r.Lock() + defer r.Unlock() + for _, layerID := range layerIDs { + holders, ok := r.layerHolders[layerID] + if !ok { + continue + } + delete(holders, sessionID) + if len(holders) == 0 { + delete(r.layerHolders, layerID) // Delete any empty reference set. + } + } +} + +func (r *retainedLayers) Exists(layerID string) bool { + r.Lock() + _, exists := r.layerHolders[layerID] + r.Unlock() + return exists +} + +// A Graph is a store for versioned filesystem images and the relationship between them. 
+// A Graph is a store for versioned filesystem images and the relationship between them.
+type Graph struct {
+ root string
+ idIndex *truncindex.TruncIndex
+ driver graphdriver.Driver
+ imageMutex imageMutex // protect images in driver.
+ retained *retainedLayers
+}
+
+// file names for ./graph/<ID>/
+const (
+ jsonFileName = "json"
+ layersizeFileName = "layersize"
+ digestFileName = "checksum"
+ tarDataFileName = "tar-data.json.gz"
+ v1CompatibilityFileName = "v1Compatibility"
+ parentFileName = "parent"
+)
+
+var (
+ // ErrDigestNotSet is used when requesting the digest for a layer
+ // but the layer has no digest value or content from which to
+ // compute the digest.
+ ErrDigestNotSet = errors.New("digest is not set for layer")
+)
+
+// NewGraph instantiates a new graph at the given root path in the filesystem.
+// `root` will be created if it doesn't exist.
+func NewGraph(root string, driver graphdriver.Driver) (*Graph, error) {
+ abspath, err := filepath.Abs(root)
+ if err != nil {
+ return nil, err
+ }
+ // Create the root directory if it doesn't exist
+ if err := system.MkdirAll(root, 0700); err != nil && !os.IsExist(err) {
+ return nil, err
+ }
+
+ graph := &Graph{
+ root: abspath,
+ idIndex: truncindex.NewTruncIndex([]string{}),
+ driver: driver,
+ retained: &retainedLayers{layerHolders: make(map[string]map[string]struct{})},
+ }
+ if err := graph.restore(); err != nil {
+ return nil, err
+ }
+ return graph, nil
+}
+
+// IsHeld returns whether the given layerID is being used by an ongoing pull or build.
+func (graph *Graph) IsHeld(layerID string) bool {
+ return graph.retained.Exists(layerID)
+}
+
+func (graph *Graph) restore() error {
+ dir, err := ioutil.ReadDir(graph.root)
+ if err != nil {
+ return err
+ }
+ var ids = []string{}
+ for _, v := range dir {
+ id := v.Name()
+ if graph.driver.Exists(id) {
+ ids = append(ids, id)
+ }
+ }
+
+ baseIds, err := graph.restoreBaseImages()
+ if err != nil {
+ return err
+ }
+ ids = append(ids, baseIds...)
+
+ graph.idIndex = truncindex.NewTruncIndex(ids)
+ logrus.Debugf("Restored %d elements", len(ids))
+ return nil
+}
+
+// FIXME: Implement error subclass instead of looking at the error text
+// Note: This is the way golang implements os.IsNotExist on Plan9
+func (graph *Graph) IsNotExist(err error, id string) bool {
+ return err != nil && (strings.Contains(strings.ToLower(err.Error()), "does not exist") || strings.Contains(strings.ToLower(err.Error()), "no such")) && strings.Contains(err.Error(), id)
+}
+
+// Exists returns true if an image is registered at the given id.
+// If the image doesn't exist or if an error is encountered, false is returned.
+func (graph *Graph) Exists(id string) bool {
+ if _, err := graph.Get(id); err != nil {
+ return false
+ }
+ return true
+}
+
+// Get returns the image with the given id, or an error if the image doesn't exist.
+func (graph *Graph) Get(name string) (*image.Image, error) {
+ id, err := graph.idIndex.Get(name)
+ if err != nil {
+ return nil, fmt.Errorf("could not find image: %v", err)
+ }
+ img, err := graph.loadImage(id)
+ if err != nil {
+ return nil, err
+ }
+ if img.ID != id {
+ return nil, fmt.Errorf("Image stored at '%s' has wrong id '%s'", id, img.ID)
+ }
+
+ if img.Size < 0 {
+ size, err := graph.driver.DiffSize(img.ID, img.Parent)
+ if err != nil {
+ return nil, fmt.Errorf("unable to calculate size of image id %q: %s", img.ID, err)
+ }
+
+ img.Size = size
+ if err := graph.saveSize(graph.imageRoot(id), int(img.Size)); err != nil {
+ return nil, err
+ }
+ }
+ return img, nil
+}
+
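Taken together, NewGraph, restore, and Get give the bootstrap sequence. A standalone sketch of standing up a graph the way the daemon and the tests in graph_test.go do; the root path is illustrative, and the nil driver options mirror the tests:

package main

import (
    "log"

    "github.com/docker/docker/daemon/graphdriver"
    "github.com/docker/docker/graph"
)

func main() {
    root := "/var/lib/docker/graph-demo" // illustrative root directory

    // Let graphdriver pick a backend for this root (nil options, as in the tests).
    driver, err := graphdriver.New(root, nil)
    if err != nil {
        log.Fatal(err)
    }
    // NewGraph creates the root (0700) if needed and restores the ID index.
    g, err := graph.NewGraph(root, driver)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("images restored: %d", len(g.Map()))
}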
+// Create creates a new image and registers it in the graph.
+func (graph *Graph) Create(layerData archive.ArchiveReader, containerID, containerImage, comment, author string, containerConfig, config *runconfig.Config) (*image.Image, error) {
+ img := &image.Image{
+ ID: stringid.GenerateRandomID(),
+ Comment: comment,
+ Created: time.Now().UTC(),
+ DockerVersion: dockerversion.VERSION,
+ Author: author,
+ Config: config,
+ Architecture: runtime.GOARCH,
+ OS: runtime.GOOS,
+ }
+
+ if containerID != "" {
+ img.Parent = containerImage
+ img.Container = containerID
+ img.ContainerConfig = *containerConfig
+ }
+
+ if err := graph.Register(v1ImageDescriptor{img}, layerData); err != nil {
+ return nil, err
+ }
+ return img, nil
+}
+
+// Register imports a pre-existing image into the graph.
+// Returns nil if the image is already registered.
+func (graph *Graph) Register(im image.ImageDescriptor, layerData archive.ArchiveReader) (err error) {
+ imgID := im.ID()
+
+ if err := image.ValidateID(imgID); err != nil {
+ return err
+ }
+
+ // We need this entire operation to be atomic within the engine. Note that
+ // this doesn't mean Register is fully safe yet.
+ graph.imageMutex.Lock(imgID)
+ defer graph.imageMutex.Unlock(imgID)
+
+ return graph.register(im, layerData)
+}
+
+func (graph *Graph) register(im image.ImageDescriptor, layerData archive.ArchiveReader) (err error) {
+ imgID := im.ID()
+
+ // Skip register if image is already registered
+ if graph.Exists(imgID) {
+ return nil
+ }
+
+ // The returned `error` must be named in this function's signature so that
+ // `err` is not shadowed in this deferred cleanup.
+ defer func() {
+ // If any error occurs, remove the new dir from the driver.
+ // Don't check for errors since the dir might not have been created.
+ if err != nil {
+ graph.driver.Remove(imgID)
+ }
+ }()
+
+ // Ensure that the image root does not exist on the filesystem
+ // when it is not registered in the graph.
+ // This is common when you switch from one graph driver to another
+ if err := os.RemoveAll(graph.imageRoot(imgID)); err != nil && !os.IsNotExist(err) {
+ return err
+ }
+
+ // If the driver has this ID but the graph doesn't, remove it from the driver to start fresh.
+ // (the graph is the source of truth).
+ // Ignore errors, since we don't know if the driver correctly returns ErrNotExist.
+ // (FIXME: make that mandatory for drivers).
+ graph.driver.Remove(imgID)
+
+ tmp, err := graph.mktemp("")
+ defer os.RemoveAll(tmp)
+ if err != nil {
+ return fmt.Errorf("mktemp failed: %s", err)
+ }
+
+ parent := im.Parent()
+
+ // Create root filesystem in the driver
+ if err := createRootFilesystemInDriver(graph, imgID, parent, layerData); err != nil {
+ return err
+ }
+
+ // Apply the diff/layer
+ config, err := im.MarshalConfig()
+ if err != nil {
+ return err
+ }
+ if err := graph.storeImage(imgID, parent, config, layerData, tmp); err != nil {
+ return err
+ }
+ // Commit
+ if err := os.Rename(tmp, graph.imageRoot(imgID)); err != nil {
+ return err
+ }
+ graph.idIndex.Add(imgID)
+ return nil
+}
+
+func createRootFilesystemInDriver(graph *Graph, id, parent string, layerData archive.ArchiveReader) error {
+ if err := graph.driver.Create(id, parent); err != nil {
+ return fmt.Errorf("Driver %s failed to create image rootfs %s: %s", graph.driver, id, err)
+ }
+ return nil
+}
+
+// TempLayerArchive creates a temporary archive of the given image's filesystem layer.
+// The archive is stored on disk and will be automatically deleted as soon as it has been read.
+// If output is not nil, a human-readable progress bar will be written to it.
+func (graph *Graph) TempLayerArchive(id string, sf *streamformatter.StreamFormatter, output io.Writer) (*archive.TempArchive, error) { + image, err := graph.Get(id) + if err != nil { + return nil, err + } + tmp, err := graph.mktemp("") + if err != nil { + return nil, err + } + a, err := graph.TarLayer(image) + if err != nil { + return nil, err + } + progressReader := progressreader.New(progressreader.Config{ + In: a, + Out: output, + Formatter: sf, + Size: 0, + NewLines: false, + ID: stringid.TruncateID(id), + Action: "Buffering to disk", + }) + defer progressReader.Close() + return archive.NewTempArchive(progressReader, tmp) +} + +// mktemp creates a temporary sub-directory inside the graph's filesystem. +func (graph *Graph) mktemp(id string) (string, error) { + dir := filepath.Join(graph.root, "_tmp", stringid.GenerateRandomID()) + if err := system.MkdirAll(dir, 0700); err != nil { + return "", err + } + return dir, nil +} + +func (graph *Graph) newTempFile() (*os.File, error) { + tmp, err := graph.mktemp("") + if err != nil { + return nil, err + } + return ioutil.TempFile(tmp, "") +} + +func bufferToFile(f *os.File, src io.Reader) (int64, digest.Digest, error) { + var ( + h = sha256.New() + w = gzip.NewWriter(io.MultiWriter(f, h)) + ) + _, err := io.Copy(w, src) + w.Close() + if err != nil { + return 0, "", err + } + n, err := f.Seek(0, os.SEEK_CUR) + if err != nil { + return 0, "", err + } + if _, err := f.Seek(0, 0); err != nil { + return 0, "", err + } + return n, digest.NewDigest("sha256", h), nil +} + +// Delete atomically removes an image from the graph. +func (graph *Graph) Delete(name string) error { + id, err := graph.idIndex.Get(name) + if err != nil { + return err + } + tmp, err := graph.mktemp("") + graph.idIndex.Delete(id) + if err == nil { + if err := os.Rename(graph.imageRoot(id), tmp); err != nil { + // On err make tmp point to old dir and cleanup unused tmp dir + os.RemoveAll(tmp) + tmp = graph.imageRoot(id) + } + } else { + // On err make tmp point to old dir for cleanup + tmp = graph.imageRoot(id) + } + // Remove rootfs data from the driver + graph.driver.Remove(id) + // Remove the trashed image directory + return os.RemoveAll(tmp) +} + +// Map returns a list of all images in the graph, addressable by ID. +func (graph *Graph) Map() map[string]*image.Image { + images := make(map[string]*image.Image) + graph.walkAll(func(image *image.Image) { + images[image.ID] = image + }) + return images +} + +// walkAll iterates over each image in the graph, and passes it to a handler. +// The walking order is undetermined. +func (graph *Graph) walkAll(handler func(*image.Image)) { + graph.idIndex.Iterate(func(id string) { + if img, err := graph.Get(id); err != nil { + return + } else if handler != nil { + handler(img) + } + }) +} + +// ByParent returns a lookup table of images by their parent. +// If an image of id ID has 3 children images, then the value for key ID +// will be a list of 3 images. +// If an image has no children, it will not have an entry in the table. +func (graph *Graph) ByParent() map[string][]*image.Image { + byParent := make(map[string][]*image.Image) + graph.walkAll(func(img *image.Image) { + parent, err := graph.Get(img.Parent) + if err != nil { + return + } + if children, exists := byParent[parent.ID]; exists { + byParent[parent.ID] = append(children, img) + } else { + byParent[parent.ID] = []*image.Image{img} + } + }) + return byParent +} + +// If the images and layers are in pulling chain, retain them. 
+// If not, they may be deleted by rmi when the dangling filter is applied.
+func (graph *Graph) Retain(sessionID string, layerIDs ...string) {
+ graph.retained.Add(sessionID, layerIDs)
+}
+
+// Release removes the referenced image id from the provided set of layers.
+func (graph *Graph) Release(sessionID string, layerIDs ...string) {
+ graph.retained.Delete(sessionID, layerIDs)
+}
+
+// Heads returns all heads in the graph, keyed by id.
+// A head is an image which is not the parent of another image in the graph.
+func (graph *Graph) Heads() map[string]*image.Image {
+ heads := make(map[string]*image.Image)
+ byParent := graph.ByParent()
+ graph.walkAll(func(image *image.Image) {
+ // If it's not in the byParent lookup table, then
+ // it's not a parent -> so it's a head!
+ if _, exists := byParent[image.ID]; !exists {
+ heads[image.ID] = image
+ }
+ })
+ return heads
+}
+
+func (graph *Graph) imageRoot(id string) string {
+ return filepath.Join(graph.root, id)
+}
+
+// loadImage fetches the image with the given id from the graph.
+func (graph *Graph) loadImage(id string) (*image.Image, error) {
+ root := graph.imageRoot(id)
+
+ // Open the JSON file to decode by streaming
+ jsonSource, err := os.Open(jsonPath(root))
+ if err != nil {
+ return nil, err
+ }
+ defer jsonSource.Close()
+
+ img := &image.Image{}
+ dec := json.NewDecoder(jsonSource)
+
+ // Decode the JSON data
+ if err := dec.Decode(img); err != nil {
+ return nil, err
+ }
+
+ if img.ID == "" {
+ img.ID = id
+ }
+
+ if img.Parent == "" && img.ParentID != "" && img.ParentID.Validate() == nil {
+ img.Parent = img.ParentID.Hex()
+ }
+
+ // compatibilityID for parent
+ parent, err := ioutil.ReadFile(filepath.Join(root, parentFileName))
+ if err == nil && len(parent) > 0 {
+ img.Parent = string(parent)
+ }
+
+ if err := image.ValidateID(img.ID); err != nil {
+ return nil, err
+ }
+
+ if buf, err := ioutil.ReadFile(filepath.Join(root, layersizeFileName)); err != nil {
+ if !os.IsNotExist(err) {
+ return nil, err
+ }
+ // If the layersize file does not exist then set the size to a negative number
+ // because a layer size of 0 (zero) is valid
+ img.Size = -1
+ } else {
+ // Using Atoi here instead would temporarily convert the size to a machine
+ // dependent integer type, which causes images larger than 2^31 bytes to
+ // display negative sizes on 32-bit machines:
+ size, err := strconv.ParseInt(string(buf), 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ img.Size = int64(size)
+ }
+
+ return img, nil
+}
+
+// saveSize stores the `size` in the provided graph `img` directory `root`.
+func (graph *Graph) saveSize(root string, size int) error {
+ if err := ioutil.WriteFile(filepath.Join(root, layersizeFileName), []byte(strconv.Itoa(size)), 0600); err != nil {
+ return fmt.Errorf("Error storing image size in %s/%s: %s", root, layersizeFileName, err)
+ }
+ return nil
+}
+
+// SetLayerDigest sets the digest for the image layer to the provided value.
+func (graph *Graph) SetLayerDigest(id string, dgst digest.Digest) error {
+ graph.imageMutex.Lock(id)
+ defer graph.imageMutex.Unlock(id)
+
+ return graph.setLayerDigest(id, dgst)
+}
+
+func (graph *Graph) setLayerDigest(id string, dgst digest.Digest) error {
+ root := graph.imageRoot(id)
+ if err := ioutil.WriteFile(filepath.Join(root, digestFileName), []byte(dgst.String()), 0600); err != nil {
+ return fmt.Errorf("Error storing digest in %s/%s: %s", root, digestFileName, err)
+ }
+ return nil
+}
+
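The checksum file written by setLayerDigest holds the string form of a digest, e.g. "sha256:&lt;hex&gt;", and getLayerDigest parses it back with the docker/distribution digest package. A standalone sketch of that round trip; the digest value is the empty-layer blobSum from the fixtures above:

package main

import (
    "fmt"

    "github.com/docker/distribution/digest"
)

func main() {
    // String form as stored in the "checksum" file.
    s := "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"

    d, err := digest.ParseDigest(s) // the same parse getLayerDigest performs
    if err != nil {
        panic(err)
    }
    fmt.Println(d.Algorithm()) // sha256
    fmt.Println(d.Hex())       // a3ed95caeb02...
}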
+// GetLayerDigest gets the digest for the provided image layer ID.
+func (graph *Graph) GetLayerDigest(id string) (digest.Digest, error) {
+ graph.imageMutex.Lock(id)
+ defer graph.imageMutex.Unlock(id)
+
+ return graph.getLayerDigest(id)
+}
+
+func (graph *Graph) getLayerDigest(id string) (digest.Digest, error) {
+ root := graph.imageRoot(id)
+ cs, err := ioutil.ReadFile(filepath.Join(root, digestFileName))
+ if err != nil {
+ if os.IsNotExist(err) {
+ return "", ErrDigestNotSet
+ }
+ return "", err
+ }
+ return digest.ParseDigest(string(cs))
+}
+
+// SetV1CompatibilityConfig stores the v1Compatibility JSON data associated
+// with the image in the manifest to disk
+func (graph *Graph) SetV1CompatibilityConfig(id string, data []byte) error {
+ graph.imageMutex.Lock(id)
+ defer graph.imageMutex.Unlock(id)
+
+ return graph.setV1CompatibilityConfig(id, data)
+}
+
+func (graph *Graph) setV1CompatibilityConfig(id string, data []byte) error {
+ root := graph.imageRoot(id)
+ return ioutil.WriteFile(filepath.Join(root, v1CompatibilityFileName), data, 0600)
+}
+
+// GetV1CompatibilityConfig reads the v1Compatibility JSON data for the image
+// from disk
+func (graph *Graph) GetV1CompatibilityConfig(id string) ([]byte, error) {
+ graph.imageMutex.Lock(id)
+ defer graph.imageMutex.Unlock(id)
+
+ return graph.getV1CompatibilityConfig(id)
+}
+
+func (graph *Graph) getV1CompatibilityConfig(id string) ([]byte, error) {
+ root := graph.imageRoot(id)
+ return ioutil.ReadFile(filepath.Join(root, v1CompatibilityFileName))
+}
+
+// GenerateV1CompatibilityChain makes sure v1Compatibility JSON data exists
+// for the image. If it doesn't, it generates and stores it for the image and
+// all of its parents based on the image config JSON.
+func (graph *Graph) GenerateV1CompatibilityChain(id string) ([]byte, error) {
+ graph.imageMutex.Lock(id)
+ defer graph.imageMutex.Unlock(id)
+
+ if v1config, err := graph.getV1CompatibilityConfig(id); err == nil {
+ return v1config, nil
+ }
+
+ // generate new, store it to disk
+ img, err := graph.Get(id)
+ if err != nil {
+ return nil, err
+ }
+
+ digestPrefix := string(digest.Canonical) + ":"
+ img.ID = strings.TrimPrefix(img.ID, digestPrefix)
+
+ if img.Parent != "" {
+ parentConfig, err := graph.GenerateV1CompatibilityChain(img.Parent)
+ if err != nil {
+ return nil, err
+ }
+ var parent struct{ ID string }
+ err = json.Unmarshal(parentConfig, &parent)
+ if err != nil {
+ return nil, err
+ }
+ img.Parent = parent.ID
+ }
+
+ json, err := json.Marshal(img)
+ if err != nil {
+ return nil, err
+ }
+ if err := graph.setV1CompatibilityConfig(id, json); err != nil {
+ return nil, err
+ }
+ return json, nil
+}
+
+// RawJSON returns the JSON representation for an image as a byte array.
+func (graph *Graph) RawJSON(id string) ([]byte, error) { + root := graph.imageRoot(id) + + buf, err := ioutil.ReadFile(jsonPath(root)) + if err != nil { + return nil, fmt.Errorf("Failed to read json for image %s: %s", id, err) + } + + return buf, nil +} + +func jsonPath(root string) string { + return filepath.Join(root, jsonFileName) +} + +func (graph *Graph) disassembleAndApplyTarLayer(id, parent string, layerData archive.ArchiveReader, root string) (size int64, err error) { + // this is saving the tar-split metadata + mf, err := os.OpenFile(filepath.Join(root, tarDataFileName), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600)) + if err != nil { + return 0, err + } + mfz := gzip.NewWriter(mf) + metaPacker := storage.NewJSONPacker(mfz) + defer mf.Close() + defer mfz.Close() + + inflatedLayerData, err := archive.DecompressStream(layerData) + if err != nil { + return 0, err + } + + // we're passing nil here for the file putter, because the ApplyDiff will + // handle the extraction of the archive + rdr, err := asm.NewInputTarStream(inflatedLayerData, metaPacker, nil) + if err != nil { + return 0, err + } + + if size, err = graph.driver.ApplyDiff(id, parent, archive.ArchiveReader(rdr)); err != nil { + return 0, err + } + + return +} + +func (graph *Graph) assembleTarLayer(img *image.Image) (archive.Archive, error) { + root := graph.imageRoot(img.ID) + mFileName := filepath.Join(root, tarDataFileName) + mf, err := os.Open(mFileName) + if err != nil { + if !os.IsNotExist(err) { + logrus.Errorf("failed to open %q: %s", mFileName, err) + } + return nil, err + } + pR, pW := io.Pipe() + // this will need to be in a goroutine, as we are returning the stream of a + // tar archive, but can not close the metadata reader early (when this + // function returns)... + go func() { + defer mf.Close() + // let's reassemble! 
+ logrus.Debugf("[graph] TarLayer with reassembly: %s", img.ID) + mfz, err := gzip.NewReader(mf) + if err != nil { + pW.CloseWithError(fmt.Errorf("[graph] error with %s: %s", mFileName, err)) + return + } + defer mfz.Close() + + // get our relative path to the container + fsLayer, err := graph.driver.Get(img.ID, "") + if err != nil { + pW.CloseWithError(err) + return + } + defer graph.driver.Put(img.ID) + + metaUnpacker := storage.NewJSONUnpacker(mfz) + fileGetter := storage.NewPathFileGetter(fsLayer) + logrus.Debugf("[graph] %s is at %q", img.ID, fsLayer) + ots := asm.NewOutputTarStream(fileGetter, metaUnpacker) + defer ots.Close() + if _, err := io.Copy(pW, ots); err != nil { + pW.CloseWithError(err) + return + } + pW.Close() + }() + return pR, nil +} diff --git a/graph/graph_test.go b/graph/graph_test.go new file mode 100644 index 00000000..3782b7db --- /dev/null +++ b/graph/graph_test.go @@ -0,0 +1,308 @@ +package graph + +import ( + "errors" + "io" + "io/ioutil" + "os" + "path" + "testing" + "time" + + "github.com/docker/docker/autogen/dockerversion" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/stringid" +) + +func TestMount(t *testing.T) { + graph, driver := tempGraph(t) + defer os.RemoveAll(graph.root) + defer driver.Cleanup() + + archive, err := fakeTar() + if err != nil { + t.Fatal(err) + } + image, err := graph.Create(archive, "", "", "Testing", "", nil, nil) + if err != nil { + t.Fatal(err) + } + tmp, err := ioutil.TempDir("", "docker-test-graph-mount-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + rootfs := path.Join(tmp, "rootfs") + if err := os.MkdirAll(rootfs, 0700); err != nil { + t.Fatal(err) + } + rw := path.Join(tmp, "rw") + if err := os.MkdirAll(rw, 0700); err != nil { + t.Fatal(err) + } + + if _, err := driver.Get(image.ID, ""); err != nil { + t.Fatal(err) + } + +} + +func TestInit(t *testing.T) { + graph, _ := tempGraph(t) + defer nukeGraph(graph) + // Root should exist + if _, err := os.Stat(graph.root); err != nil { + t.Fatal(err) + } + // Map() should be empty + l := graph.Map() + if len(l) != 0 { + t.Fatalf("len(Map()) should return %d, not %d", 0, len(l)) + } +} + +// Test that Register can be interrupted cleanly without side effects +func TestInterruptedRegister(t *testing.T) { + graph, _ := tempGraph(t) + defer nukeGraph(graph) + badArchive, w := io.Pipe() // Use a pipe reader as a fake archive which never yields data + image := &image.Image{ + ID: stringid.GenerateRandomID(), + Comment: "testing", + Created: time.Now(), + } + w.CloseWithError(errors.New("But I'm not a tarball!")) // (Nobody's perfect, darling) + graph.Register(v1ImageDescriptor{image}, badArchive) + if _, err := graph.Get(image.ID); err == nil { + t.Fatal("Image should not exist after Register is interrupted") + } + // Registering the same image again should succeed if the first register was interrupted + goodArchive, err := fakeTar() + if err != nil { + t.Fatal(err) + } + if err := graph.Register(v1ImageDescriptor{image}, goodArchive); err != nil { + t.Fatal(err) + } +} + +// FIXME: Do more extensive tests (ex: create multiple, delete, recreate; +// create multiple, check the amount of images and paths, etc..) 
+func TestGraphCreate(t *testing.T) { + graph, _ := tempGraph(t) + defer nukeGraph(graph) + archive, err := fakeTar() + if err != nil { + t.Fatal(err) + } + img, err := graph.Create(archive, "", "", "Testing", "", nil, nil) + if err != nil { + t.Fatal(err) + } + if err := image.ValidateID(img.ID); err != nil { + t.Fatal(err) + } + if img.Comment != "Testing" { + t.Fatalf("Wrong comment: should be '%s', not '%s'", "Testing", img.Comment) + } + if img.DockerVersion != dockerversion.VERSION { + t.Fatalf("Wrong docker_version: should be '%s', not '%s'", dockerversion.VERSION, img.DockerVersion) + } + images := graph.Map() + if l := len(images); l != 1 { + t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l) + } + if images[img.ID] == nil { + t.Fatalf("Could not find image with id %s", img.ID) + } +} + +func TestRegister(t *testing.T) { + graph, _ := tempGraph(t) + defer nukeGraph(graph) + archive, err := fakeTar() + if err != nil { + t.Fatal(err) + } + image := &image.Image{ + ID: stringid.GenerateRandomID(), + Comment: "testing", + Created: time.Now(), + } + err = graph.Register(v1ImageDescriptor{image}, archive) + if err != nil { + t.Fatal(err) + } + images := graph.Map() + if l := len(images); l != 1 { + t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l) + } + if resultImg, err := graph.Get(image.ID); err != nil { + t.Fatal(err) + } else { + if resultImg.ID != image.ID { + t.Fatalf("Wrong image ID. Should be '%s', not '%s'", image.ID, resultImg.ID) + } + if resultImg.Comment != image.Comment { + t.Fatalf("Wrong image comment. Should be '%s', not '%s'", image.Comment, resultImg.Comment) + } + } +} + +// Test that an image can be deleted by its shorthand prefix +func TestDeletePrefix(t *testing.T) { + graph, _ := tempGraph(t) + defer nukeGraph(graph) + img := createTestImage(graph, t) + if err := graph.Delete(stringid.TruncateID(img.ID)); err != nil { + t.Fatal(err) + } + assertNImages(graph, t, 0) +} + +func TestDelete(t *testing.T) { + graph, _ := tempGraph(t) + defer nukeGraph(graph) + archive, err := fakeTar() + if err != nil { + t.Fatal(err) + } + assertNImages(graph, t, 0) + img, err := graph.Create(archive, "", "", "Bla bla", "", nil, nil) + if err != nil { + t.Fatal(err) + } + assertNImages(graph, t, 1) + if err := graph.Delete(img.ID); err != nil { + t.Fatal(err) + } + assertNImages(graph, t, 0) + + archive, err = fakeTar() + if err != nil { + t.Fatal(err) + } + // Test 2 create (same name) / 1 delete + img1, err := graph.Create(archive, "", "", "Testing", "", nil, nil) + if err != nil { + t.Fatal(err) + } + archive, err = fakeTar() + if err != nil { + t.Fatal(err) + } + if _, err = graph.Create(archive, "", "", "Testing", "", nil, nil); err != nil { + t.Fatal(err) + } + assertNImages(graph, t, 2) + if err := graph.Delete(img1.ID); err != nil { + t.Fatal(err) + } + assertNImages(graph, t, 1) + + // Test delete wrong name + if err := graph.Delete("Not_foo"); err == nil { + t.Fatalf("Deleting wrong ID should return an error") + } + assertNImages(graph, t, 1) + + archive, err = fakeTar() + if err != nil { + t.Fatal(err) + } + // Test delete twice (pull -> rm -> pull -> rm) + if err := graph.Register(v1ImageDescriptor{img1}, archive); err != nil { + t.Fatal(err) + } + if err := graph.Delete(img1.ID); err != nil { + t.Fatal(err) + } + assertNImages(graph, t, 1) +} + +func TestByParent(t *testing.T) { + archive1, _ := fakeTar() + archive2, _ := fakeTar() + archive3, _ := fakeTar() + + graph, _ := tempGraph(t) + defer nukeGraph(graph) + parentImage := &image.Image{ + 
ID: stringid.GenerateRandomID(),
+ Comment: "parent",
+ Created: time.Now(),
+ Parent: "",
+ }
+ childImage1 := &image.Image{
+ ID: stringid.GenerateRandomID(),
+ Comment: "child1",
+ Created: time.Now(),
+ Parent: parentImage.ID,
+ }
+ childImage2 := &image.Image{
+ ID: stringid.GenerateRandomID(),
+ Comment: "child2",
+ Created: time.Now(),
+ Parent: parentImage.ID,
+ }
+
+ err := graph.Register(v1ImageDescriptor{parentImage}, archive1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = graph.Register(v1ImageDescriptor{childImage1}, archive2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = graph.Register(v1ImageDescriptor{childImage2}, archive3)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ byParent := graph.ByParent()
+ numChildren := len(byParent[parentImage.ID])
+ if numChildren != 2 {
+ t.Fatalf("Expected 2 children, found %d", numChildren)
+ }
+}
+
+func createTestImage(graph *Graph, t *testing.T) *image.Image {
+ archive, err := fakeTar()
+ if err != nil {
+ t.Fatal(err)
+ }
+ img, err := graph.Create(archive, "", "", "Test image", "", nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return img
+}
+
+func assertNImages(graph *Graph, t *testing.T, n int) {
+ images := graph.Map()
+ if actualN := len(images); actualN != n {
+ t.Fatalf("Expected %d images, found %d", n, actualN)
+ }
+}
+
+func tempGraph(t *testing.T) (*Graph, graphdriver.Driver) {
+ tmp, err := ioutil.TempDir("", "docker-graph-")
+ if err != nil {
+ t.Fatal(err)
+ }
+ driver, err := graphdriver.New(tmp, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ graph, err := NewGraph(tmp, driver)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return graph, driver
+}
+
+func nukeGraph(graph *Graph) {
+ graph.driver.Cleanup()
+ os.RemoveAll(graph.root)
+}
diff --git a/graph/graph_unix.go b/graph/graph_unix.go
new file mode 100644
index 00000000..a6ca7411
--- /dev/null
+++ b/graph/graph_unix.go
@@ -0,0 +1,122 @@
+// +build !windows
+
+package graph
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/image"
+ "github.com/docker/docker/pkg/archive"
+ "github.com/docker/docker/pkg/system"
+)
+
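SetupInitLayer, defined just below, builds the skeleton of special files and mountpoints a container expects. A standalone sketch of calling it against a scratch directory (the temp-dir handling is illustrative, and this only works on the unix build); after the call, /etc/mtab inside the layer is a symlink to /proc/mounts and the other entries are empty directories and files:

package main

import (
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"

    "github.com/docker/docker/graph"
)

func main() {
    dir, err := ioutil.TempDir("", "init-layer-")
    if err != nil {
        panic(err)
    }
    defer os.RemoveAll(dir)

    if err := graph.SetupInitLayer(dir); err != nil { // build the mountpoint skeleton
        panic(err)
    }

    target, err := os.Readlink(filepath.Join(dir, "etc", "mtab"))
    if err != nil {
        panic(err)
    }
    fmt.Println(target) // /proc/mounts
}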
+// SetupInitLayer populates a directory with mountpoints suitable
+// for bind-mounting dockerinit into the container. The mountpoint is simply an
+// empty file at /.dockerinit
+//
+// This extra layer is used by all containers as the top-most ro layer. It protects
+// the container from unwanted side-effects on the rw layer.
+func SetupInitLayer(initLayer string) error {
+ for pth, typ := range map[string]string{
+ "/dev/pts": "dir",
+ "/dev/shm": "dir",
+ "/proc": "dir",
+ "/sys": "dir",
+ "/.dockerinit": "file",
+ "/.dockerenv": "file",
+ "/etc/resolv.conf": "file",
+ "/etc/hosts": "file",
+ "/etc/hostname": "file",
+ "/dev/console": "file",
+ "/etc/mtab": "/proc/mounts",
+ } {
+ parts := strings.Split(pth, "/")
+ prev := "/"
+ for _, p := range parts[1:] {
+ prev = filepath.Join(prev, p)
+ syscall.Unlink(filepath.Join(initLayer, prev))
+ }
+
+ if _, err := os.Stat(filepath.Join(initLayer, pth)); err != nil {
+ if os.IsNotExist(err) {
+ if err := system.MkdirAll(filepath.Join(initLayer, filepath.Dir(pth)), 0755); err != nil {
+ return err
+ }
+ switch typ {
+ case "dir":
+ if err := system.MkdirAll(filepath.Join(initLayer, pth), 0755); err != nil {
+ return err
+ }
+ case "file":
+ f, err := os.OpenFile(filepath.Join(initLayer, pth), os.O_CREATE, 0755)
+ if err != nil {
+ return err
+ }
+ f.Close()
+ default:
+ if err := os.Symlink(typ, filepath.Join(initLayer, pth)); err != nil {
+ return err
+ }
+ }
+ } else {
+ return err
+ }
+ }
+ }
+
+ // Layer is ready to use, if it wasn't before.
+ return nil
+}
+
+func (graph *Graph) restoreBaseImages() ([]string, error) {
+ return nil, nil
+}
+
+// storeImage stores file system layer data for the given image to the
+// graph's storage driver. Image metadata is stored in a file
+// at the specified root directory.
+func (graph *Graph) storeImage(id, parent string, config []byte, layerData archive.ArchiveReader, root string) (err error) {
+ var size int64
+ // Store the layer. If layerData is not nil, unpack it into the new layer
+ if layerData != nil {
+ if size, err = graph.disassembleAndApplyTarLayer(id, parent, layerData, root); err != nil {
+ return err
+ }
+ }
+
+ if err := graph.saveSize(root, int(size)); err != nil {
+ return err
+ }
+
+ if err := ioutil.WriteFile(jsonPath(root), config, 0600); err != nil {
+ return err
+ }
+
+ // If image is pointing to a parent via CompatibilityID write the reference to disk
+ img, err := image.NewImgJSON(config)
+ if err != nil {
+ return err
+ }
+
+ if img.ParentID.Validate() == nil && parent != img.ParentID.Hex() {
+ if err := ioutil.WriteFile(filepath.Join(root, parentFileName), []byte(parent), 0600); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// TarLayer returns a tar archive of the image's filesystem layer.
+func (graph *Graph) TarLayer(img *image.Image) (arch archive.Archive, err error) {
+ rdr, err := graph.assembleTarLayer(img)
+ if err != nil {
+ logrus.Debugf("[graph] TarLayer with traditional differ: %s", img.ID)
+ return graph.driver.Diff(img.ID, img.Parent)
+ }
+ return rdr, nil
+}
diff --git a/graph/graph_windows.go b/graph/graph_windows.go
new file mode 100644
index 00000000..33f4d877
--- /dev/null
+++ b/graph/graph_windows.go
@@ -0,0 +1,125 @@
+// +build windows
+
+package graph
+
+import (
+ "io/ioutil"
+ "path/filepath"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/daemon/graphdriver/windows"
+ "github.com/docker/docker/image"
+ "github.com/docker/docker/pkg/archive"
+)
+
+// SetupInitLayer populates a directory with mountpoints suitable
+// for bind-mounting dockerinit into the container. This is currently
+// a no-op on Windows.
+func SetupInitLayer(initLayer string) error {
+ return nil
+}
+
+func (graph *Graph) restoreBaseImages() ([]string, error) {
+ // TODO Windows. This needs implementing (@swernli)
+ return nil, nil
+}
+
+// ParentLayerIds returns a list of all parent image IDs for the given image.
+func (graph *Graph) ParentLayerIds(img *image.Image) (ids []string, err error) {
+ for i := img; i != nil && err == nil; i, err = graph.GetParent(i) {
+ ids = append(ids, i.ID)
+ }
+
+ return
+}
+
+// storeImage stores file system layer data for the given image to the
+// graph's storage driver. Image metadata is stored in a file
+// at the specified root directory.
+func (graph *Graph) storeImage(id, parent string, config []byte, layerData archive.ArchiveReader, root string) (err error) {
+ var size int64
+ if wd, ok := graph.driver.(*windows.WindowsGraphDriver); ok {
+ // Store the layer. If layerData is not nil and this isn't a base image,
+ // unpack it into the new layer
+ if layerData != nil && parent != "" {
+ var ids []string
+ parentImg, err := graph.Get(parent)
+ if err != nil {
+ return err
+ }
+
+ ids, err = graph.ParentLayerIds(parentImg)
+ if err != nil {
+ return err
+ }
+
+ if size, err = wd.Import(id, layerData, wd.LayerIdsToPaths(ids)); err != nil {
+ return err
+ }
+ }
+ } else {
+ // We keep this functionality here so that we can still work with the
+ // VFS driver during development. This will not be used for actual running
+ // of Windows containers. Without this code, it would not be possible to
+ // docker pull using the VFS driver.
+
+ // Store the layer. If layerData is not nil, unpack it into the new layer
+ if layerData != nil {
+ if size, err = graph.disassembleAndApplyTarLayer(id, parent, layerData, root); err != nil {
+ return err
+ }
+ }
+ }
+
+ if err := graph.saveSize(root, int(size)); err != nil {
+ return err
+ }
+
+ if err := ioutil.WriteFile(jsonPath(root), config, 0600); err != nil {
+ return err
+ }
+
+ // If image is pointing to a parent via CompatibilityID write the reference to disk
+ img, err := image.NewImgJSON(config)
+ if err != nil {
+ return err
+ }
+
+ if img.ParentID.Validate() == nil && parent != img.ParentID.Hex() {
+ if err := ioutil.WriteFile(filepath.Join(root, parentFileName), []byte(parent), 0600); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// TarLayer returns a tar archive of the image's filesystem layer.
+func (graph *Graph) TarLayer(img *image.Image) (arch archive.Archive, err error) {
+ if wd, ok := graph.driver.(*windows.WindowsGraphDriver); ok {
+ var ids []string
+ if img.Parent != "" {
+ parentImg, err := graph.Get(img.Parent)
+ if err != nil {
+ return nil, err
+ }
+
+ ids, err = graph.ParentLayerIds(parentImg)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return wd.Export(img.ID, wd.LayerIdsToPaths(ids))
+ } else {
+ // We keep this functionality here so that we can still work with the VFS
+ // driver during development. VFS is not supported (and just will not work)
+ // for Windows containers.
+ rdr, err := graph.assembleTarLayer(img)
+ if err != nil {
+ logrus.Debugf("[graph] TarLayer with traditional differ: %s", img.ID)
+ return graph.driver.Diff(img.ID, img.Parent)
+ }
+ return rdr, nil
+ }
+}
diff --git a/graph/history.go b/graph/history.go
new file mode 100644
index 00000000..4bb93fc3
--- /dev/null
+++ b/graph/history.go
@@ -0,0 +1,118 @@
+package graph
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/image"
+ "github.com/docker/docker/utils"
+)
+
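WalkHistory, defined below, drives both History and depth. A small in-package sketch of a caller flattening a lineage into IDs, newest first (img is assumed to be an already-loaded *image.Image):

// Sketch: collect the IDs of img and all of its ancestors.
func lineageIDs(g *Graph, img *image.Image) ([]string, error) {
    ids := []string{}
    err := g.WalkHistory(img, func(i image.Image) error {
        ids = append(ids, i.ID)
        return nil
    })
    return ids, err
}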
+// WalkHistory calls the handler function for each image in the
+// provided image's lineage, starting with the image itself and
+// walking up through its parents.
+func (graph *Graph) WalkHistory(img *image.Image, handler func(image.Image) error) (err error) {
+ currentImg := img
+ for currentImg != nil {
+ if handler != nil {
+ if err := handler(*currentImg); err != nil {
+ return err
+ }
+ }
+ currentImg, err = graph.GetParent(currentImg)
+ if err != nil {
+ return fmt.Errorf("Error while getting parent image: %v", err)
+ }
+ }
+ return nil
+}
+
+// depth returns the number of parents for the given image
+func (graph *Graph) depth(img *image.Image) (int, error) {
+ var (
+ count = 0
+ parent = img
+ err error
+ )
+
+ for parent != nil {
+ count++
+ parent, err = graph.GetParent(parent)
+ if err != nil {
+ return -1, err
+ }
+ }
+ return count, nil
+}
+
+// MaxImageDepth is the maximum image depth, set to the aufs default that
+// most kernels are compiled with.
+// For more information see: http://sourceforge.net/p/aufs/aufs3-standalone/ci/aufs3.12/tree/config.mk
+const MaxImageDepth = 127
+
+// CheckDepth returns an error if the depth of an image, as returned
+// by depth, is too large to support creating a container from it
+// on this daemon.
+func (graph *Graph) CheckDepth(img *image.Image) error {
+ // We add 2 layers to the depth because the container's rw and
+ // init layer add to the restriction
+ depth, err := graph.depth(img)
+ if err != nil {
+ return err
+ }
+ if depth+2 >= MaxImageDepth {
+ return fmt.Errorf("Cannot create container with more than %d parents", MaxImageDepth)
+ }
+ return nil
+}
+
+func (s *TagStore) History(name string) ([]*types.ImageHistory, error) {
+ foundImage, err := s.LookupImage(name)
+ if err != nil {
+ return nil, err
+ }
+
+ lookupMap := make(map[string][]string)
+ for name, repository := range s.Repositories {
+ for tag, id := range repository {
+ // If the ID already has a reverse lookup, do not update it unless for "latest"
+ if _, exists := lookupMap[id]; !exists {
+ lookupMap[id] = []string{}
+ }
+ lookupMap[id] = append(lookupMap[id], utils.ImageReference(name, tag))
+ }
+ }
+
+ history := []*types.ImageHistory{}
+
+ err = s.graph.WalkHistory(foundImage, func(img image.Image) error {
+ history = append(history, &types.ImageHistory{
+ ID: img.ID,
+ Created: img.Created.Unix(),
+ CreatedBy: strings.Join(img.ContainerConfig.Cmd.Slice(), " "),
+ Tags: lookupMap[img.ID],
+ Size: img.Size,
+ Comment: img.Comment,
+ })
+ return nil
+ })
+
+ return history, err
+}
+
+func (graph *Graph) GetParent(img *image.Image) (*image.Image, error) {
+ if img.Parent == "" {
+ return nil, nil
+ }
+ return graph.Get(img.Parent)
+}
+
+func (graph *Graph) GetParentsSize(img *image.Image, size int64) int64 {
+ parentImage, err := graph.GetParent(img)
+ if err != nil || parentImage == nil {
+ return size
+ }
+ size += parentImage.Size
+ return graph.GetParentsSize(parentImage, size)
+}
diff --git a/graph/import.go b/graph/import.go
new file mode 100644
index 00000000..2e08e6c8
--- /dev/null
+++ b/graph/import.go
@@ -0,0 +1,78 @@
+package graph
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/docker/pkg/archive"
+ "github.com/docker/docker/pkg/httputils"
+ "github.com/docker/docker/pkg/progressreader"
+ "github.com/docker/docker/pkg/streamformatter"
+ "github.com/docker/docker/runconfig"
+ "github.com/docker/docker/utils"
+)
+
+type ImageImportConfig struct {
+ Changes []string
+ InConfig io.ReadCloser
+ OutStream io.Writer
+ ContainerConfig *runconfig.Config
+}
+
+func (s *TagStore) Import(src string, repo string, tag string, imageImportConfig *ImageImportConfig) error {
+ var (
+ sf = streamformatter.NewJSONStreamFormatter()
+ 
archive archive.ArchiveReader + resp *http.Response + ) + + if src == "-" { + archive = imageImportConfig.InConfig + } else { + u, err := url.Parse(src) + if err != nil { + return err + } + if u.Scheme == "" { + u.Scheme = "http" + u.Host = src + u.Path = "" + } + imageImportConfig.OutStream.Write(sf.FormatStatus("", "Downloading from %s", u)) + resp, err = httputils.Download(u.String()) + if err != nil { + return err + } + progressReader := progressreader.New(progressreader.Config{ + In: resp.Body, + Out: imageImportConfig.OutStream, + Formatter: sf, + Size: int(resp.ContentLength), + NewLines: true, + ID: "", + Action: "Importing", + }) + defer progressReader.Close() + archive = progressReader + } + + img, err := s.graph.Create(archive, "", "", "Imported from "+src, "", nil, imageImportConfig.ContainerConfig) + if err != nil { + return err + } + // Optionally register the image at REPO/TAG + if repo != "" { + if err := s.Tag(repo, tag, img.ID, true); err != nil { + return err + } + } + imageImportConfig.OutStream.Write(sf.FormatStatus("", img.ID)) + logID := img.ID + if tag != "" { + logID = utils.ImageReference(logID, tag) + } + + s.eventsService.Log("import", logID, "") + return nil +} diff --git a/graph/list.go b/graph/list.go new file mode 100644 index 00000000..89b1a93b --- /dev/null +++ b/graph/list.go @@ -0,0 +1,150 @@ +package graph + +import ( + "fmt" + "path" + "sort" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/parsers/filters" + "github.com/docker/docker/utils" +) + +var acceptedImageFilterTags = map[string]struct{}{ + "dangling": {}, + "label": {}, +} + +type ImagesConfig struct { + Filters string + Filter string + All bool +} + +type ByCreated []*types.Image + +func (r ByCreated) Len() int { return len(r) } +func (r ByCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r ByCreated) Less(i, j int) bool { return r[i].Created < r[j].Created } + +func (s *TagStore) Images(config *ImagesConfig) ([]*types.Image, error) { + var ( + allImages map[string]*image.Image + err error + filtTagged = true + filtLabel = false + ) + + imageFilters, err := filters.FromParam(config.Filters) + if err != nil { + return nil, err + } + for name := range imageFilters { + if _, ok := acceptedImageFilterTags[name]; !ok { + return nil, fmt.Errorf("Invalid filter '%s'", name) + } + } + + if i, ok := imageFilters["dangling"]; ok { + for _, value := range i { + if strings.ToLower(value) == "true" { + filtTagged = false + } + } + } + + _, filtLabel = imageFilters["label"] + + if config.All && filtTagged { + allImages = s.graph.Map() + } else { + allImages = s.graph.Heads() + } + + lookup := make(map[string]*types.Image) + s.Lock() + for repoName, repository := range s.Repositories { + if config.Filter != "" { + if match, _ := path.Match(config.Filter, repoName); !match { + continue + } + } + for ref, id := range repository { + imgRef := utils.ImageReference(repoName, ref) + image, err := s.graph.Get(id) + if err != nil { + logrus.Warnf("couldn't load %s from %s: %s", id, imgRef, err) + continue + } + + if lImage, exists := lookup[id]; exists { + if filtTagged { + if utils.DigestReference(ref) { + lImage.RepoDigests = append(lImage.RepoDigests, imgRef) + } else { // Tag Ref. 
+ lImage.RepoTags = append(lImage.RepoTags, imgRef)
+ }
+ }
+ } else {
+ // The image is tagged, so drop it from the set of untagged (dangling) candidates
+ delete(allImages, id)
+ if !imageFilters.MatchKVList("label", image.ContainerConfig.Labels) {
+ continue
+ }
+ if filtTagged {
+ newImage := new(types.Image)
+ newImage.ParentId = image.Parent
+ newImage.ID = image.ID
+ newImage.Created = int(image.Created.Unix())
+ newImage.Size = int(image.Size)
+ newImage.VirtualSize = int(s.graph.GetParentsSize(image, 0) + image.Size)
+ newImage.Labels = image.ContainerConfig.Labels
+
+ if utils.DigestReference(ref) {
+ newImage.RepoTags = []string{}
+ newImage.RepoDigests = []string{imgRef}
+ } else {
+ newImage.RepoTags = []string{imgRef}
+ newImage.RepoDigests = []string{}
+ }
+
+ lookup[id] = newImage
+ }
+ }
+
+ }
+ }
+ s.Unlock()
+
+ images := []*types.Image{}
+ for _, value := range lookup {
+ images = append(images, value)
+ }
+
+ // Display images which aren't part of a repository/tag
+ if config.Filter == "" || filtLabel {
+ for _, image := range allImages {
+ if !imageFilters.MatchKVList("label", image.ContainerConfig.Labels) {
+ continue
+ }
+ newImage := new(types.Image)
+ newImage.ParentId = image.Parent
+ newImage.RepoTags = []string{"<none>:<none>"}
+ newImage.RepoDigests = []string{"<none>@<none>"}
+ newImage.ID = image.ID
+ newImage.Created = int(image.Created.Unix())
+ newImage.Size = int(image.Size)
+ newImage.VirtualSize = int(s.graph.GetParentsSize(image, 0) + image.Size)
+ newImage.Labels = image.ContainerConfig.Labels
+
+ images = append(images, newImage)
+ }
+ }
+
+ sort.Sort(sort.Reverse(ByCreated(images)))
+
+ return images, nil
+}
diff --git a/graph/load.go b/graph/load.go
new file mode 100644
index 00000000..c2010d3b
--- /dev/null
+++ b/graph/load.go
@@ -0,0 +1,135 @@
+// +build linux windows
+
+package graph
+
+import (
+ "encoding/json"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/image"
+ "github.com/docker/docker/pkg/archive"
+ "github.com/docker/docker/pkg/chrootarchive"
+)
+
+// Load loads a set of images into the repository. This is the complement of ImageExport.
+// The input stream is an uncompressed tarball containing images and metadata.
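A sketch of what a caller of Load, defined just below, looks like: hand it an uncompressed tarball (for example one produced by `docker save`) and a writer for the progress JSON. The helper name, path, and TagStore value are illustrative:

// Sketch: load images from a saved tarball into the store.
func loadFromFile(s *TagStore, path string, out io.Writer) error {
    f, err := os.Open(path) // *os.File satisfies the io.ReadCloser Load expects
    if err != nil {
        return err
    }
    defer f.Close()
    return s.Load(f, out)
}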
+func (s *TagStore) Load(inTar io.ReadCloser, outStream io.Writer) error { + tmpImageDir, err := ioutil.TempDir("", "docker-import-") + if err != nil { + return err + } + defer os.RemoveAll(tmpImageDir) + + var ( + repoDir = filepath.Join(tmpImageDir, "repo") + ) + + if err := os.Mkdir(repoDir, os.ModeDir); err != nil { + return err + } + images := s.graph.Map() + excludes := make([]string, len(images)) + i := 0 + for k := range images { + excludes[i] = k + i++ + } + if err := chrootarchive.Untar(inTar, repoDir, &archive.TarOptions{ExcludePatterns: excludes}); err != nil { + return err + } + + dirs, err := ioutil.ReadDir(repoDir) + if err != nil { + return err + } + + for _, d := range dirs { + if d.IsDir() { + if err := s.recursiveLoad(d.Name(), tmpImageDir); err != nil { + return err + } + } + } + + reposJSONFile, err := os.Open(filepath.Join(tmpImageDir, "repo", "repositories")) + if err != nil { + if !os.IsNotExist(err) { + return err + } + return nil + } + defer reposJSONFile.Close() + + repositories := map[string]Repository{} + if err := json.NewDecoder(reposJSONFile).Decode(&repositories); err != nil { + return err + } + + for imageName, tagMap := range repositories { + for tag, address := range tagMap { + if err := s.SetLoad(imageName, tag, address, true, outStream); err != nil { + return err + } + } + } + + return nil +} + +func (s *TagStore) recursiveLoad(address, tmpImageDir string) error { + if _, err := s.LookupImage(address); err != nil { + logrus.Debugf("Loading %s", address) + + imageJson, err := ioutil.ReadFile(filepath.Join(tmpImageDir, "repo", address, "json")) + if err != nil { + logrus.Debugf("Error reading json: %v", err) + return err + } + + layer, err := os.Open(filepath.Join(tmpImageDir, "repo", address, "layer.tar")) + if err != nil { + logrus.Debugf("Error reading embedded tar: %v", err) + return err + } + img, err := image.NewImgJSON(imageJson) + if err != nil { + logrus.Debugf("Error unmarshalling json: %v", err) + return err + } + if err := image.ValidateID(img.ID); err != nil { + logrus.Debugf("Error validating ID: %v", err) + return err + } + + // ensure no two downloads of the same layer happen at the same time + if c, err := s.poolAdd("pull", "layer:"+img.ID); err != nil { + if c != nil { + logrus.Debugf("Image (id: %s) load is already running, waiting: %v", img.ID, err) + <-c + return nil + } + + return err + } + + defer s.poolRemove("pull", "layer:"+img.ID) + + if img.Parent != "" { + if !s.graph.Exists(img.Parent) { + if err := s.recursiveLoad(img.Parent, tmpImageDir); err != nil { + return err + } + } + } + if err := s.graph.Register(v1ImageDescriptor{img}, layer); err != nil { + return err + } + } + logrus.Debugf("Completed processing %s", address) + + return nil +} diff --git a/graph/load_unsupported.go b/graph/load_unsupported.go new file mode 100644 index 00000000..45bdd98b --- /dev/null +++ b/graph/load_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux,!windows + +package graph + +import ( + "fmt" + "io" +) + +func (s *TagStore) Load(inTar io.ReadCloser, outStream io.Writer) error { + return fmt.Errorf("Load is not supported on this platform") +} diff --git a/graph/mutex.go b/graph/mutex.go new file mode 100644 index 00000000..a5f3991b --- /dev/null +++ b/graph/mutex.go @@ -0,0 +1,45 @@ +package graph + +import "sync" + +// imageMutex provides a lock per image id to protect shared resources in the +// graph. This is only used with registration but should be used when +// manipulating the layer store. 
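The pattern the imageMutex type that follows implements, in miniature: one mutex per key serializes work on the same image ID while different IDs proceed in parallel. A sketch of a caller wrapping a critical section (the helper name is illustrative):

// Sketch: run fn while holding the per-image lock for id.
func withImageLock(im *imageMutex, id string, fn func() error) error {
    im.Lock(id) // blocks only callers contending on the same id
    defer im.Unlock(id)
    return fn()
}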
+type imageMutex struct { + mus map[string]*sync.Mutex // mutexes by image id. + mu sync.Mutex // protects lock map + + // NOTE(stevvooe): The map above will grow to the size of all images ever + // registered during a daemon run. To free these resources, we must + // deallocate after unlock. Doing this safely is non-trivial in the face + // of a very minor leak. +} + +// Lock the provided id. +func (im *imageMutex) Lock(id string) { + im.getImageLock(id).Lock() +} + +// Unlock the provided id. +func (im *imageMutex) Unlock(id string) { + im.getImageLock(id).Unlock() +} + +// getImageLock returns the mutex for the given id. This method will never +// return nil. +func (im *imageMutex) getImageLock(id string) *sync.Mutex { + im.mu.Lock() + defer im.mu.Unlock() + + if im.mus == nil { // lazy + im.mus = make(map[string]*sync.Mutex) + } + + mu, ok := im.mus[id] + if !ok { + mu = new(sync.Mutex) + im.mus[id] = mu + } + + return mu +} diff --git a/graph/pools_test.go b/graph/pools_test.go new file mode 100644 index 00000000..129a5e1f --- /dev/null +++ b/graph/pools_test.go @@ -0,0 +1,49 @@ +package graph + +import ( + "testing" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Init() +} + +func TestPools(t *testing.T) { + s := &TagStore{ + pullingPool: make(map[string]chan struct{}), + pushingPool: make(map[string]chan struct{}), + } + + if _, err := s.poolAdd("pull", "test1"); err != nil { + t.Fatal(err) + } + if _, err := s.poolAdd("pull", "test2"); err != nil { + t.Fatal(err) + } + if _, err := s.poolAdd("push", "test1"); err == nil || err.Error() != "pull test1 is already in progress" { + t.Fatalf("Expected `pull test1 is already in progress`") + } + if _, err := s.poolAdd("pull", "test1"); err == nil || err.Error() != "pull test1 is already in progress" { + t.Fatalf("Expected `pull test1 is already in progress`") + } + if _, err := s.poolAdd("wait", "test3"); err == nil || err.Error() != "Unknown pool type" { + t.Fatalf("Expected `Unknown pool type`") + } + if err := s.poolRemove("pull", "test2"); err != nil { + t.Fatal(err) + } + if err := s.poolRemove("pull", "test2"); err != nil { + t.Fatal(err) + } + if err := s.poolRemove("pull", "test1"); err != nil { + t.Fatal(err) + } + if err := s.poolRemove("push", "test1"); err != nil { + t.Fatal(err) + } + if err := s.poolRemove("wait", "test3"); err == nil || err.Error() != "Unknown pool type" { + t.Fatalf("Expected `Unknown pool type`") + } +} diff --git a/graph/pull.go b/graph/pull.go new file mode 100644 index 00000000..736c5add --- /dev/null +++ b/graph/pull.go @@ -0,0 +1,129 @@ +package graph + +import ( + "fmt" + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" +) + +type ImagePullConfig struct { + MetaHeaders map[string][]string + AuthConfig *cliconfig.AuthConfig + OutStream io.Writer +} + +type Puller interface { + // Pull tries to pull the image referenced by `tag` + // Pull returns an error if any, as well as a boolean that determines whether to retry Pull on the next configured endpoint. + // + // TODO(tiborvass): have Pull() take a reference to repository + tag, so that the puller itself is repository-agnostic. 
+ Pull(tag string) (fallback bool, err error) +} + +func NewPuller(s *TagStore, endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePullConfig *ImagePullConfig, sf *streamformatter.StreamFormatter) (Puller, error) { + switch endpoint.Version { + case registry.APIVersion2: + return &v2Puller{ + TagStore: s, + endpoint: endpoint, + config: imagePullConfig, + sf: sf, + repoInfo: repoInfo, + }, nil + case registry.APIVersion1: + return &v1Puller{ + TagStore: s, + endpoint: endpoint, + config: imagePullConfig, + sf: sf, + repoInfo: repoInfo, + }, nil + } + return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) +} + +func (s *TagStore) Pull(image string, tag string, imagePullConfig *ImagePullConfig) error { + var sf = streamformatter.NewJSONStreamFormatter() + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := s.registryService.ResolveRepository(image) + if err != nil { + return err + } + + // makes sure name is not empty or `scratch` + if err := validateRepoName(repoInfo.LocalName); err != nil { + return err + } + + endpoints, err := s.registryService.LookupPullEndpoints(repoInfo.CanonicalName) + if err != nil { + return err + } + + logName := repoInfo.LocalName + if tag != "" { + logName = utils.ImageReference(logName, tag) + } + + var ( + lastErr error + + // discardNoSupportErrors is used to track whether an endpoint encountered an error of type registry.ErrNoSupport + // By default it is false, which means that if a ErrNoSupport error is encountered, it will be saved in lastErr. + // As soon as another kind of error is encountered, discardNoSupportErrors is set to true, avoiding the saving of + // any subsequent ErrNoSupport errors in lastErr. + // It's needed for pull-by-digest on v1 endpoints: if there are only v1 endpoints configured, the error should be + // returned and displayed, but if there was a v2 endpoint which supports pull-by-digest, then the last relevant + // error is the ones from v2 endpoints not v1. + discardNoSupportErrors bool + ) + for _, endpoint := range endpoints { + logrus.Debugf("Trying to pull %s from %s %s", repoInfo.LocalName, endpoint.URL, endpoint.Version) + + puller, err := NewPuller(s, endpoint, repoInfo, imagePullConfig, sf) + if err != nil { + lastErr = err + continue + } + if fallback, err := puller.Pull(tag); err != nil { + if fallback { + if _, ok := err.(registry.ErrNoSupport); !ok { + // Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors. + discardNoSupportErrors = true + // save the current error + lastErr = err + } else if !discardNoSupportErrors { + // Save the ErrNoSupport error, because it's either the first error or all encountered errors + // were also ErrNoSupport errors. 
+ lastErr = err + } + continue + } + logrus.Debugf("Not continuing with error: %v", err) + return err + + } + + s.eventsService.Log("pull", logName, "") + return nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s", image) + } + return lastErr +} + +func WriteStatus(requestedTag string, out io.Writer, sf *streamformatter.StreamFormatter, layersDownloaded bool) { + if layersDownloaded { + out.Write(sf.FormatStatus("", "Status: Downloaded newer image for %s", requestedTag)) + } else { + out.Write(sf.FormatStatus("", "Status: Image is up to date for %s", requestedTag)) + } +} diff --git a/graph/pull_v1.go b/graph/pull_v1.go new file mode 100644 index 00000000..b07ad034 --- /dev/null +++ b/graph/pull_v1.go @@ -0,0 +1,339 @@ +package graph + +import ( + "errors" + "fmt" + "net" + "net/url" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/progressreader" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" +) + +type v1Puller struct { + *TagStore + endpoint registry.APIEndpoint + config *ImagePullConfig + sf *streamformatter.StreamFormatter + repoInfo *registry.RepositoryInfo + session *registry.Session +} + +func (p *v1Puller) Pull(tag string) (fallback bool, err error) { + if utils.DigestReference(tag) { + // Allowing fallback, because HTTPS v1 is before HTTP v2 + return true, registry.ErrNoSupport{errors.New("Cannot pull by digest with v1 registry")} + } + + tlsConfig, err := p.registryService.TLSConfig(p.repoInfo.Index.Name) + if err != nil { + return false, err + } + // Adds Docker-specific headers as well as user-specified headers (metaHeaders) + tr := transport.NewTransport( + // TODO(tiborvass): was ReceiveTimeout + registry.NewTransport(tlsConfig), + registry.DockerHeaders(p.config.MetaHeaders)..., + ) + client := registry.HTTPClient(tr) + v1Endpoint, err := p.endpoint.ToV1Endpoint(p.config.MetaHeaders) + if err != nil { + logrus.Debugf("Could not get v1 endpoint: %v", err) + return true, err + } + p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) + if err != nil { + // TODO(dmcgowan): Check if should fallback + logrus.Debugf("Fallback from error: %s", err) + return true, err + } + if err := p.pullRepository(tag); err != nil { + // TODO(dmcgowan): Check if should fallback + return false, err + } + out := p.config.OutStream + out.Write(p.sf.FormatStatus("", "%s: this image was pulled from a legacy registry. 
Important: This registry version will not be supported in future versions of docker.", p.repoInfo.CanonicalName))
+
+ return false, nil
+}
+
+func (p *v1Puller) pullRepository(askedTag string) error {
+ out := p.config.OutStream
+ out.Write(p.sf.FormatStatus("", "Pulling repository %s", p.repoInfo.CanonicalName))
+
+ repoData, err := p.session.GetRepositoryData(p.repoInfo.RemoteName)
+ if err != nil {
+ if strings.Contains(err.Error(), "HTTP code: 404") {
+ return fmt.Errorf("Error: image %s not found", utils.ImageReference(p.repoInfo.RemoteName, askedTag))
+ }
+ // Unexpected HTTP error
+ return err
+ }
+
+ logrus.Debugf("Retrieving the tag list")
+ tagsList := make(map[string]string)
+ if askedTag == "" {
+ tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo.RemoteName)
+ } else {
+ var tagId string
+ tagId, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo.RemoteName, askedTag)
+ tagsList[askedTag] = tagId
+ }
+ if err != nil {
+ if err == registry.ErrRepoNotFound && askedTag != "" {
+ return fmt.Errorf("Tag %s not found in repository %s", askedTag, p.repoInfo.CanonicalName)
+ }
+ logrus.Errorf("unable to get remote tags: %s", err)
+ return err
+ }
+
+ for tag, id := range tagsList {
+ repoData.ImgList[id] = &registry.ImgData{
+ ID: id,
+ Tag: tag,
+ Checksum: "",
+ }
+ }
+
+ logrus.Debugf("Registering tags")
+ // If no tag has been specified, pull them all
+ if askedTag == "" {
+ for tag, id := range tagsList {
+ repoData.ImgList[id].Tag = tag
+ }
+ } else {
+ // Otherwise, check that the tag exists and use only that one
+ id, exists := tagsList[askedTag]
+ if !exists {
+ return fmt.Errorf("Tag %s not found in repository %s", askedTag, p.repoInfo.CanonicalName)
+ }
+ repoData.ImgList[id].Tag = askedTag
+ }
+
+ errors := make(chan error)
+
+ layersDownloaded := false
+ imgIDs := []string{}
+ sessionID := p.session.ID()
+ defer func() {
+ p.graph.Release(sessionID, imgIDs...)
+ }()
+ for _, imgData := range repoData.ImgList {
+ downloadImage := func(img *registry.ImgData) {
+ if askedTag != "" && img.Tag != askedTag {
+ errors <- nil
+ return
+ }
+
+ if img.Tag == "" {
+ logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
+ errors <- nil
+ return
+ }
+
+ if err := image.ValidateID(img.ID); err != nil {
+ errors <- err
+ return
+ }
+
+ // ensure no two downloads of the same image happen at the same time
+ if c, err := p.poolAdd("pull", "img:"+img.ID); err != nil {
+ if c != nil {
+ out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Layer already being pulled by another client. 
Waiting.", nil)) + <-c + out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil)) + } else { + logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err) + } + errors <- nil + return + } + defer p.poolRemove("pull", "img:"+img.ID) + + // we need to retain it until tagging + p.graph.Retain(sessionID, img.ID) + imgIDs = append(imgIDs, img.ID) + + out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, p.repoInfo.CanonicalName), nil)) + success := false + var lastErr, err error + var isDownloaded bool + for _, ep := range p.repoInfo.Index.Mirrors { + ep += "v1/" + out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.CanonicalName, ep), nil)) + if isDownloaded, err = p.pullImage(img.ID, ep, repoData.Tokens); err != nil { + // Don't report errors when pulling from mirrors. + logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.CanonicalName, ep, err) + continue + } + layersDownloaded = layersDownloaded || isDownloaded + success = true + break + } + if !success { + for _, ep := range repoData.Endpoints { + out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.CanonicalName, ep), nil)) + if isDownloaded, err = p.pullImage(img.ID, ep, repoData.Tokens); err != nil { + // It's not ideal that only the last error is returned, it would be better to concatenate the errors. + // As the error is also given to the output stream the user will see the error. + lastErr = err + out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.CanonicalName, ep, err), nil)) + continue + } + layersDownloaded = layersDownloaded || isDownloaded + success = true + break + } + } + if !success { + err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.CanonicalName, lastErr) + out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), err.Error(), nil)) + errors <- err + return + } + out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil)) + + errors <- nil + } + + go downloadImage(imgData) + } + + var lastError error + for i := 0; i < len(repoData.ImgList); i++ { + if err := <-errors; err != nil { + lastError = err + } + } + if lastError != nil { + return lastError + } + + for tag, id := range tagsList { + if askedTag != "" && tag != askedTag { + continue + } + if err := p.Tag(p.repoInfo.LocalName, tag, id, true); err != nil { + return err + } + } + + requestedTag := p.repoInfo.LocalName + if len(askedTag) > 0 { + requestedTag = utils.ImageReference(p.repoInfo.LocalName, askedTag) + } + WriteStatus(requestedTag, out, p.sf, layersDownloaded) + return nil +} + +func (p *v1Puller) pullImage(imgID, endpoint string, token []string) (bool, error) { + history, err := p.session.GetRemoteHistory(imgID, endpoint) + if err != nil { + return false, err + } + out := p.config.OutStream + out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Pulling dependent layers", nil)) + // FIXME: Try to stream the images? + // FIXME: Launch the getRemoteImage() in goroutines + + sessionID := p.session.ID() + // As imgID has been retained in pullRepository, no need to retain again + p.graph.Retain(sessionID, history[1:]...) + defer p.graph.Release(sessionID, history[1:]...) 
+
+	layersDownloaded := false
+	for i := len(history) - 1; i >= 0; i-- {
+		id := history[i]
+
+		// ensure no two downloads of the same layer happen at the same time
+		if c, err := p.poolAdd("pull", "layer:"+id); err != nil {
+			logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err)
+			<-c
+		}
+		defer p.poolRemove("pull", "layer:"+id)
+
+		if !p.graph.Exists(id) {
+			out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Pulling metadata", nil))
+			var (
+				imgJSON []byte
+				imgSize int
+				err     error
+				img     *image.Image
+			)
+			retries := 5
+			for j := 1; j <= retries; j++ {
+				imgJSON, imgSize, err = p.session.GetRemoteImageJSON(id, endpoint)
+				if err != nil && j == retries {
+					out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
+					return layersDownloaded, err
+				} else if err != nil {
+					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
+					continue
+				}
+				img, err = image.NewImgJSON(imgJSON)
+				layersDownloaded = true
+				if err != nil && j == retries {
+					out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
+					return layersDownloaded, fmt.Errorf("Failed to parse json: %s", err)
+				} else if err != nil {
+					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
+					continue
+				} else {
+					break
+				}
+			}
+
+			for j := 1; j <= retries; j++ {
+				// Get the layer
+				status := "Pulling fs layer"
+				if j > 1 {
+					status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
+				}
+				out.Write(p.sf.FormatProgress(stringid.TruncateID(id), status, nil))
+				layer, err := p.session.GetRemoteImageLayer(img.ID, endpoint, int64(imgSize))
+				if uerr, ok := err.(*url.Error); ok {
+					err = uerr.Err
+				}
+				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
+					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
+					continue
+				} else if err != nil {
+					out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
+					return layersDownloaded, err
+				}
+				layersDownloaded = true
+				defer layer.Close()
+
+				err = p.graph.Register(v1ImageDescriptor{img},
+					progressreader.New(progressreader.Config{
+						In:        layer,
+						Out:       out,
+						Formatter: p.sf,
+						Size:      imgSize,
+						NewLines:  false,
+						ID:        stringid.TruncateID(id),
+						Action:    "Downloading",
+					}))
+				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
+					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
+					continue
+				} else if err != nil {
+					out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error downloading dependent layers", nil))
+					return layersDownloaded, err
+				} else {
+					break
+				}
+			}
+		}
+		out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Download complete", nil))
+	}
+	return layersDownloaded, nil
+}
diff --git a/graph/pull_v2.go b/graph/pull_v2.go
new file mode 100644
index 00000000..d6ff4302
--- /dev/null
+++ b/graph/pull_v2.go
@@ -0,0 +1,715 @@
+package graph
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"sync"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/manifest"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/pkg/progressreader"
+	"github.com/docker/docker/pkg/streamformatter"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/docker/registry"
+	"github.com/docker/docker/utils"
+	"golang.org/x/net/context"
+)
+
+type v2Puller struct {
+	*TagStore
+	endpoint  registry.APIEndpoint
+	config    *ImagePullConfig
+	sf        *streamformatter.StreamFormatter
+	repoInfo  *registry.RepositoryInfo
+	repo      distribution.Repository
+	sessionID string
+}
+
+func (p *v2Puller) Pull(tag string) (fallback bool, err error) {
+	// TODO(tiborvass): was ReceiveTimeout
+	p.repo, err = NewV2Repository(p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig)
+	if err != nil {
+		logrus.Debugf("Error getting v2 registry: %v", err)
+		return true, err
+	}
+
+	p.sessionID = stringid.GenerateRandomID()
+
+	if err := p.pullV2Repository(tag); err != nil {
+		if registry.ContinueOnError(err) {
+			logrus.Debugf("Error trying v2 registry: %v", err)
+			return true, err
+		}
+		return false, err
+	}
+	return false, nil
+}
+
+func (p *v2Puller) pullV2Repository(tag string) (err error) {
+	var tags []string
+	taggedName := p.repoInfo.LocalName
+	if len(tag) > 0 {
+		tags = []string{tag}
+		taggedName = utils.ImageReference(p.repoInfo.LocalName, tag)
+	} else {
+		var err error
+
+		manSvc, err := p.repo.Manifests(context.Background())
+		if err != nil {
+			return err
+		}
+
+		tags, err = manSvc.Tags()
+		if err != nil {
+			return err
+		}
+
+	}
+
+	poolKey := "v2:" + taggedName
+	c, err := p.poolAdd("pull", poolKey)
+	if err != nil {
+		if c != nil {
+			// Another pull of the same repository is already taking place; just wait for it to finish
+			p.config.OutStream.Write(p.sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", p.repoInfo.CanonicalName))
+			<-c
+			return nil
+		}
+		return err
+	}
+	defer p.poolRemove("pull", poolKey)
+
+	var layersDownloaded bool
+	for _, tag := range tags {
+		// pulledNew is true if either new layers were downloaded OR if existing images were newly tagged
+		// TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus?
+		pulledNew, err := p.pullV2Tag(tag, taggedName)
+		if err != nil {
+			return err
+		}
+		layersDownloaded = layersDownloaded || pulledNew
+	}
+
+	WriteStatus(taggedName, p.config.OutStream, p.sf, layersDownloaded)
+
+	return nil
+}
+
+// downloadInfo is used to pass information from download to extractor
+type downloadInfo struct {
+	img     contentAddressableDescriptor
+	tmpFile *os.File
+	digest  digest.Digest
+	layer   distribution.ReadSeekCloser
+	size    int64
+	err     chan error
+	out     io.Writer // Download progress is written here.
+}
+
+// contentAddressableDescriptor is used to pass image data from a manifest to the
+// graph.
+type contentAddressableDescriptor struct {
+	id              string
+	parent          string
+	strongID        digest.Digest
+	compatibilityID string
+	config          []byte
+	v1Compatibility []byte
+}
+
+func newContentAddressableImage(v1Compatibility []byte, blobSum digest.Digest, parent digest.Digest) (contentAddressableDescriptor, error) {
+	img := contentAddressableDescriptor{
+		v1Compatibility: v1Compatibility,
+	}
+
+	var err error
+	img.config, err = image.MakeImageConfig(v1Compatibility, blobSum, parent)
+	if err != nil {
+		return img, err
+	}
+	img.strongID, err = image.StrongID(img.config)
+	if err != nil {
+		return img, err
+	}
+
+	unmarshalledConfig, err := image.NewImgJSON(v1Compatibility)
+	if err != nil {
+		return img, err
+	}
+
+	img.compatibilityID = unmarshalledConfig.ID
+	img.id = img.strongID.Hex()
+
+	return img, nil
+}
+
+// ID returns the actual ID to be used for the downloaded image. This may be
+// a computed ID.
+func (img contentAddressableDescriptor) ID() string {
+	return img.id
+}
+
+// Parent returns the parent ID to be used for the image. This may be a
+// computed ID.
+func (img contentAddressableDescriptor) Parent() string { + return img.parent +} + +// MarshalConfig renders the image structure into JSON. +func (img contentAddressableDescriptor) MarshalConfig() ([]byte, error) { + return img.config, nil +} + +type errVerification struct{} + +func (errVerification) Error() string { return "verification failed" } + +func (p *v2Puller) download(di *downloadInfo) { + logrus.Debugf("pulling blob %q to %s", di.digest, di.img.id) + + out := di.out + + poolKey := "v2img:" + di.img.id + if c, err := p.poolAdd("pull", poolKey); err != nil { + if c != nil { + out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.id), "Layer already being pulled by another client. Waiting.", nil)) + <-c + out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.id), "Download complete", nil)) + } else { + logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", di.img.id, err) + } + di.err <- nil + return + } + + defer p.poolRemove("pull", poolKey) + tmpFile, err := ioutil.TempFile("", "GetImageBlob") + if err != nil { + di.err <- err + return + } + + blobs := p.repo.Blobs(context.Background()) + + desc, err := blobs.Stat(context.Background(), di.digest) + if err != nil { + logrus.Debugf("Error statting layer: %v", err) + di.err <- err + return + } + di.size = desc.Size + + layerDownload, err := blobs.Open(context.Background(), di.digest) + if err != nil { + logrus.Debugf("Error fetching layer: %v", err) + di.err <- err + return + } + defer layerDownload.Close() + + verifier, err := digest.NewDigestVerifier(di.digest) + if err != nil { + di.err <- err + return + } + + reader := progressreader.New(progressreader.Config{ + In: ioutil.NopCloser(io.TeeReader(layerDownload, verifier)), + Out: out, + Formatter: p.sf, + Size: int(di.size), + NewLines: false, + ID: stringid.TruncateID(di.img.id), + Action: "Downloading", + }) + io.Copy(tmpFile, reader) + + out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.id), "Verifying Checksum", nil)) + + if !verifier.Verified() { + err = fmt.Errorf("filesystem layer verification failed for digest %s", di.digest) + logrus.Error(err) + di.err <- err + return + } + + out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.id), "Download complete", nil)) + + logrus.Debugf("Downloaded %s to tempfile %s", di.img.id, tmpFile.Name()) + di.tmpFile = tmpFile + di.layer = layerDownload + + di.err <- nil +} + +func (p *v2Puller) pullV2Tag(tag, taggedName string) (tagUpdated bool, err error) { + logrus.Debugf("Pulling tag from V2 registry: %q", tag) + out := p.config.OutStream + + manSvc, err := p.repo.Manifests(context.Background()) + if err != nil { + return false, err + } + + unverifiedManifest, err := manSvc.GetByTag(tag) + if err != nil { + return false, err + } + if unverifiedManifest == nil { + return false, fmt.Errorf("image manifest does not exist for tag %q", tag) + } + var verifiedManifest *manifest.Manifest + verifiedManifest, err = verifyManifest(unverifiedManifest, tag) + if err != nil { + return false, err + } + + // remove duplicate layers and check parent chain validity + err = fixManifestLayers(verifiedManifest) + if err != nil { + return false, err + } + + imgs, err := p.getImageInfos(*verifiedManifest) + if err != nil { + return false, err + } + + // By using a pipeWriter for each of the downloads to write their progress + // to, we can avoid an issue where this function returns an error but + // leaves behind running download goroutines. 
By splitting the writer + // with a pipe, we can close the pipe if there is any error, consequently + // causing each download to cancel due to an error writing to this pipe. + pipeReader, pipeWriter := io.Pipe() + go func() { + if _, err := io.Copy(out, pipeReader); err != nil { + logrus.Errorf("error copying from layer download progress reader: %s", err) + if err := pipeReader.CloseWithError(err); err != nil { + logrus.Errorf("error closing the progress reader: %s", err) + } + } + }() + defer func() { + if err != nil { + // All operations on the pipe are synchronous. This call will wait + // until all current readers/writers are done using the pipe then + // set the error. All successive reads/writes will return with this + // error. + pipeWriter.CloseWithError(fmt.Errorf("download canceled %+v", err)) + } else { + // If no error then just close the pipe. + pipeWriter.Close() + } + }() + + out.Write(p.sf.FormatStatus(tag, "Pulling from %s", p.repo.Name())) + + downloads := make([]downloadInfo, len(verifiedManifest.FSLayers)) + + layerIDs := []string{} + defer func() { + p.graph.Release(p.sessionID, layerIDs...) + }() + + for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- { + + img := imgs[i] + downloads[i].img = img + downloads[i].digest = verifiedManifest.FSLayers[i].BlobSum + + p.graph.Retain(p.sessionID, img.id) + layerIDs = append(layerIDs, img.id) + + p.graph.imageMutex.Lock(img.id) + + // Check if exists + if p.graph.Exists(img.id) { + if err := p.validateImageInGraph(img.id, imgs, i); err != nil { + p.graph.imageMutex.Unlock(img.id) + return false, fmt.Errorf("image validation failed: %v", err) + } + logrus.Debugf("Image already exists: %s", img.id) + p.graph.imageMutex.Unlock(img.id) + continue + } + p.graph.imageMutex.Unlock(img.id) + + out.Write(p.sf.FormatProgress(stringid.TruncateID(img.id), "Pulling fs layer", nil)) + + downloads[i].err = make(chan error, 1) + downloads[i].out = pipeWriter + go p.download(&downloads[i]) + } + + for i := len(downloads) - 1; i >= 0; i-- { + d := &downloads[i] + if d.err != nil { + if err := <-d.err; err != nil { + return false, err + } + } + if d.layer != nil { + // if tmpFile is empty assume download and extracted elsewhere + defer os.Remove(d.tmpFile.Name()) + defer d.tmpFile.Close() + d.tmpFile.Seek(0, 0) + if d.tmpFile != nil { + err := func() error { + reader := progressreader.New(progressreader.Config{ + In: d.tmpFile, + Out: out, + Formatter: p.sf, + Size: int(d.size), + NewLines: false, + ID: stringid.TruncateID(d.img.id), + Action: "Extracting", + }) + + p.graph.imageMutex.Lock(d.img.id) + defer p.graph.imageMutex.Unlock(d.img.id) + + // Must recheck the data on disk if any exists. + // This protects against races where something + // else is written to the graph under this ID + // after attemptIDReuse. 
+ if p.graph.Exists(d.img.id) { + if err := p.validateImageInGraph(d.img.id, imgs, i); err != nil { + return fmt.Errorf("image validation failed: %v", err) + } + } + + if err := p.graph.register(d.img, reader); err != nil { + return err + } + + if err := p.graph.setLayerDigest(d.img.id, d.digest); err != nil { + return err + } + + if err := p.graph.setV1CompatibilityConfig(d.img.id, d.img.v1Compatibility); err != nil { + return err + } + + return nil + }() + if err != nil { + return false, err + } + + // FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted) + } + out.Write(p.sf.FormatProgress(stringid.TruncateID(d.img.id), "Pull complete", nil)) + tagUpdated = true + } else { + out.Write(p.sf.FormatProgress(stringid.TruncateID(d.img.id), "Already exists", nil)) + } + } + + manifestDigest, _, err := digestFromManifest(unverifiedManifest, p.repoInfo.LocalName) + if err != nil { + return false, err + } + + // Check for new tag if no layers downloaded + if !tagUpdated { + repo, err := p.Get(p.repoInfo.LocalName) + if err != nil { + return false, err + } + if repo != nil { + if _, exists := repo[tag]; !exists { + tagUpdated = true + } + } else { + tagUpdated = true + } + } + + if utils.DigestReference(tag) { + // TODO(stevvooe): Ideally, we should always set the digest so we can + // use the digest whether we pull by it or not. Unfortunately, the tag + // store treats the digest as a separate tag, meaning there may be an + // untagged digest image that would seem to be dangling by a user. + if err = p.SetDigest(p.repoInfo.LocalName, tag, downloads[0].img.id); err != nil { + return false, err + } + } else { + // only set the repository/tag -> image ID mapping when pulling by tag (i.e. not by digest) + if err = p.Tag(p.repoInfo.LocalName, tag, downloads[0].img.id, true); err != nil { + return false, err + } + } + + if manifestDigest != "" { + out.Write(p.sf.FormatStatus("", "Digest: %s", manifestDigest)) + } + + return tagUpdated, nil +} + +func verifyManifest(signedManifest *manifest.SignedManifest, tag string) (m *manifest.Manifest, err error) { + // If pull by digest, then verify the manifest digest. NOTE: It is + // important to do this first, before any other content validation. If the + // digest cannot be verified, don't even bother with those other things. + if manifestDigest, err := digest.ParseDigest(tag); err == nil { + verifier, err := digest.NewDigestVerifier(manifestDigest) + if err != nil { + return nil, err + } + payload, err := signedManifest.Payload() + if err != nil { + // If this failed, the signatures section was corrupted + // or missing. Treat the entire manifest as the payload. 
+ payload = signedManifest.Raw + } + if _, err := verifier.Write(payload); err != nil { + return nil, err + } + if !verifier.Verified() { + err := fmt.Errorf("image verification failed for digest %s", manifestDigest) + logrus.Error(err) + return nil, err + } + + var verifiedManifest manifest.Manifest + if err = json.Unmarshal(payload, &verifiedManifest); err != nil { + return nil, err + } + m = &verifiedManifest + } else { + m = &signedManifest.Manifest + } + + if m.SchemaVersion != 1 { + return nil, fmt.Errorf("unsupported schema version %d for tag %q", m.SchemaVersion, tag) + } + if len(m.FSLayers) != len(m.History) { + return nil, fmt.Errorf("length of history not equal to number of layers for tag %q", tag) + } + if len(m.FSLayers) == 0 { + return nil, fmt.Errorf("no FSLayers in manifest for tag %q", tag) + } + return m, nil +} + +// fixManifestLayers removes repeated layers from the manifest and checks the +// correctness of the parent chain. +func fixManifestLayers(m *manifest.Manifest) error { + images := make([]*image.Image, len(m.FSLayers)) + for i := range m.FSLayers { + img, err := image.NewImgJSON([]byte(m.History[i].V1Compatibility)) + if err != nil { + return err + } + images[i] = img + if err := image.ValidateID(img.ID); err != nil { + return err + } + } + + if images[len(images)-1].Parent != "" { + return errors.New("Invalid parent ID in the base layer of the image.") + } + + // check general duplicates to error instead of a deadlock + idmap := make(map[string]struct{}) + + var lastID string + for _, img := range images { + // skip IDs that appear after each other, we handle those later + if _, exists := idmap[img.ID]; img.ID != lastID && exists { + return fmt.Errorf("ID %+v appears multiple times in manifest.", img.ID) + } + lastID = img.ID + idmap[lastID] = struct{}{} + } + + // backwards loop so that we keep the remaining indexes after removing items + for i := len(images) - 2; i >= 0; i-- { + if images[i].ID == images[i+1].ID { // repeated ID. remove and continue + m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) + m.History = append(m.History[:i], m.History[i+1:]...) + } else if images[i].Parent != images[i+1].ID { + return fmt.Errorf("Invalid parent ID. Expected %v, got %v.", images[i+1].ID, images[i].Parent) + } + } + + return nil +} + +// getImageInfos returns an imageinfo struct for every image in the manifest. +// These objects contain both calculated strongIDs and compatibilityIDs found +// in v1Compatibility object. +func (p *v2Puller) getImageInfos(m manifest.Manifest) ([]contentAddressableDescriptor, error) { + imgs := make([]contentAddressableDescriptor, len(m.FSLayers)) + + var parent digest.Digest + for i := len(imgs) - 1; i >= 0; i-- { + var err error + imgs[i], err = newContentAddressableImage([]byte(m.History[i].V1Compatibility), m.FSLayers[i].BlobSum, parent) + if err != nil { + return nil, err + } + parent = imgs[i].strongID + } + + p.attemptIDReuse(imgs) + + return imgs, nil +} + +var idReuseLock sync.Mutex + +// attemptIDReuse does a best attempt to match verified compatibilityIDs +// already in the graph with the computed strongIDs so we can keep using them. +// This process will never fail but may just return the strongIDs if none of +// the compatibilityIDs exists or can be verified. If the strongIDs themselves +// fail verification, we deterministically generate alternate IDs to use until +// we find one that's available or already exists with the correct data. 
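+//
+// For example, if the computed strongID fails validation against the graph,
+// the next candidate tried is SHA256 over the previous ID's hex string (see
+// tryNextID below), repeated until a usable or validated ID is found.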
+func (p *v2Puller) attemptIDReuse(imgs []contentAddressableDescriptor) { + // This function needs to be protected with a global lock, because it + // locks multiple IDs at once, and there's no good way to make sure + // the locking happens a deterministic order. + idReuseLock.Lock() + defer idReuseLock.Unlock() + + idMap := make(map[string]struct{}) + for _, img := range imgs { + idMap[img.id] = struct{}{} + idMap[img.compatibilityID] = struct{}{} + + if p.graph.Exists(img.compatibilityID) { + if _, err := p.graph.GenerateV1CompatibilityChain(img.compatibilityID); err != nil { + logrus.Debugf("Migration v1Compatibility generation error: %v", err) + return + } + } + } + for id := range idMap { + p.graph.imageMutex.Lock(id) + defer p.graph.imageMutex.Unlock(id) + } + + // continueReuse controls whether the function will try to find + // existing layers on disk under the old v1 IDs, to avoid repulling + // them. The hashes are checked to ensure these layers are okay to + // use. continueReuse starts out as true, but is set to false if + // the code encounters something that doesn't match the expected hash. + continueReuse := true + + for i := len(imgs) - 1; i >= 0; i-- { + if p.graph.Exists(imgs[i].id) { + // Found an image in the graph under the strongID. Validate the + // image before using it. + if err := p.validateImageInGraph(imgs[i].id, imgs, i); err != nil { + continueReuse = false + logrus.Debugf("not using existing strongID: %v", err) + + // The strong ID existed in the graph but didn't + // validate successfully. We can't use the strong ID + // because it didn't validate successfully. Treat the + // graph like a hash table with probing... compute + // SHA256(id) until we find an ID that either doesn't + // already exist in the graph, or has existing content + // that validates successfully. + for { + if err := p.tryNextID(imgs, i, idMap); err != nil { + logrus.Debug(err.Error()) + } else { + break + } + } + } + continue + } + + if continueReuse { + compatibilityID := imgs[i].compatibilityID + if err := p.validateImageInGraph(compatibilityID, imgs, i); err != nil { + logrus.Debugf("stopping ID reuse: %v", err) + continueReuse = false + } else { + // The compatibility ID exists in the graph and was + // validated. Use it. + imgs[i].id = compatibilityID + } + } + } + + // fix up the parents of the images + for i := 0; i < len(imgs); i++ { + if i == len(imgs)-1 { // Base layer + imgs[i].parent = "" + } else { + imgs[i].parent = imgs[i+1].id + } + } +} + +// validateImageInGraph checks that an image in the graph has the expected +// strongID. id is the entry in the graph to check, imgs is the slice of +// images being processed (for access to the parent), and i is the index +// into this slice which the graph entry should be checked against. 
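+//
+// Concretely, it recomputes the image config from the stored v1Compatibility
+// data, the layer digest and the parent's strongID, and accepts the entry
+// only if image.StrongID over that config equals imgs[i].strongID.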
+func (p *v2Puller) validateImageInGraph(id string, imgs []contentAddressableDescriptor, i int) error { + img, err := p.graph.Get(id) + if err != nil { + return fmt.Errorf("missing: %v", err) + } + layerID, err := p.graph.getLayerDigest(id) + if err != nil { + return fmt.Errorf("digest: %v", err) + } + var parentID digest.Digest + if i != len(imgs)-1 { + if img.Parent != imgs[i+1].id { // comparing that graph points to validated ID + return fmt.Errorf("parent: %v %v", img.Parent, imgs[i+1].id) + } else { + parentID = imgs[i+1].strongID + } + } else if img.Parent != "" { + return fmt.Errorf("unexpected parent: %v", img.Parent) + } + + v1Config, err := p.graph.getV1CompatibilityConfig(img.ID) + if err != nil { + return fmt.Errorf("v1Compatibility: %v %v", img.ID, err) + } + + json, err := image.MakeImageConfig(v1Config, layerID, parentID) + if err != nil { + return fmt.Errorf("make config: %v", err) + } + + if dgst, err := image.StrongID(json); err == nil && dgst == imgs[i].strongID { + logrus.Debugf("Validated %v as %v", dgst, id) + } else { + return fmt.Errorf("digest mismatch: %v %v, error: %v", dgst, imgs[i].strongID, err) + } + + // All clear + return nil +} + +func (p *v2Puller) tryNextID(imgs []contentAddressableDescriptor, i int, idMap map[string]struct{}) error { + nextID, _ := digest.FromBytes([]byte(imgs[i].id)) + imgs[i].id = nextID.Hex() + + if _, exists := idMap[imgs[i].id]; !exists { + p.graph.imageMutex.Lock(imgs[i].id) + defer p.graph.imageMutex.Unlock(imgs[i].id) + } + + if p.graph.Exists(imgs[i].id) { + if err := p.validateImageInGraph(imgs[i].id, imgs, i); err != nil { + return fmt.Errorf("not using existing strongID permutation %s: %v", imgs[i].id, err) + } + } + return nil +} diff --git a/graph/pull_v2_test.go b/graph/pull_v2_test.go new file mode 100644 index 00000000..5d92fa9f --- /dev/null +++ b/graph/pull_v2_test.go @@ -0,0 +1,194 @@ +package graph + +import ( + "encoding/json" + "io/ioutil" + "reflect" + "strings" + "testing" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" +) + +// TestValidateManifest verifies the validateManifest function +func TestValidateManifest(t *testing.T) { + expectedDigest := "sha256:02fee8c3220ba806531f606525eceb83f4feb654f62b207191b1c9209188dedd" + expectedFSLayer0 := digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + + // Good manifest + + goodManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/good_manifest") + if err != nil { + t.Fatal("error reading fixture:", err) + } + + var goodSignedManifest manifest.SignedManifest + err = json.Unmarshal(goodManifestBytes, &goodSignedManifest) + if err != nil { + t.Fatal("error unmarshaling manifest:", err) + } + + verifiedManifest, err := verifyManifest(&goodSignedManifest, expectedDigest) + if err != nil { + t.Fatal("validateManifest failed:", err) + } + + if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { + t.Fatal("unexpected FSLayer in good manifest") + } + + // "Extra data" manifest + + extraDataManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/extra_data_manifest") + if err != nil { + t.Fatal("error reading fixture:", err) + } + + var extraDataSignedManifest manifest.SignedManifest + err = json.Unmarshal(extraDataManifestBytes, &extraDataSignedManifest) + if err != nil { + t.Fatal("error unmarshaling manifest:", err) + } + + verifiedManifest, err = verifyManifest(&extraDataSignedManifest, expectedDigest) + if err != nil { + t.Fatal("validateManifest failed:", err) + } + 
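+	// The extra data in this fixture must not affect digest verification
+	// or the parsed layer list.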
+ if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { + t.Fatal("unexpected FSLayer in extra data manifest") + } + + // Bad manifest + + badManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/bad_manifest") + if err != nil { + t.Fatal("error reading fixture:", err) + } + + var badSignedManifest manifest.SignedManifest + err = json.Unmarshal(badManifestBytes, &badSignedManifest) + if err != nil { + t.Fatal("error unmarshaling manifest:", err) + } + + verifiedManifest, err = verifyManifest(&badSignedManifest, expectedDigest) + if err == nil || !strings.HasPrefix(err.Error(), "image verification failed for digest") { + t.Fatal("expected validateManifest to fail with digest error") + } + + // Manifest with no signature + + expectedWholeFileDigest := "7ec3615a120efcdfc270e9c7ea4183330775a3e52a09e2efb194b9a7c18e5ff7" + + noSignatureManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/no_signature_manifest") + if err != nil { + t.Fatal("error reading fixture:", err) + } + + var noSignatureSignedManifest manifest.SignedManifest + err = json.Unmarshal(noSignatureManifestBytes, &noSignatureSignedManifest) + if err != nil { + t.Fatal("error unmarshaling manifest:", err) + } + + verifiedManifest, err = verifyManifest(&noSignatureSignedManifest, expectedWholeFileDigest) + if err != nil { + t.Fatal("validateManifest failed:", err) + } + + if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { + t.Fatal("unexpected FSLayer in no-signature manifest") + } +} + +// TestFixManifestLayers checks that fixManifestLayers removes a duplicate +// layer, and that it makes no changes to the manifest when called a second +// time, after the duplicate is removed. +func TestFixManifestLayers(t *testing.T) { + duplicateLayerManifest := manifest.Manifest{ + FSLayers: []manifest.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []manifest.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: 
"{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + duplicateLayerManifestExpectedOutput := manifest.Manifest{ + FSLayers: []manifest.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []manifest.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + if err := fixManifestLayers(&duplicateLayerManifest); err != nil { + t.Fatalf("unexpected error from fixManifestLayers: %v", err) + } + + if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) { + t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest") + } + + // Run fixManifestLayers again and confirm that it doesn't change the + // manifest (which no longer has duplicate layers). 
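+	// In other words, fixManifestLayers must be idempotent once the
+	// duplicates have been removed.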
+ if err := fixManifestLayers(&duplicateLayerManifest); err != nil { + t.Fatalf("unexpected error from fixManifestLayers: %v", err) + } + + if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) { + t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest (second pass)") + } +} + +// TestFixManifestLayersBaseLayerParent makes sure that fixManifestLayers fails +// if the base layer configuration specifies a parent. +func TestFixManifestLayersBaseLayerParent(t *testing.T) { + duplicateLayerManifest := manifest.Manifest{ + FSLayers: []manifest.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []manifest.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: 
"{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"parent\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" 
./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "Invalid parent ID in the base layer of the image.") { + t.Fatalf("expected an invalid parent ID error from fixManifestLayers") + } +} + +// TestFixManifestLayersBadParent makes sure that fixManifestLayers fails +// if an image configuration specifies a parent that doesn't directly follow +// that (deduplicated) image in the image history. +func TestFixManifestLayersBadParent(t *testing.T) { + duplicateLayerManifest := manifest.Manifest{ + FSLayers: []manifest.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []manifest.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: 
"{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "Invalid parent ID.") { + t.Fatalf("expected an invalid parent ID error from fixManifestLayers") + } +} diff --git a/graph/push.go b/graph/push.go new file mode 100644 index 00000000..dba8bb96 --- /dev/null +++ b/graph/push.go @@ -0,0 +1,107 @@ +package graph + +import ( + "fmt" + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/registry" +) + +type ImagePushConfig struct { + MetaHeaders map[string][]string + AuthConfig *cliconfig.AuthConfig + Tag string + OutStream io.Writer +} + +type Pusher interface { + // Push tries to push the image configured at the creation of Pusher. + // Push returns an error if any, as well as a boolean that determines whether to retry Push on the next configured endpoint. + // + // TODO(tiborvass): have Push() take a reference to repository + tag, so that the pusher itself is repository-agnostic. 
+ Push() (fallback bool, err error) +} + +func (s *TagStore) NewPusher(endpoint registry.APIEndpoint, localRepo Repository, repoInfo *registry.RepositoryInfo, imagePushConfig *ImagePushConfig, sf *streamformatter.StreamFormatter) (Pusher, error) { + switch endpoint.Version { + case registry.APIVersion2: + return &v2Pusher{ + TagStore: s, + endpoint: endpoint, + localRepo: localRepo, + repoInfo: repoInfo, + config: imagePushConfig, + sf: sf, + }, nil + case registry.APIVersion1: + return &v1Pusher{ + TagStore: s, + endpoint: endpoint, + localRepo: localRepo, + repoInfo: repoInfo, + config: imagePushConfig, + sf: sf, + }, nil + } + return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) +} + +// FIXME: Allow to interrupt current push when new push of same image is done. +func (s *TagStore) Push(localName string, imagePushConfig *ImagePushConfig) error { + var sf = streamformatter.NewJSONStreamFormatter() + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := s.registryService.ResolveRepository(localName) + if err != nil { + return err + } + + endpoints, err := s.registryService.LookupPushEndpoints(repoInfo.CanonicalName) + if err != nil { + return err + } + + reposLen := 1 + if imagePushConfig.Tag == "" { + reposLen = len(s.Repositories[repoInfo.LocalName]) + } + + imagePushConfig.OutStream.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", repoInfo.CanonicalName, reposLen)) + + // If it fails, try to get the repository + localRepo, exists := s.Repositories[repoInfo.LocalName] + if !exists { + return fmt.Errorf("Repository does not exist: %s", repoInfo.LocalName) + } + + var lastErr error + for _, endpoint := range endpoints { + logrus.Debugf("Trying to push %s to %s %s", repoInfo.CanonicalName, endpoint.URL, endpoint.Version) + + pusher, err := s.NewPusher(endpoint, localRepo, repoInfo, imagePushConfig, sf) + if err != nil { + lastErr = err + continue + } + if fallback, err := pusher.Push(); err != nil { + if fallback { + lastErr = err + continue + } + logrus.Debugf("Not continuing with error: %v", err) + return err + + } + + s.eventsService.Log("push", repoInfo.LocalName, "") + return nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s", repoInfo.CanonicalName) + } + return lastErr +} diff --git a/graph/push_v1.go b/graph/push_v1.go new file mode 100644 index 00000000..71d4c8e8 --- /dev/null +++ b/graph/push_v1.go @@ -0,0 +1,356 @@ +package graph + +import ( + "fmt" + "io" + "os" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progressreader" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" +) + +type v1Pusher struct { + *TagStore + endpoint registry.APIEndpoint + localRepo Repository + repoInfo *registry.RepositoryInfo + config *ImagePushConfig + sf *streamformatter.StreamFormatter + session *registry.Session + + out io.Writer +} + +func (p *v1Pusher) Push() (fallback bool, err error) { + tlsConfig, err := p.registryService.TLSConfig(p.repoInfo.Index.Name) + if err != nil { + return false, err + } + // Adds Docker-specific headers as well as user-specified headers (metaHeaders) + tr := transport.NewTransport( + // TODO(tiborvass): was NoTimeout + registry.NewTransport(tlsConfig), + 
registry.DockerHeaders(p.config.MetaHeaders)...,
+	)
+	client := registry.HTTPClient(tr)
+	v1Endpoint, err := p.endpoint.ToV1Endpoint(p.config.MetaHeaders)
+	if err != nil {
+		logrus.Debugf("Could not get v1 endpoint: %v", err)
+		return true, err
+	}
+	p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint)
+	if err != nil {
+		// TODO(dmcgowan): Check if should fallback
+		return true, err
+	}
+	if err := p.pushRepository(p.config.Tag); err != nil {
+		// TODO(dmcgowan): Check if should fallback
+		return false, err
+	}
+	return false, nil
+}
+
+// Retrieve all the images to be uploaded in the correct order
+func (p *v1Pusher) getImageList(requestedTag string) ([]string, map[string][]string, error) {
+	var (
+		imageList   []string
+		imagesSeen  = make(map[string]bool)
+		tagsByImage = make(map[string][]string)
+	)
+
+	for tag, id := range p.localRepo {
+		if requestedTag != "" && requestedTag != tag {
+			// Include only the requested tag.
+			continue
+		}
+
+		if utils.DigestReference(tag) {
+			// Ignore digest references.
+			continue
+		}
+
+		var imageListForThisTag []string
+
+		tagsByImage[id] = append(tagsByImage[id], tag)
+
+		for img, err := p.graph.Get(id); img != nil; img, err = p.graph.GetParent(img) {
+			if err != nil {
+				return nil, nil, err
+			}
+
+			if imagesSeen[img.ID] {
+				// This image is already on the list, we can ignore it and all its parents
+				break
+			}
+
+			imagesSeen[img.ID] = true
+			imageListForThisTag = append(imageListForThisTag, img.ID)
+		}
+
+		// reverse the image list for this tag (so the "most"-parent image is first)
+		for i, j := 0, len(imageListForThisTag)-1; i < j; i, j = i+1, j-1 {
+			imageListForThisTag[i], imageListForThisTag[j] = imageListForThisTag[j], imageListForThisTag[i]
+		}
+
+		// append to main image list
+		imageList = append(imageList, imageListForThisTag...)
+	}
+	if len(imageList) == 0 {
+		return nil, nil, fmt.Errorf("No images found for the requested repository / tag")
+	}
+	logrus.Debugf("Image list: %v", imageList)
+	logrus.Debugf("Tags by image: %v", tagsByImage)
+
+	return imageList, tagsByImage, nil
+}
+
+// createImageIndex returns an index of an image's layer IDs and tags.
+func (s *TagStore) createImageIndex(images []string, tags map[string][]string) []*registry.ImgData {
+	var imageIndex []*registry.ImgData
+	for _, id := range images {
+		if tags, hasTags := tags[id]; hasTags {
+			// If an image has tags you must add an entry in the image index
+			// for each tag
+			for _, tag := range tags {
+				imageIndex = append(imageIndex, &registry.ImgData{
+					ID:  id,
+					Tag: tag,
+				})
+			}
+			continue
+		}
+		// If the image does not have a tag it still needs to be sent to the
+		// registry with an empty tag so that it is associated with the repository
+		imageIndex = append(imageIndex, &registry.ImgData{
+			ID:  id,
+			Tag: "",
+		})
+	}
+	return imageIndex
+}
+
+type imagePushData struct {
+	id              string
+	compatibilityID string
+	endpoint        string
+	tokens          []string
+}
+
+// lookupImageOnEndpoint checks the specified endpoint to see if an image exists
+// and, if it is absent, sends the image ID down the channel to be pushed.
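+// Workers drain the images channel until pushImageToEndpoint closes it; any
+// ID not yet present on the endpoint is forwarded on the imagesToPush channel.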
+func (p *v1Pusher) lookupImageOnEndpoint(wg *sync.WaitGroup, images chan imagePushData, imagesToPush chan string) {
+	defer wg.Done()
+	for image := range images {
+		if err := p.session.LookupRemoteImage(image.compatibilityID, image.endpoint); err != nil {
+			logrus.Errorf("Error in LookupRemoteImage: %s", err)
+			imagesToPush <- image.id
+			continue
+		}
+		p.out.Write(p.sf.FormatStatus("", "Image %s already pushed, skipping", stringid.TruncateID(image.id)))
+	}
+}
+
+func (p *v1Pusher) pushImageToEndpoint(endpoint string, imageIDs []string, tags map[string][]string, repo *registry.RepositoryData) error {
+	workerCount := len(imageIDs)
+	// start a maximum of 5 workers to check if images exist on the specified endpoint.
+	if workerCount > 5 {
+		workerCount = 5
+	}
+	var (
+		wg           = &sync.WaitGroup{}
+		imageData    = make(chan imagePushData, workerCount*2)
+		imagesToPush = make(chan string, workerCount*2)
+		pushes       = make(chan map[string]struct{}, 1)
+	)
+	for i := 0; i < workerCount; i++ {
+		wg.Add(1)
+		go p.lookupImageOnEndpoint(wg, imageData, imagesToPush)
+	}
+	// start a goroutine that consumes the images to push
+	go func() {
+		shouldPush := make(map[string]struct{})
+		for id := range imagesToPush {
+			shouldPush[id] = struct{}{}
+		}
+		pushes <- shouldPush
+	}()
+	for _, id := range imageIDs {
+		compatibilityID, err := p.getV1ID(id)
+		if err != nil {
+			return err
+		}
+		imageData <- imagePushData{
+			id:              id,
+			compatibilityID: compatibilityID,
+			endpoint:        endpoint,
+			tokens:          repo.Tokens,
+		}
+	}
+	// close the channel to notify the workers that there will be no more images to check.
+	close(imageData)
+	wg.Wait()
+	close(imagesToPush)
+	// wait for all the images that require pushes to be collected into a consumable map.
+	shouldPush := <-pushes
+	// finish by pushing any images and tags to the endpoint. The order in which the
+	// images are pushed matters, which is why we still iterate over the ordered list
+	// of imageIDs.
+	for _, id := range imageIDs {
+		if _, push := shouldPush[id]; push {
+			if _, err := p.pushImage(id, endpoint, repo.Tokens); err != nil {
+				// FIXME: Continue on error?
+				return err
+			}
+		}
+		for _, tag := range tags[id] {
+			p.out.Write(p.sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(id), endpoint+"repositories/"+p.repoInfo.RemoteName+"/tags/"+tag))
+			compatibilityID, err := p.getV1ID(id)
+			if err != nil {
+				return err
+			}
+			if err := p.session.PushRegistryTag(p.repoInfo.RemoteName, compatibilityID, tag, endpoint); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// pushRepository pushes layers that do not already exist on the registry.
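+// Roughly: build the ordered image list, announce it to the index via
+// PushImageJSONIndex, push whatever each returned endpoint is missing, then
+// confirm the completed upload with a final PushImageJSONIndex call.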
+func (p *v1Pusher) pushRepository(tag string) error {
+	logrus.Debugf("Local repo: %s", p.localRepo)
+	p.out = ioutils.NewWriteFlusher(p.config.OutStream)
+	imgList, tags, err := p.getImageList(tag)
+	if err != nil {
+		return err
+	}
+	p.out.Write(p.sf.FormatStatus("", "Sending image list"))
+
+	imageIndex := p.createImageIndex(imgList, tags)
+	logrus.Debugf("Preparing to push %s with the following images and tags", p.localRepo)
+	for _, data := range imageIndex {
+		logrus.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag)
+
+		// convert IDs to compatibilityIDs, imageIndex only used in registry calls
+		data.ID, err = p.getV1ID(data.ID)
+		if err != nil {
+			return err
+		}
+	}
+
+	if _, err := p.poolAdd("push", p.repoInfo.LocalName); err != nil {
+		return err
+	}
+	defer p.poolRemove("push", p.repoInfo.LocalName)
+
+	// Register all the images in a repository with the registry
+	// If an image is not in this list it will not be associated with the repository
+	repoData, err := p.session.PushImageJSONIndex(p.repoInfo.RemoteName, imageIndex, false, nil)
+	if err != nil {
+		return err
+	}
+	nTag := 1
+	if tag == "" {
+		nTag = len(p.localRepo)
+	}
+	p.out.Write(p.sf.FormatStatus("", "Pushing repository %s (%d tags)", p.repoInfo.CanonicalName, nTag))
+	// push the repository to each of the endpoints only if it does not exist.
+	for _, endpoint := range repoData.Endpoints {
+		if err := p.pushImageToEndpoint(endpoint, imgList, tags, repoData); err != nil {
+			return err
+		}
+	}
+	_, err = p.session.PushImageJSONIndex(p.repoInfo.RemoteName, imageIndex, true, repoData.Endpoints)
+	return err
+}
+
+func (p *v1Pusher) pushImage(imgID, ep string, token []string) (checksum string, err error) {
+	jsonRaw, err := p.getV1Config(imgID)
+	if err != nil {
+		return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
+	}
+	p.out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Pushing", nil))
+
+	compatibilityID, err := p.getV1ID(imgID)
+	if err != nil {
+		return "", err
+	}
+
+	// General rule is to use ID for graph accesses and compatibilityID for
+	// calls to session.registry()
+	imgData := &registry.ImgData{
+		ID: compatibilityID,
+	}
+
+	// Send the json
+	if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil {
+		if err == registry.ErrAlreadyExists {
+			p.out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Image already pushed, skipping", nil))
+			return "", nil
+		}
+		return "", err
+	}
+
+	layerData, err := p.graph.TempLayerArchive(imgID, p.sf, p.out)
+	if err != nil {
+		return "", fmt.Errorf("Failed to generate layer archive: %s", err)
+	}
+	defer os.RemoveAll(layerData.Name())
+
+	// Send the layer
+	logrus.Debugf("rendered layer for %s of [%d] size", imgID, layerData.Size)
+
+	checksum, checksumPayload, err := p.session.PushImageLayerRegistry(imgData.ID,
+		progressreader.New(progressreader.Config{
+			In:        layerData,
+			Out:       p.out,
+			Formatter: p.sf,
+			Size:      int(layerData.Size),
+			NewLines:  false,
+			ID:        stringid.TruncateID(imgID),
+			Action:    "Pushing",
+		}), ep, jsonRaw)
+	if err != nil {
+		return "", err
+	}
+	imgData.Checksum = checksum
+	imgData.ChecksumPayload = checksumPayload
+	// Send the checksum
+	if err := p.session.PushImageChecksumRegistry(imgData, ep); err != nil {
+		return "", err
+	}
+
+	p.out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Image successfully pushed", nil))
+	return imgData.Checksum, nil
+}
+
+// getV1ID returns the compatibilityID for the ID in the graph. The compatibilityID
+// is read from the v1Compatibility config file on disk.
+func (p *v1Pusher) getV1ID(id string) (string, error) {
+	jsonData, err := p.getV1Config(id)
+	if err != nil {
+		return "", err
+	}
+	img, err := image.NewImgJSON(jsonData)
+	if err != nil {
+		return "", err
+	}
+	return img.ID, nil
+}
+
+// getV1Config returns the v1Compatibility config for the image in the graph. If
+// there is no v1Compatibility file on disk for the image, one is generated on
+// the fly by GenerateV1CompatibilityChain.
+func (p *v1Pusher) getV1Config(id string) ([]byte, error) {
+	jsonData, err := p.graph.GenerateV1CompatibilityChain(id)
+	if err != nil {
+		return nil, err
+	}
+	return jsonData, nil
+}
diff --git a/graph/push_v2.go b/graph/push_v2.go
new file mode 100644
index 00000000..3f232991
--- /dev/null
+++ b/graph/push_v2.go
@@ -0,0 +1,261 @@
+package graph
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/manifest"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/pkg/progressreader"
+	"github.com/docker/docker/pkg/streamformatter"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/docker/registry"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/utils"
+	"golang.org/x/net/context"
+)
+
+type v2Pusher struct {
+	*TagStore
+	endpoint  registry.APIEndpoint
+	localRepo Repository
+	repoInfo  *registry.RepositoryInfo
+	config    *ImagePushConfig
+	sf        *streamformatter.StreamFormatter
+	repo      distribution.Repository
+}
+
+func (p *v2Pusher) Push() (fallback bool, err error) {
+	p.repo, err = NewV2Repository(p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig)
+	if err != nil {
+		logrus.Debugf("Error getting v2 registry: %v", err)
+		return true, err
+	}
+	return false, p.pushV2Repository(p.config.Tag)
+}
+
+func (p *v2Pusher) getImageTags(askedTag string) ([]string, error) {
+	logrus.Debugf("Checking %q against %#v", askedTag, p.localRepo)
+	if len(askedTag) > 0 {
+		if _, ok := p.localRepo[askedTag]; !ok || utils.DigestReference(askedTag) {
+			return nil, fmt.Errorf("Tag does not exist for %s", askedTag)
+		}
+		return []string{askedTag}, nil
+	}
+	var tags []string
+	for tag := range p.localRepo {
+		if !utils.DigestReference(tag) {
+			tags = append(tags, tag)
+		}
+	}
+	return tags, nil
+}
+
+func (p *v2Pusher) pushV2Repository(tag string) error {
+	localName := p.repoInfo.LocalName
+	if _, err := p.poolAdd("push", localName); err != nil {
+		return err
+	}
+	defer p.poolRemove("push", localName)
+
+	tags, err := p.getImageTags(tag)
+	if err != nil {
+		return fmt.Errorf("error getting tags for %s: %s", localName, err)
+	}
+	if len(tags) == 0 {
+		return fmt.Errorf("no tags to push for %s", localName)
+	}
+
+	for _, tag := range tags {
+		if err := p.pushV2Tag(tag); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (p *v2Pusher) pushV2Tag(tag string) error {
+	logrus.Debugf("Pushing repository: %s:%s", p.repo.Name(), tag)
+
+	layerId, exists := p.localRepo[tag]
+	if !exists {
+		return fmt.Errorf("tag does not exist: %s", tag)
+	}
+
+	layersSeen := make(map[string]bool)
+
+	layer, err := p.graph.Get(layerId)
+	if err != nil {
+		return err
+	}
+
+	m := &manifest.Manifest{
+		Versioned: manifest.Versioned{
+			SchemaVersion: 1,
+		},
+		Name:         p.repo.Name(),
+		Tag:          tag,
+		Architecture: layer.Architecture,
+		FSLayers:     []manifest.FSLayer{},
+		History:      []manifest.History{},
+	}
+
+	var metadata runconfig.Config
+	if layer != nil && layer.Config != nil {
+		metadata = *layer.Config
+	}
+
+	out := p.config.OutStream
+
+	for ; layer != nil; layer, err = p.graph.GetParent(layer) {
+		if err != nil {
+			return err
+		}
+
+		if layersSeen[layer.ID] {
+			break
+		}
+
+		logrus.Debugf("Pushing layer: %s", layer.ID)
+
+		if layer.Config != nil && metadata.Image != layer.ID {
+			if err := runconfig.Merge(&metadata, layer.Config); err != nil {
+				return err
+			}
+		}
+
+		var exists bool
+		dgst, err := p.graph.GetLayerDigest(layer.ID)
+		switch err {
+		case nil:
+			_, err := p.repo.Blobs(context.Background()).Stat(context.Background(), dgst)
+			switch err {
+			case nil:
+				exists = true
+				out.Write(p.sf.FormatProgress(stringid.TruncateID(layer.ID), "Image already exists", nil))
+			case distribution.ErrBlobUnknown:
+				// nop
+			default:
+				out.Write(p.sf.FormatProgress(stringid.TruncateID(layer.ID), "Image push failed", nil))
+				return err
+			}
+		case ErrDigestNotSet:
+			// nop
+		case digest.ErrDigestInvalidFormat, digest.ErrDigestUnsupported:
+			return fmt.Errorf("error getting image checksum: %v", err)
+		}
+
+		// if the digest was empty or not saved, or if the blob does not exist on
+		// the remote repository, then push it.
+		if !exists {
+			if pushDigest, err := p.pushV2Image(p.repo.Blobs(context.Background()), layer); err != nil {
+				return err
+			} else if pushDigest != dgst {
+				// Cache new checksum
+				if err := p.graph.SetLayerDigest(layer.ID, pushDigest); err != nil {
+					return err
+				}
+				dgst = pushDigest
+			}
+		}
+
+		// read v1Compatibility config, generate new if needed
+		jsonData, err := p.graph.GenerateV1CompatibilityChain(layer.ID)
+		if err != nil {
+			return err
+		}
+
+		m.FSLayers = append(m.FSLayers, manifest.FSLayer{BlobSum: dgst})
+		m.History = append(m.History, manifest.History{V1Compatibility: string(jsonData)})
+
+		layersSeen[layer.ID] = true
+	}
+
+	logrus.Infof("Signed manifest for %s:%s using daemon's key: %s", p.repo.Name(), tag, p.trustKey.KeyID())
+	signed, err := manifest.Sign(m, p.trustKey)
+	if err != nil {
+		return err
+	}
+
+	manifestDigest, manifestSize, err := digestFromManifest(signed, p.repo.Name())
+	if err != nil {
+		return err
+	}
+	if manifestDigest != "" {
+		out.Write(p.sf.FormatStatus("", "%s: digest: %s size: %d", tag, manifestDigest, manifestSize))
+	}
+
+	manSvc, err := p.repo.Manifests(context.Background())
+	if err != nil {
+		return err
+	}
+	return manSvc.Put(signed)
+}
+
+func (p *v2Pusher) pushV2Image(bs distribution.BlobService, img *image.Image) (digest.Digest, error) {
+	out := p.config.OutStream
+
+	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Buffering to Disk", nil))
+
+	image, err := p.graph.Get(img.ID)
+	if err != nil {
+		return "", err
+	}
+	arch, err := p.graph.TarLayer(image)
+	if err != nil {
+		return "", err
+	}
+
+	tf, err := p.graph.newTempFile()
+	if err != nil {
+		return "", err
+	}
+	defer func() {
+		tf.Close()
+		os.Remove(tf.Name())
+	}()
+
+	size, dgst, err := bufferToFile(tf, arch)
+	if err != nil {
+		return "", err
+	}
+
+	// Send the layer
+	logrus.Debugf("rendered layer for %s of [%d] size", img.ID, size)
+	layerUpload, err := bs.Create(context.Background())
+	if err != nil {
+		return "", err
+	}
+	defer layerUpload.Close()
+
+	reader := progressreader.New(progressreader.Config{
+		In:        ioutil.NopCloser(tf),
+		Out:       out,
+		Formatter: p.sf,
+		Size:      int(size),
+		NewLines:  false,
+		ID:        stringid.TruncateID(img.ID),
+		Action:    "Pushing",
+	})
+	n, err := layerUpload.ReadFrom(reader)
+	if err != nil {
+		return "", err
+	}
+	if n != size {
+		return "", fmt.Errorf("short upload: only wrote %d of %d", n, size)
+	}
+
+	desc :=
distribution.Descriptor{Digest: dgst} + if _, err := layerUpload.Commit(context.Background(), desc); err != nil { + return "", err + } + + out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Image successfully pushed", nil)) + + return dgst, nil +} diff --git a/graph/registry.go b/graph/registry.go new file mode 100644 index 00000000..d9fada6b --- /dev/null +++ b/graph/registry.go @@ -0,0 +1,112 @@ +package graph + +import ( + "errors" + "net" + "net/http" + "net/url" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +type dumbCredentialStore struct { + auth *cliconfig.AuthConfig +} + +func (dcs dumbCredentialStore) Basic(*url.URL) (string, string) { + return dcs.auth.Username, dcs.auth.Password +} + +// v2 only +func NewV2Repository(repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, metaHeaders http.Header, authConfig *cliconfig.AuthConfig) (distribution.Repository, error) { + ctx := context.Background() + + repoName := repoInfo.CanonicalName + // If endpoint does not support CanonicalName, use the RemoteName instead + if endpoint.TrimHostname { + repoName = repoInfo.RemoteName + } + + // TODO(dmcgowan): Call close idle connections when complete, use keep alive + base := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: endpoint.TLSConfig, + // TODO(dmcgowan): Call close idle connections when complete and use keep alive + DisableKeepAlives: true, + } + + modifiers := registry.DockerHeaders(metaHeaders) + authTransport := transport.NewTransport(base, modifiers...) + pingClient := &http.Client{ + Transport: authTransport, + Timeout: 5 * time.Second, + } + endpointStr := endpoint.URL + "/v2/" + req, err := http.NewRequest("GET", endpointStr, nil) + if err != nil { + return nil, err + } + resp, err := pingClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + versions := auth.APIVersions(resp, endpoint.VersionHeader) + if endpoint.VersionHeader != "" && len(endpoint.Versions) > 0 { + var foundVersion bool + for _, version := range endpoint.Versions { + for _, pingVersion := range versions { + if version == pingVersion { + foundVersion = true + } + } + } + if !foundVersion { + return nil, errors.New("endpoint does not support v2 API") + } + } + + challengeManager := auth.NewSimpleChallengeManager() + if err := challengeManager.AddResponse(resp); err != nil { + return nil, err + } + + creds := dumbCredentialStore{auth: authConfig} + tokenHandler := auth.NewTokenHandler(authTransport, creds, repoName, "push", "pull") + basicHandler := auth.NewBasicHandler(creds) + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) + tr := transport.NewTransport(base, modifiers...) 
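+	// tr now answers 401 challenges from the registry with either a bearer
+	// token or basic credentials, depending on the challenge advertised.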
+ + return client.NewRepository(ctx, repoName, endpoint.URL, tr) +} + +func digestFromManifest(m *manifest.SignedManifest, localName string) (digest.Digest, int, error) { + payload, err := m.Payload() + if err != nil { + // If this failed, the signatures section was corrupted + // or missing. Treat the entire manifest as the payload. + payload = m.Raw + } + manifestDigest, err := digest.FromBytes(payload) + if err != nil { + logrus.Infof("Could not compute manifest digest for %s:%s : %v", localName, m.Tag, err) + } + return manifestDigest, len(payload), nil +} diff --git a/graph/service.go b/graph/service.go new file mode 100644 index 00000000..0066bb1a --- /dev/null +++ b/graph/service.go @@ -0,0 +1,67 @@ +package graph + +import ( + "fmt" + "io" + "runtime" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" +) + +// Lookup return an image encoded in JSON +func (s *TagStore) Lookup(name string) (*types.ImageInspect, error) { + image, err := s.LookupImage(name) + if err != nil || image == nil { + return nil, fmt.Errorf("No such image: %s", name) + } + + imageInspect := &types.ImageInspect{ + Id: image.ID, + Parent: image.Parent, + Comment: image.Comment, + Created: image.Created.Format(time.RFC3339Nano), + Container: image.Container, + ContainerConfig: &image.ContainerConfig, + DockerVersion: image.DockerVersion, + Author: image.Author, + Config: image.Config, + Architecture: image.Architecture, + Os: image.OS, + Size: image.Size, + VirtualSize: s.graph.GetParentsSize(image, 0) + image.Size, + } + + imageInspect.GraphDriver.Name = s.graph.driver.String() + + graphDriverData, err := s.graph.driver.GetMetadata(image.ID) + if err != nil { + return nil, err + } + imageInspect.GraphDriver.Data = graphDriverData + return imageInspect, nil +} + +// ImageTarLayer return the tarLayer of the image +func (s *TagStore) ImageTarLayer(name string, dest io.Writer) error { + if image, err := s.LookupImage(name); err == nil && image != nil { + // On Windows, the base layer cannot be exported + if runtime.GOOS != "windows" || image.Parent != "" { + + fs, err := s.graph.TarLayer(image) + if err != nil { + return err + } + defer fs.Close() + + written, err := io.Copy(dest, fs) + if err != nil { + return err + } + logrus.Debugf("rendered layer for %s of [%d] size", image.ID, written) + } + return nil + } + return fmt.Errorf("No such image: %s", name) +} diff --git a/graph/tags.go b/graph/tags.go new file mode 100644 index 00000000..7e16abac --- /dev/null +++ b/graph/tags.go @@ -0,0 +1,436 @@ +package graph + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "sync" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/daemon/events" + "github.com/docker/docker/graph/tags" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" + "github.com/docker/libtrust" +) + +const DEFAULTTAG = "latest" + +type TagStore struct { + path string + graph *Graph + Repositories map[string]Repository + trustKey libtrust.PrivateKey + sync.Mutex + // FIXME: move push/pull-related fields + // to a helper type + pullingPool map[string]chan struct{} + pushingPool map[string]chan struct{} + registryService *registry.Service + eventsService *events.Events +} + +type Repository map[string]string + +// update Repository mapping with content of u +func (r Repository) Update(u Repository) { + for 
k, v := range u { + r[k] = v + } +} + +// return true if the contents of u Repository, are wholly contained in r Repository +func (r Repository) Contains(u Repository) bool { + for k, v := range u { + // if u's key is not present in r OR u's key is present, but not the same value + if rv, ok := r[k]; !ok || (ok && rv != v) { + return false + } + } + return true +} + +type TagStoreConfig struct { + Graph *Graph + Key libtrust.PrivateKey + Registry *registry.Service + Events *events.Events +} + +func NewTagStore(path string, cfg *TagStoreConfig) (*TagStore, error) { + abspath, err := filepath.Abs(path) + if err != nil { + return nil, err + } + + store := &TagStore{ + path: abspath, + graph: cfg.Graph, + trustKey: cfg.Key, + Repositories: make(map[string]Repository), + pullingPool: make(map[string]chan struct{}), + pushingPool: make(map[string]chan struct{}), + registryService: cfg.Registry, + eventsService: cfg.Events, + } + // Load the json file if it exists, otherwise create it. + if err := store.reload(); os.IsNotExist(err) { + if err := store.save(); err != nil { + return nil, err + } + } else if err != nil { + return nil, err + } + return store, nil +} + +func (store *TagStore) save() error { + // Store the json ball + jsonData, err := json.Marshal(store) + if err != nil { + return err + } + if err := ioutil.WriteFile(store.path, jsonData, 0600); err != nil { + return err + } + return nil +} + +func (store *TagStore) reload() error { + f, err := os.Open(store.path) + if err != nil { + return err + } + defer f.Close() + if err := json.NewDecoder(f).Decode(&store); err != nil { + return err + } + return nil +} + +func (store *TagStore) LookupImage(name string) (*image.Image, error) { + // FIXME: standardize on returning nil when the image doesn't exist, and err for everything else + // (so we can pass all errors here) + repoName, ref := parsers.ParseRepositoryTag(name) + if ref == "" { + ref = DEFAULTTAG + } + var ( + err error + img *image.Image + ) + + img, err = store.GetImage(repoName, ref) + if err != nil { + return nil, err + } + + if img != nil { + return img, err + } + + // name must be an image ID. + store.Lock() + defer store.Unlock() + if img, err = store.graph.Get(name); err != nil { + return nil, err + } + + return img, nil +} + +// Return a reverse-lookup table of all the names which refer to each image +// Eg. 
{"43b5f19b10584": {"base:latest", "base:v1"}} +func (store *TagStore) ByID() map[string][]string { + store.Lock() + defer store.Unlock() + byID := make(map[string][]string) + for repoName, repository := range store.Repositories { + for tag, id := range repository { + name := utils.ImageReference(repoName, tag) + if _, exists := byID[id]; !exists { + byID[id] = []string{name} + } else { + byID[id] = append(byID[id], name) + sort.Strings(byID[id]) + } + } + } + return byID +} + +func (store *TagStore) ImageName(id string) string { + if names, exists := store.ByID()[id]; exists && len(names) > 0 { + return names[0] + } + return stringid.TruncateID(id) +} + +func (store *TagStore) DeleteAll(id string) error { + names, exists := store.ByID()[id] + if !exists || len(names) == 0 { + return nil + } + for _, name := range names { + if strings.Contains(name, ":") { + nameParts := strings.Split(name, ":") + if _, err := store.Delete(nameParts[0], nameParts[1]); err != nil { + return err + } + } else { + if _, err := store.Delete(name, ""); err != nil { + return err + } + } + } + return nil +} + +func (store *TagStore) Delete(repoName, ref string) (bool, error) { + store.Lock() + defer store.Unlock() + deleted := false + if err := store.reload(); err != nil { + return false, err + } + + repoName = registry.NormalizeLocalName(repoName) + + if ref == "" { + // Delete the whole repository. + delete(store.Repositories, repoName) + return true, store.save() + } + + repoRefs, exists := store.Repositories[repoName] + if !exists { + return false, fmt.Errorf("No such repository: %s", repoName) + } + + if _, exists := repoRefs[ref]; exists { + delete(repoRefs, ref) + if len(repoRefs) == 0 { + delete(store.Repositories, repoName) + } + deleted = true + } + + return deleted, store.save() +} + +func (store *TagStore) Tag(repoName, tag, imageName string, force bool) error { + return store.SetLoad(repoName, tag, imageName, force, nil) +} + +func (store *TagStore) SetLoad(repoName, tag, imageName string, force bool, out io.Writer) error { + img, err := store.LookupImage(imageName) + store.Lock() + defer store.Unlock() + if err != nil { + return err + } + if tag == "" { + tag = tags.DEFAULTTAG + } + if err := validateRepoName(repoName); err != nil { + return err + } + if err := tags.ValidateTagName(tag); err != nil { + if _, formatError := err.(tags.ErrTagInvalidFormat); !formatError { + return err + } + if _, dErr := digest.ParseDigest(tag); dErr != nil { + // Still return the tag validation error. + // It's more likely to be a user generated issue. + return err + } + } + if err := store.reload(); err != nil { + return err + } + var repo Repository + repoName = registry.NormalizeLocalName(repoName) + if r, exists := store.Repositories[repoName]; exists { + repo = r + if old, exists := store.Repositories[repoName][tag]; exists { + + if !force { + return fmt.Errorf("Conflict: Tag %s is already set to image %s, if you want to replace it, please use -f option", tag, old) + } + + if old != img.ID && out != nil { + + fmt.Fprintf(out, "The image %s:%s already exists, renaming the old one with ID %s to empty string\n", repoName, tag, old[:12]) + + } + } + } else { + repo = make(map[string]string) + store.Repositories[repoName] = repo + } + repo[tag] = img.ID + return store.save() +} + +// SetDigest creates a digest reference to an image ID. 
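+// Unlike tags, digest references are effectively immutable: re-pointing an
+// existing digest at a different image ID is rejected as a conflict below.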
+func (store *TagStore) SetDigest(repoName, digest, imageName string) error { + img, err := store.LookupImage(imageName) + if err != nil { + return err + } + + if err := validateRepoName(repoName); err != nil { + return err + } + + if err := validateDigest(digest); err != nil { + return err + } + + store.Lock() + defer store.Unlock() + if err := store.reload(); err != nil { + return err + } + + repoName = registry.NormalizeLocalName(repoName) + repoRefs, exists := store.Repositories[repoName] + if !exists { + repoRefs = Repository{} + store.Repositories[repoName] = repoRefs + } else if oldID, exists := repoRefs[digest]; exists && oldID != img.ID { + return fmt.Errorf("Conflict: Digest %s is already set to image %s", digest, oldID) + } + + repoRefs[digest] = img.ID + return store.save() +} + +func (store *TagStore) Get(repoName string) (Repository, error) { + store.Lock() + defer store.Unlock() + if err := store.reload(); err != nil { + return nil, err + } + repoName = registry.NormalizeLocalName(repoName) + if r, exists := store.Repositories[repoName]; exists { + return r, nil + } + return nil, nil +} + +func (store *TagStore) GetImage(repoName, refOrID string) (*image.Image, error) { + repo, err := store.Get(repoName) + + if err != nil { + return nil, err + } + if repo == nil { + return nil, nil + } + + store.Lock() + defer store.Unlock() + if imgID, exists := repo[refOrID]; exists { + return store.graph.Get(imgID) + } + + // If no matching tag is found, search through images for a matching image id + // iff it looks like a short ID or would look like a short ID + if stringid.IsShortID(stringid.TruncateID(refOrID)) { + for _, revision := range repo { + if strings.HasPrefix(revision, refOrID) { + return store.graph.Get(revision) + } + } + } + + return nil, nil +} + +func (store *TagStore) GetRepoRefs() map[string][]string { + store.Lock() + reporefs := make(map[string][]string) + + for name, repository := range store.Repositories { + for tag, id := range repository { + shortID := stringid.TruncateID(id) + reporefs[shortID] = append(reporefs[shortID], utils.ImageReference(name, tag)) + } + } + store.Unlock() + return reporefs +} + +// Validate the name of a repository +func validateRepoName(name string) error { + if name == "" { + return fmt.Errorf("Repository name can't be empty") + } + if name == "scratch" { + return fmt.Errorf("'scratch' is a reserved name") + } + return nil +} + +func validateDigest(dgst string) error { + if dgst == "" { + return errors.New("digest can't be empty") + } + if _, err := digest.ParseDigest(dgst); err != nil { + return err + } + return nil +} + +func (store *TagStore) poolAdd(kind, key string) (chan struct{}, error) { + store.Lock() + defer store.Unlock() + + if c, exists := store.pullingPool[key]; exists { + return c, fmt.Errorf("pull %s is already in progress", key) + } + if c, exists := store.pushingPool[key]; exists { + return c, fmt.Errorf("push %s is already in progress", key) + } + + c := make(chan struct{}) + switch kind { + case "pull": + store.pullingPool[key] = c + case "push": + store.pushingPool[key] = c + default: + return nil, fmt.Errorf("Unknown pool type") + } + return c, nil +} + +func (store *TagStore) poolRemove(kind, key string) error { + store.Lock() + defer store.Unlock() + switch kind { + case "pull": + if c, exists := store.pullingPool[key]; exists { + close(c) + delete(store.pullingPool, key) + } + case "push": + if c, exists := store.pushingPool[key]; exists { + close(c) + delete(store.pushingPool, key) + } + default: + return 
fmt.Errorf("Unknown pool type")
+	}
+	return nil
+}
diff --git a/graph/tags/tags.go b/graph/tags/tags.go
new file mode 100644
index 00000000..cbd0f6bc
--- /dev/null
+++ b/graph/tags/tags.go
@@ -0,0 +1,29 @@
+package tags
+
+import (
+	"fmt"
+
+	"github.com/docker/distribution/registry/api/v2"
+)
+
+const DEFAULTTAG = "latest"
+
+type ErrTagInvalidFormat struct {
+	name string
+}
+
+func (e ErrTagInvalidFormat) Error() string {
+	return fmt.Sprintf("Illegal tag name (%s): only [A-Za-z0-9_.-] are allowed ('.' and '-' are not allowed as the first character), minimum 1, maximum 128 in length", e.name)
+}
+
+// ValidateTagName validates the name of a tag
+func ValidateTagName(name string) error {
+	if name == "" {
+		return fmt.Errorf("tag name can't be empty")
+	}
+
+	if !v2.TagNameAnchoredRegexp.MatchString(name) {
+		return ErrTagInvalidFormat{name}
+	}
+	return nil
+}
diff --git a/graph/tags/tags_unit_test.go b/graph/tags/tags_unit_test.go
new file mode 100644
index 00000000..5114da10
--- /dev/null
+++ b/graph/tags/tags_unit_test.go
@@ -0,0 +1,23 @@
+package tags
+
+import (
+	"testing"
+)
+
+func TestValidTagName(t *testing.T) {
+	validTags := []string{"9", "foo", "foo-test", "bar.baz.boo"}
+	for _, tag := range validTags {
+		if err := ValidateTagName(tag); err != nil {
+			t.Errorf("'%s' should've been a valid tag", tag)
+		}
+	}
+}
+
+func TestInvalidTagName(t *testing.T) {
+	invalidTags := []string{"-9", ".foo", "-test", ".", "-"}
+	for _, tag := range invalidTags {
+		if err := ValidateTagName(tag); err == nil {
+			t.Errorf("'%s' shouldn't have been a valid tag", tag)
+		}
+	}
+}
diff --git a/graph/tags_unit_test.go b/graph/tags_unit_test.go
new file mode 100644
index 00000000..c5f2ed11
--- /dev/null
+++ b/graph/tags_unit_test.go
@@ -0,0 +1,204 @@
+package graph
+
+import (
+	"archive/tar"
+	"bytes"
+	"io"
+	"os"
+	"path"
+	"testing"
+
+	"github.com/docker/docker/daemon/events"
+	"github.com/docker/docker/daemon/graphdriver"
+	_ "github.com/docker/docker/daemon/graphdriver/vfs" // import the vfs driver so it is used in the tests
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/utils"
+)
+
+const (
+	testOfficialImageName    = "myapp"
+	testOfficialImageID      = "1a2d3c4d4e5fa2d2a21acea242a5e2345d3aefc3e7dfa2a2a2a21a2a2ad2d234"
+	testOfficialImageIDShort = "1a2d3c4d4e5f"
+	testPrivateImageName     = "127.0.0.1:8000/privateapp"
+	testPrivateImageID       = "5bc255f8699e4ee89ac4469266c3d11515da88fdcbde45d7b069b636ff4efd81"
+	testPrivateImageIDShort  = "5bc255f8699e"
+	testPrivateImageDigest   = "sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb"
+	testPrivateImageTag      = "sometag"
+)
+
+func fakeTar() (io.Reader, error) {
+	uid := os.Getuid()
+	gid := os.Getgid()
+
+	content := []byte("Hello world!\n")
+	buf := new(bytes.Buffer)
+	tw := tar.NewWriter(buf)
+	for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} {
+		hdr := new(tar.Header)
+
+		// Leaving these fields blank requires root privileges
+		hdr.Uid = uid
+		hdr.Gid = gid
+
+		hdr.Size = int64(len(content))
+		hdr.Name = name
+		if err := tw.WriteHeader(hdr); err != nil {
+			return nil, err
+		}
+		tw.Write([]byte(content))
+	}
+	tw.Close()
+	return buf, nil
+}
+
+func mkTestTagStore(root string, t *testing.T) *TagStore {
+	driver, err := graphdriver.New(root, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	graph, err := NewGraph(root, driver)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	tagCfg := &TagStoreConfig{
+		Graph:  graph,
+		Events: events.New(),
+	}
+	store, err :=
NewTagStore(path.Join(root, "tags"), tagCfg) + if err != nil { + t.Fatal(err) + } + officialArchive, err := fakeTar() + if err != nil { + t.Fatal(err) + } + img := &image.Image{ID: testOfficialImageID} + if err := graph.Register(v1ImageDescriptor{img}, officialArchive); err != nil { + t.Fatal(err) + } + if err := store.Tag(testOfficialImageName, "", testOfficialImageID, false); err != nil { + t.Fatal(err) + } + privateArchive, err := fakeTar() + if err != nil { + t.Fatal(err) + } + img = &image.Image{ID: testPrivateImageID} + if err := graph.Register(v1ImageDescriptor{img}, privateArchive); err != nil { + t.Fatal(err) + } + if err := store.Tag(testPrivateImageName, "", testPrivateImageID, false); err != nil { + t.Fatal(err) + } + if err := store.SetDigest(testPrivateImageName, testPrivateImageDigest, testPrivateImageID); err != nil { + t.Fatal(err) + } + return store +} + +func TestLookupImage(t *testing.T) { + tmp, err := utils.TestDirectory("") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + store := mkTestTagStore(tmp, t) + defer store.graph.driver.Cleanup() + + officialLookups := []string{ + testOfficialImageID, + testOfficialImageIDShort, + testOfficialImageName + ":" + testOfficialImageID, + testOfficialImageName + ":" + testOfficialImageIDShort, + testOfficialImageName, + testOfficialImageName + ":" + DEFAULTTAG, + "docker.io/" + testOfficialImageName, + "docker.io/" + testOfficialImageName + ":" + DEFAULTTAG, + "index.docker.io/" + testOfficialImageName, + "index.docker.io/" + testOfficialImageName + ":" + DEFAULTTAG, + "library/" + testOfficialImageName, + "library/" + testOfficialImageName + ":" + DEFAULTTAG, + "docker.io/library/" + testOfficialImageName, + "docker.io/library/" + testOfficialImageName + ":" + DEFAULTTAG, + "index.docker.io/library/" + testOfficialImageName, + "index.docker.io/library/" + testOfficialImageName + ":" + DEFAULTTAG, + } + + privateLookups := []string{ + testPrivateImageID, + testPrivateImageIDShort, + testPrivateImageName + ":" + testPrivateImageID, + testPrivateImageName + ":" + testPrivateImageIDShort, + testPrivateImageName, + testPrivateImageName + ":" + DEFAULTTAG, + } + + invalidLookups := []string{ + testOfficialImageName + ":" + "fail", + "fail:fail", + } + + digestLookups := []string{ + testPrivateImageName + "@" + testPrivateImageDigest, + } + + for _, name := range officialLookups { + if img, err := store.LookupImage(name); err != nil { + t.Errorf("Error looking up %s: %s", name, err) + } else if img == nil { + t.Errorf("Expected 1 image, none found: %s", name) + } else if img.ID != testOfficialImageID { + t.Errorf("Expected ID '%s' found '%s'", testOfficialImageID, img.ID) + } + } + + for _, name := range privateLookups { + if img, err := store.LookupImage(name); err != nil { + t.Errorf("Error looking up %s: %s", name, err) + } else if img == nil { + t.Errorf("Expected 1 image, none found: %s", name) + } else if img.ID != testPrivateImageID { + t.Errorf("Expected ID '%s' found '%s'", testPrivateImageID, img.ID) + } + } + + for _, name := range invalidLookups { + if img, err := store.LookupImage(name); err == nil { + t.Errorf("Expected error, none found: %s", name) + } else if img != nil { + t.Errorf("Expected 0 image, 1 found: %s", name) + } + } + + for _, name := range digestLookups { + if img, err := store.LookupImage(name); err != nil { + t.Errorf("Error looking up %s: %s", name, err) + } else if img == nil { + t.Errorf("Expected 1 image, none found: %s", name) + } else if img.ID != testPrivateImageID { + 
t.Errorf("Expected ID '%s' found '%s'", testPrivateImageID, img.ID) + } + } +} + +func TestValidateDigest(t *testing.T) { + tests := []struct { + input string + expectError bool + }{ + {"", true}, + {"latest", true}, + {"sha256:b", false}, + {"tarsum+v1+sha256:bY852-_.+=", false}, + {"#$%#$^:$%^#$%", true}, + } + + for i, test := range tests { + err := validateDigest(test.input) + gotError := err != nil + if e, a := test.expectError, gotError; e != a { + t.Errorf("%d: with input %s, expected error=%t, got %t: %s", i, test.input, test.expectError, gotError, err) + } + } +} diff --git a/hack/.vendor-helpers.sh b/hack/.vendor-helpers.sh new file mode 100755 index 00000000..9b8b1a36 --- /dev/null +++ b/hack/.vendor-helpers.sh @@ -0,0 +1,123 @@ +#!/usr/bin/env bash + +# Downloads dependencies into vendor/ directory +mkdir -p vendor + +rm -rf .gopath +mkdir -p .gopath/src/github.com/docker +ln -sf ../../../.. .gopath/src/github.com/docker/docker +export GOPATH="${PWD}/.gopath:${PWD}/vendor" + +clone() { + local vcs="$1" + local pkg="$2" + local rev="$3" + local url="$4" + + : ${url:=https://$pkg} + local target="vendor/src/$pkg" + + echo -n "$pkg @ $rev: " + + if [ -d "$target" ]; then + echo -n 'rm old, ' + rm -rf "$target" + fi + + echo -n 'clone, ' + case "$vcs" in + git) + git clone --quiet --no-checkout "$url" "$target" + ( cd "$target" && git reset --quiet --hard "$rev" ) + ;; + hg) + hg clone --quiet --updaterev "$rev" "$url" "$target" + ;; + esac + + echo -n 'rm VCS, ' + ( cd "$target" && rm -rf .{git,hg} ) + + echo -n 'rm vendor, ' + ( cd "$target" && rm -rf vendor Godeps/_workspace ) + + echo done +} + +# get an ENV from the Dockerfile with support for multiline values +_dockerfile_env() { + local e="$1" + awk ' + $1 == "ENV" && $2 == "'"$e"'" { + sub(/^ENV +([^ ]+) +/, ""); + inEnv = 1; + } + inEnv { + if (sub(/\\$/, "")) { + printf "%s", $0; + next; + } + print; + exit; + } + ' Dockerfile +} + +clean() { + local packages=( + github.com/docker/docker/docker # package main + github.com/docker/docker/dockerinit # package main + github.com/docker/docker/integration-cli # external tests + ) + + local dockerPlatforms=( linux/amd64 windows/amd64 $(_dockerfile_env DOCKER_CROSSPLATFORMS) ) + local dockerBuildTags="$(_dockerfile_env DOCKER_BUILDTAGS)" + local buildTagCombos=( + '' + 'experimental' + "$dockerBuildTags" + "daemon $dockerBuildTags" + "daemon cgo $dockerBuildTags" + "experimental $dockerBuildTags" + "experimental daemon $dockerBuildTags" + "experimental daemon cgo $dockerBuildTags" + ) + + echo + + echo -n 'collecting import graph, ' + local IFS=$'\n' + local imports=( $( + for platform in "${dockerPlatforms[@]}"; do + export GOOS="${platform%/*}"; + export GOARCH="${platform##*/}"; + for buildTags in "${buildTagCombos[@]}"; do + go list -e -tags "$buildTags" -f '{{join .Deps "\n"}}' "${packages[@]}" + done + done | grep -vE '^github.com/docker/docker' | sort -u + ) ) + imports=( $(go list -e -f '{{if not .Standard}}{{.ImportPath}}{{end}}' "${imports[@]}") ) + unset IFS + + echo -n 'pruning unused packages, ' + findArgs=( + # This directory contains only .c and .h files which are necessary + -path vendor/src/github.com/mattn/go-sqlite3/code + ) + for import in "${imports[@]}"; do + [ "${#findArgs[@]}" -eq 0 ] || findArgs+=( -or ) + findArgs+=( -path "vendor/src/$import" ) + done + local IFS=$'\n' + local prune=( $(find vendor -depth -type d -not '(' "${findArgs[@]}" ')') ) + unset IFS + for dir in "${prune[@]}"; do + find "$dir" -maxdepth 1 -not -type d -not -name 'LICENSE*' -not 
-name 'COPYING*' -exec rm -v -f '{}' +
+		rmdir "$dir" 2>/dev/null || true
+	done
+
+	echo -n 'pruning unused files, '
+	find vendor -type f -name '*_test.go' -exec rm -v '{}' +
+
+	echo done
+}
diff --git a/hack/dind b/hack/dind
new file mode 100755
index 00000000..9289ba65
--- /dev/null
+++ b/hack/dind
@@ -0,0 +1,112 @@
+#!/bin/bash
+set -e
+
+# DinD: a wrapper script which allows docker to be run inside a docker container.
+# Original version by Jerome Petazzoni
+# See the blog post: https://blog.docker.com/2013/09/docker-can-now-run-within-docker/
+#
+# This script should be executed inside a docker container in privileged mode
+# ('docker run --privileged', introduced in docker 0.6).
+
+# Usage: dind CMD [ARG...]
+
+# apparmor sucks and Docker needs to know that it's in a container (c) @tianon
+export container=docker
+
+# First, make sure that cgroups are mounted correctly.
+CGROUP=/cgroup
+
+mkdir -p "$CGROUP"
+
+if ! mountpoint -q "$CGROUP"; then
+	mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP || {
+		echo >&2 'Could not make a tmpfs mount. Did you use --privileged?'
+		exit 1
+	}
+fi
+
+if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
+	mount -t securityfs none /sys/kernel/security || {
+		echo >&2 'Could not mount /sys/kernel/security.'
+		echo >&2 'AppArmor detection and -privileged mode might break.'
+	}
+fi
+
+# Mount the cgroup hierarchies exactly as they are in the parent system.
+for HIER in $(cut -d: -f2 /proc/1/cgroup); do
+
+	# The following sections address a bug which manifests itself
+	# by a cryptic "lxc-start: no ns_cgroup option specified" when
+	# trying to start containers within a container.
+	# The bug seems to appear when the cgroup hierarchies are not
+	# mounted on the exact same directories in the host, and in the
+	# container.
+
+	SUBSYSTEMS="${HIER%name=*}"
+
+	# If a cgroup hierarchy is named (mounted with "-o name=foo"), we
+	# need to mount it at $CGROUP/foo to create the exact same
+	# directories as on the host. Otherwise we need to mount it as is,
+	# e.g. "subsys1,subsys2" if it has two subsystems.
+
+	# Named, control-less cgroups are mounted with "-o name=foo"
+	# (and appear as such under /proc/<pid>/cgroup) but are usually
+	# mounted on a directory named "foo" (without the "name=" prefix).
+	# Systemd and OpenRC (and possibly others) both create such a
+	# cgroup. So just mount them on directory $CGROUP/foo.
+
+	OHIER=$HIER
+	HIER="${HIER#*name=}"
+
+	mkdir -p "$CGROUP/$HIER"
+
+	if ! mountpoint -q "$CGROUP/$HIER"; then
+		mount -n -t cgroup -o "$OHIER" cgroup "$CGROUP/$HIER"
+	fi
+
+	# Likewise, on at least one system, it has been reported that
+	# systemd would mount the CPU and CPU accounting controllers
+	# (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu"
+	# but on a directory called "cpu,cpuacct" (note the inversion
+	# in the order of the groups). This tries to work around it.
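+	# (i.e. when the hierarchy is mounted as "cpuacct,cpu", also expose the
+	# inverted "cpu,cpuacct" name as a symlink so either spelling resolves)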
+
+	if [ "$HIER" = 'cpuacct,cpu' ]; then
+		ln -s "$HIER" "$CGROUP/cpu,cpuacct"
+	fi
+
+	# If a hierarchy has multiple subsystems, we will see a
+	# ":subsys1,subsys2,subsys3,name=foo:" substring in /proc/<pid>/cgroup;
+	# we need to mount it at "$CGROUP/foo", and if there was no name,
+	# at "$CGROUP/subsys1,subsys2,subsys3". We must then create symlinks
+	# for the docker daemon to find these subsystems:
+	# ln -s $CGROUP/foo $CGROUP/subsys1
+	# ln -s $CGROUP/subsys1,subsys2,subsys3 $CGROUP/subsys1
+
+	if [ "$SUBSYSTEMS" != "${SUBSYSTEMS//,/ }" ]; then
+		SUBSYSTEMS="${SUBSYSTEMS//,/ }"
+		for SUBSYS in $SUBSYSTEMS
+		do
+			ln -s "$CGROUP/$HIER" "$CGROUP/$SUBSYS"
+		done
+	fi
+done
+
+# Note: as I write these lines, the LXC userland tools cannot setup
+# a "sub-container" properly if the "devices" cgroup is not in its
+# own hierarchy. Let's detect this and issue a warning.
+if ! grep -q :devices: /proc/1/cgroup; then
+	echo >&2 'WARNING: the "devices" cgroup should be in its own hierarchy.'
+fi
+if ! grep -qw devices /proc/1/cgroup; then
+	echo >&2 'WARNING: it looks like the "devices" cgroup is not mounted.'
+fi
+
+# Mount /tmp
+mount -t tmpfs none /tmp
+
+if [ $# -gt 0 ]; then
+	exec "$@"
+fi
+
+echo >&2 'ERROR: No command specified.'
+echo >&2 'You probably want to run hack/make.sh, or maybe a shell?'
diff --git a/hack/generate-authors.sh b/hack/generate-authors.sh
new file mode 100755
index 00000000..e78a97f9
--- /dev/null
+++ b/hack/generate-authors.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+set -e
+
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.."
+
+# see also ".mailmap" for how email addresses and names are deduplicated
+
+{
+	cat <<-'EOH'
+	# This file lists all individuals having contributed content to the repository.
+	# For how it is generated, see `hack/generate-authors.sh`.
+	EOH
+	echo
+	git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf
+} > AUTHORS
diff --git a/hack/install.sh b/hack/install.sh
new file mode 100755
index 00000000..748ad321
--- /dev/null
+++ b/hack/install.sh
@@ -0,0 +1,372 @@
+#!/bin/sh
+set -e
+#
+# This script is meant for quick & easy install via:
+#   'curl -sSL https://get.docker.com/ | sh'
+# or:
+#   'wget -qO- https://get.docker.com/ | sh'
+#
+# For test builds (ie. release candidates):
+#   'curl -sSL https://test.docker.com/ | sh'
+# or:
+#   'wget -qO- https://test.docker.com/ | sh'
+#
+# For experimental builds:
+#   'curl -sSL https://experimental.docker.com/ | sh'
+# or:
+#   'wget -qO- https://experimental.docker.com/ | sh'
+#
+# Docker Maintainers:
+#   To update this script on https://get.docker.com,
+#   use hack/release.sh during a normal release,
+#   or the following one-liner for script hotfixes:
+#     s3cmd put --acl-public -P hack/install.sh s3://get.docker.com/index
+#
+
+url='https://get.docker.com/'
+
+command_exists() {
+	command -v "$@" > /dev/null 2>&1
+}
+
+echo_docker_as_nonroot() {
+	if command_exists docker && [ -e /var/run/docker.sock ]; then
+		(
+			set -x
+			$sh_c 'docker version'
+		) || true
+	fi
+	your_user=your-user
+	[ "$user" != 'root' ] && your_user="$user"
+	# intentionally mixed spaces and tabs here -- tabs are stripped by "<<-EOF", spaces are kept in the output
+	cat <<-EOF
+
+	If you would like to use Docker as a non-root user, you should now consider
+	adding your user to the "docker" group with something like:
+
+	  sudo usermod -aG docker $your_user
+
+	Remember that you will have to log out and back in for this to take effect!
+ + EOF +} + +# Check if this is a forked Linux distro +check_forked() { + # Check for lsb_release command existence, it usually exists in forked distros + if command_exists lsb_release; then + # Check if the `-u` option is supported + lsb_release -a -u > /dev/null 2>&1 + + # Check if the command has exited successfully, it means we're in a forked distro + if [ "$?" = "0" ]; then + # Print info about current distro + cat <<-EOF + You're using '$lsb_dist' version '$dist_version'. + EOF + + # Get the upstream release info + lsb_dist=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'id' | cut -d ':' -f 2 | tr -d '[[:space:]]') + dist_version=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'codename' | cut -d ':' -f 2 | tr -d '[[:space:]]') + + # Print info about upstream distro + cat <<-EOF + Upstream release is '$lsb_dist' version '$dist_version'. + EOF + fi + fi +} + +do_install() { + case "$(uname -m)" in + *64) + ;; + *) + cat >&2 <<-'EOF' + Error: you are not using a 64bit platform. + Docker currently only supports 64bit platforms. + EOF + exit 1 + ;; + esac + + if command_exists docker; then + cat >&2 <<-'EOF' + Warning: the "docker" command appears to already exist on this system. + + If you already have Docker installed, this script can cause trouble, which is + why we're displaying this warning and provide the opportunity to cancel the + installation. + + If you installed the current Docker package using this script and are using it + again to update Docker, you can safely ignore this message. + + You may press Ctrl+C now to abort this script. + EOF + ( set -x; sleep 20 ) + fi + + user="$(id -un 2>/dev/null || true)" + + sh_c='sh -c' + if [ "$user" != 'root' ]; then + if command_exists sudo; then + sh_c='sudo -E sh -c' + elif command_exists su; then + sh_c='su -c' + else + cat >&2 <<-'EOF' + Error: this installer needs the ability to run commands as root. + We are unable to find either "sudo" or "su" available to make this happen. + EOF + exit 1 + fi + fi + + curl='' + if command_exists curl; then + curl='curl -sSL' + elif command_exists wget; then + curl='wget -qO-' + elif command_exists busybox && busybox --list-modules | grep -q wget; then + curl='busybox wget -qO-' + fi + + # check to see which repo they are trying to install from + repo='main' + if [ "https://test.docker.com/" = "$url" ]; then + repo='testing' + elif [ "https://experimental.docker.com/" = "$url" ]; then + repo='experimental' + fi + + # perform some very rudimentary platform detection + lsb_dist='' + dist_version='' + if command_exists lsb_release; then + lsb_dist="$(lsb_release -si)" + fi + if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then + lsb_dist="$(. /etc/lsb-release && echo "$DISTRIB_ID")" + fi + if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then + lsb_dist='debian' + fi + if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then + lsb_dist='fedora' + fi + if [ -z "$lsb_dist" ] && [ -r /etc/oracle-release ]; then + lsb_dist='oracleserver' + fi + if [ -z "$lsb_dist" ]; then + if [ -r /etc/centos-release ] || [ -r /etc/redhat-release ]; then + lsb_dist='centos' + fi + fi + if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then + lsb_dist="$(. /etc/os-release && echo "$ID")" + fi + + lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')" + + case "$lsb_dist" in + + ubuntu) + if command_exists lsb_release; then + dist_version="$(lsb_release --codename | cut -f2)" + fi + if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then + dist_version="$(. 
/etc/lsb-release && echo "$DISTRIB_CODENAME")" + fi + ;; + + debian) + dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')" + case "$dist_version" in + 8) + dist_version="jessie" + ;; + 7) + dist_version="wheezy" + ;; + esac + ;; + + oracleserver) + # need to switch lsb_dist to match yum repo URL + lsb_dist="oraclelinux" + dist_version="$(rpm -q --whatprovides redhat-release --queryformat "%{VERSION}\n" | sed 's/\/.*//' | sed 's/\..*//')" + ;; + + fedora|centos) + dist_version="$(rpm -q --whatprovides redhat-release --queryformat "%{VERSION}\n" | sed 's/\/.*//' | sed 's/\..*//')" + ;; + + *) + if command_exists lsb_release; then + dist_version="$(lsb_release --codename | cut -f2)" + fi + if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then + dist_version="$(. /etc/os-release && echo "$VERSION_ID")" + fi + ;; + + + esac + + # Check if this is a forked Linux distro + check_forked + + # Run setup for each distro accordingly + case "$lsb_dist" in + amzn) + ( + set -x + $sh_c 'sleep 3; yum -y -q install docker' + ) + echo_docker_as_nonroot + exit 0 + ;; + + 'opensuse project'|opensuse|'suse linux'|sle[sd]) + ( + set -x + $sh_c 'sleep 3; zypper -n install docker' + ) + echo_docker_as_nonroot + exit 0 + ;; + + ubuntu|debian) + export DEBIAN_FRONTEND=noninteractive + + did_apt_get_update= + apt_get_update() { + if [ -z "$did_apt_get_update" ]; then + ( set -x; $sh_c 'sleep 3; apt-get update' ) + did_apt_get_update=1 + fi + } + + # aufs is preferred over devicemapper; try to ensure the driver is available. + if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then + if uname -r | grep -q -- '-generic' && dpkg -l 'linux-image-*-generic' | grep -q '^ii' 2>/dev/null; then + kern_extras="linux-image-extra-$(uname -r) linux-image-extra-virtual" + + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q '"$kern_extras" ) || true + + if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then + echo >&2 'Warning: tried to install '"$kern_extras"' (for AUFS)' + echo >&2 ' but we still have no AUFS. Docker may not work. Proceeding anyways!' + ( set -x; sleep 10 ) + fi + else + echo >&2 'Warning: current kernel is not supported by the linux-image-extra-virtual' + echo >&2 ' package. We have no AUFS support. Consider installing the packages' + echo >&2 ' linux-image-virtual kernel and linux-image-extra-virtual for AUFS support.' + ( set -x; sleep 10 ) + fi + fi + + # install apparmor utils if they're missing and apparmor is enabled in the kernel + # otherwise Docker will fail to start + if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then + if command -v apparmor_parser >/dev/null 2>&1; then + echo 'apparmor is enabled in the kernel and apparmor utils were already installed' + else + echo 'apparmor is enabled in the kernel, but apparmor_parser missing' + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q apparmor' ) + fi + fi + + if [ ! 
-e /usr/lib/apt/methods/https ]; then
+				apt_get_update
+				( set -x; $sh_c 'sleep 3; apt-get install -y -q apt-transport-https ca-certificates' )
+			fi
+			if [ -z "$curl" ]; then
+				apt_get_update
+				( set -x; $sh_c 'sleep 3; apt-get install -y -q curl ca-certificates' )
+				curl='curl -sSL'
+			fi
+			(
+			set -x
+			$sh_c "apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D"
+			$sh_c "mkdir -p /etc/apt/sources.list.d"
+			$sh_c "echo deb https://apt.dockerproject.org/repo ${lsb_dist}-${dist_version} ${repo} > /etc/apt/sources.list.d/docker.list"
+			$sh_c 'sleep 3; apt-get update; apt-get install -y -q docker-engine'
+			)
+			echo_docker_as_nonroot
+			exit 0
+			;;
+
+		fedora|centos|oraclelinux)
+			$sh_c "cat >/etc/yum.repos.d/docker-${repo}.repo" <<-EOF
+			[docker-${repo}-repo]
+			name=Docker ${repo} Repository
+			baseurl=https://yum.dockerproject.org/repo/${repo}/${lsb_dist}/${dist_version}
+			enabled=1
+			gpgcheck=1
+			gpgkey=https://yum.dockerproject.org/gpg
+			EOF
+			if [ "$lsb_dist" = "fedora" ] && [ "$dist_version" -ge "22" ]; then
+				(
+				set -x
+				$sh_c 'sleep 3; dnf -y -q install docker-engine'
+				)
+			else
+				(
+				set -x
+				$sh_c 'sleep 3; yum -y -q install docker-engine'
+				)
+			fi
+			echo_docker_as_nonroot
+			exit 0
+			;;
+		gentoo)
+			if [ "$url" = "https://test.docker.com/" ]; then
+				# intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output
+				cat >&2 <<-'EOF'
+
+				  You appear to be trying to install the latest nightly build in Gentoo.
+				  The portage tree should contain the latest stable release of Docker, but
+				  if you want something more recent, you can always use the live ebuild
+				  provided in the "docker" overlay available via layman. For more
+				  instructions, please see the following URL:
+
+				    https://github.com/tianon/docker-overlay#using-this-overlay
+
+				  After adding the "docker" overlay, you should be able to:
+
+				    emerge -av =app-emulation/docker-9999
+
+				EOF
+				exit 1
+			fi
+
+			(
+				set -x
+				$sh_c 'sleep 3; emerge app-emulation/docker'
+			)
+			exit 0
+			;;
+	esac
+
+	# intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output
+	cat >&2 <<-'EOF'
+
+	  Either your platform is not easily detectable, is not supported by this
+	  installer script (yet - PRs welcome! [hack/install.sh]), or does not yet have
+	  a package for Docker. Please visit the following URL for more detailed
+	  installation instructions:
+
+	    https://docs.docker.com/en/latest/installation/
+
+	EOF
+	exit 1
+}
+
+# wrapped up in a function so that we have some protection against only getting
+# half the file during "curl | sh"
+do_install
diff --git a/hack/make.sh b/hack/make.sh
new file mode 100755
index 00000000..2e99cfb2
--- /dev/null
+++ b/hack/make.sh
@@ -0,0 +1,319 @@
+#!/usr/bin/env bash
+set -e
+
+# This script builds various binary artifacts from a checkout of the docker
+# source code.
+#
+# Requirements:
+# - The current directory should be a checkout of the docker source code
+#   (https://github.com/docker/docker). Whatever version is checked out
+#   will be built.
+# - The VERSION file, at the root of the repository, should exist, and
+#   will be used as Docker binary version and package version.
+# - The hash of the git commit will also be included in the Docker binary,
+#   with the suffix -dirty if the repository isn't clean.
+# - The script is intended to be run inside the docker container specified
+#   in the Dockerfile at the root of the source.
In other words: +# DO NOT CALL THIS SCRIPT DIRECTLY. +# - The right way to call this script is to invoke "make" from +# your checkout of the Docker repository. +# the Makefile will do a "docker build -t docker ." and then +# "docker run hack/make.sh" in the resulting image. +# + +set -o pipefail + +export DOCKER_PKG='github.com/docker/docker' +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +export MAKEDIR="$SCRIPTDIR/make" + +# We're a nice, sexy, little shell script, and people might try to run us; +# but really, they shouldn't. We want to be in a container! +if [ "$PWD" != "/go/src/$DOCKER_PKG" ] || [ -z "$DOCKER_CROSSPLATFORMS" ]; then + { + echo "# WARNING! I don't seem to be running in the Docker container." + echo "# The result of this command might be an incorrect build, and will not be" + echo "# officially supported." + echo "#" + echo "# Try this instead: make all" + echo "#" + } >&2 +fi + +echo + +# List of bundles to create when no argument is passed +DEFAULT_BUNDLES=( + validate-dco + validate-gofmt + validate-lint + validate-pkg + validate-test + validate-toml + validate-vet + + binary + + test-unit + test-integration-cli + test-docker-py + + dynbinary + + cover + cross + tgz + ubuntu +) + +VERSION=$(< ./VERSION) +if command -v git &> /dev/null && git rev-parse &> /dev/null; then + GITCOMMIT=$(git rev-parse --short HEAD) + if [ -n "$(git status --porcelain --untracked-files=no)" ]; then + GITCOMMIT="$GITCOMMIT-dirty" + fi + BUILDTIME=$(date -u) +elif [ "$DOCKER_GITCOMMIT" ]; then + GITCOMMIT="$DOCKER_GITCOMMIT" +else + echo >&2 'error: .git directory missing and DOCKER_GITCOMMIT not specified' + echo >&2 ' Please either build with the .git directory accessible, or specify the' + echo >&2 ' exact (--short) commit hash you are building using DOCKER_GITCOMMIT for' + echo >&2 ' future accountability in diagnosing build issues. Thanks!' + exit 1 +fi + +if [ "$AUTO_GOPATH" ]; then + rm -rf .gopath + mkdir -p .gopath/src/"$(dirname "${DOCKER_PKG}")" + ln -sf ../../../.. .gopath/src/"${DOCKER_PKG}" + export GOPATH="${PWD}/.gopath:${PWD}/vendor" +fi + +if [ ! "$GOPATH" ]; then + echo >&2 'error: missing GOPATH; please see https://golang.org/doc/code.html#GOPATH' + echo >&2 ' alternatively, set AUTO_GOPATH=1' + exit 1 +fi + +if [ "$DOCKER_EXPERIMENTAL" ]; then + echo >&2 '# WARNING! DOCKER_EXPERIMENTAL is set: building experimental features' + echo >&2 + DOCKER_BUILDTAGS+=" experimental" +fi + +if [ -z "$DOCKER_CLIENTONLY" ]; then + DOCKER_BUILDTAGS+=" daemon" +fi + +if [ "$DOCKER_EXECDRIVER" = 'lxc' ]; then + DOCKER_BUILDTAGS+=' test_no_exec' +fi + +# test whether "btrfs/version.h" exists and apply btrfs_noversion appropriately +if \ + command -v gcc &> /dev/null \ + && ! gcc -E - &> /dev/null <<<'#include ' \ +; then + DOCKER_BUILDTAGS+=' btrfs_noversion' +fi + +# test whether "libdevmapper.h" is new enough to support deferred remove +# functionality. +if \ + command -v gcc &> /dev/null \ + && ! ( echo -e '#include \nint main() { dm_task_deferred_remove(NULL); }'| gcc -ldevmapper -xc - &> /dev/null ) \ +; then + DOCKER_BUILDTAGS+=' libdm_no_deferred_remove' +fi + +# Use these flags when compiling the tests and final binary + +IAMSTATIC='true' +source "$SCRIPTDIR/make/.go-autogen" +if [ -z "$DOCKER_DEBUG" ]; then + LDFLAGS='-w' +fi + +LDFLAGS_STATIC='-linkmode external' +# Cgo -H windows is incompatible with -linkmode external. 
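+# (Illustrative only: LDFLAGS_STATIC feeds LDFLAGS_STATIC_DOCKER below, so a
+# static Linux build ends up linking roughly like
+#   go build -ldflags '-w -linkmode external -extldflags "-static ..."' ./docker
+# -- a sketch assembled from these variables, not output captured from a build.)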
+if [ "$(go env GOOS)" == 'windows' ]; then + LDFLAGS_STATIC='' +fi +EXTLDFLAGS_STATIC='-static' +# ORIG_BUILDFLAGS is necessary for the cross target which cannot always build +# with options like -race. +ORIG_BUILDFLAGS=( -a -tags "netgo static_build $DOCKER_BUILDTAGS" -installsuffix netgo ) +# see https://github.com/golang/go/issues/9369#issuecomment-69864440 for why -installsuffix is necessary here +BUILDFLAGS=( $BUILDFLAGS "${ORIG_BUILDFLAGS[@]}" ) +# Test timeout. +: ${TIMEOUT:=60m} +TESTFLAGS+=" -test.timeout=${TIMEOUT}" + +# A few more flags that are specific just to building a completely-static binary (see hack/make/binary) +# PLEASE do not use these anywhere else. +EXTLDFLAGS_STATIC_DOCKER="$EXTLDFLAGS_STATIC -lpthread -Wl,--unresolved-symbols=ignore-in-object-files" +LDFLAGS_STATIC_DOCKER=" + $LDFLAGS_STATIC + -extldflags \"$EXTLDFLAGS_STATIC_DOCKER\" +" + +if [ "$(uname -s)" = 'FreeBSD' ]; then + # Tell cgo the compiler is Clang, not GCC + # https://code.google.com/p/go/source/browse/src/cmd/cgo/gcc.go?spec=svne77e74371f2340ee08622ce602e9f7b15f29d8d3&r=e6794866ebeba2bf8818b9261b54e2eef1c9e588#752 + export CC=clang + + # "-extld clang" is a workaround for + # https://code.google.com/p/go/issues/detail?id=6845 + LDFLAGS="$LDFLAGS -extld clang" +fi + +# If sqlite3.h doesn't exist under /usr/include, +# check /usr/local/include also just in case +# (e.g. FreeBSD Ports installs it under the directory) +if [ ! -e /usr/include/sqlite3.h ] && [ -e /usr/local/include/sqlite3.h ]; then + export CGO_CFLAGS='-I/usr/local/include' + export CGO_LDFLAGS='-L/usr/local/lib' +fi + +HAVE_GO_TEST_COVER= +if \ + go help testflag | grep -- -cover > /dev/null \ + && go tool -n cover > /dev/null 2>&1 \ +; then + HAVE_GO_TEST_COVER=1 +fi + +# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. +# You can use this to select certain tests to run, eg. 
+# +# TESTFLAGS='-test.run ^TestBuild$' ./hack/make.sh test-unit +# +# For integration-cli test, we use [gocheck](https://labix.org/gocheck), if you want +# to run certain tests on your local host, you should run with command: +# +# TESTFLAGS='-check.f DockerSuite.TestBuild*' ./hack/make.sh binary test-integration-cli +# +go_test_dir() { + dir=$1 + coverpkg=$2 + testcover=() + if [ "$HAVE_GO_TEST_COVER" ]; then + # if our current go install has -cover, we want to use it :) + mkdir -p "$DEST/coverprofiles" + coverprofile="docker${dir#.}" + coverprofile="$ABS_DEST/coverprofiles/${coverprofile//\//-}" + testcover=( -cover -coverprofile "$coverprofile" $coverpkg ) + fi + ( + echo '+ go test' $TESTFLAGS "${DOCKER_PKG}${dir#.}" + cd "$dir" + export DEST="$ABS_DEST" # we're in a subshell, so this is safe -- our integration-cli tests need DEST, and "cd" screws it up + test_env go test ${testcover[@]} -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS + ) +} +test_env() { + # use "env -i" to tightly control the environment variables that bleed into the tests + env -i \ + DEST="$DEST" \ + DOCKER_EXECDRIVER="$DOCKER_EXECDRIVER" \ + DOCKER_GRAPHDRIVER="$DOCKER_GRAPHDRIVER" \ + DOCKER_USERLANDPROXY="$DOCKER_USERLANDPROXY" \ + DOCKER_HOST="$DOCKER_HOST" \ + GOPATH="$GOPATH" \ + HOME="$ABS_DEST/fake-HOME" \ + PATH="$PATH" \ + TEMP="$TEMP" \ + TEST_DOCKERINIT_PATH="$TEST_DOCKERINIT_PATH" \ + "$@" +} + +# a helper to provide ".exe" when it's appropriate +binary_extension() { + if [ "$(go env GOOS)" = 'windows' ]; then + echo -n '.exe' + fi +} + +# This helper function walks the current directory looking for directories +# holding certain files ($1 parameter), and prints their paths on standard +# output, one per line. +find_dirs() { + find . -not \( \ + \( \ + -path './vendor/*' \ + -o -path './integration-cli/*' \ + -o -path './contrib/*' \ + -o -path './pkg/mflag/example/*' \ + -o -path './.git/*' \ + -o -path './bundles/*' \ + -o -path './docs/*' \ + -o -path './pkg/libcontainer/nsinit/*' \ + \) \ + -prune \ + \) -name "$1" -print0 | xargs -0n1 dirname | sort -u +} + +hash_files() { + while [ $# -gt 0 ]; do + f="$1" + shift + dir="$(dirname "$f")" + base="$(basename "$f")" + for hashAlgo in md5 sha256; do + if command -v "${hashAlgo}sum" &> /dev/null; then + ( + # subshell and cd so that we get output files like: + # $HASH docker-$VERSION + # instead of: + # $HASH /go/src/github.com/.../$VERSION/binary/docker-$VERSION + cd "$dir" + "${hashAlgo}sum" "$base" > "$base.$hashAlgo" + ) + fi + done + done +} + +bundle() { + local bundle="$1"; shift + echo "---> Making bundle: $(basename "$bundle") (in $DEST)" + source "$SCRIPTDIR/make/$bundle" "$@" +} + +main() { + # We want this to fail if the bundles already exist and cannot be removed. + # This is to avoid mixing bundles from different versions of the code. + mkdir -p bundles + if [ -e "bundles/$VERSION" ]; then + echo "bundles/$VERSION already exists. Removing." + rm -fr "bundles/$VERSION" && mkdir "bundles/$VERSION" || exit 1 + echo + fi + + if [ "$(go env GOHOSTOS)" != 'windows' ]; then + # Windows and symlinks don't get along well + + rm -f bundles/latest + ln -s "$VERSION" bundles/latest + fi + + if [ $# -lt 1 ]; then + bundles=(${DEFAULT_BUNDLES[@]}) + else + bundles=($@) + fi + for bundle in ${bundles[@]}; do + export DEST="bundles/$VERSION/$(basename "$bundle")" + # Cygdrive paths don't play well with go build -o. 
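+		# (for illustration: cygpath -mw should rewrite a path such as
+		# /cygdrive/c/gopath/bundles into a native C: form that go build -o
+		# accepts -- an assumed example path, not one taken from a real build)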
+ if [[ "$(uname -s)" == CYGWIN* ]]; then + export DEST="$(cygpath -mw "$DEST")" + fi + mkdir -p "$DEST" + ABS_DEST="$(cd "$DEST" && pwd -P)" + bundle "$bundle" + echo + done +} + +main "$@" diff --git a/hack/make/.build-deb/compat b/hack/make/.build-deb/compat new file mode 100644 index 00000000..ec635144 --- /dev/null +++ b/hack/make/.build-deb/compat @@ -0,0 +1 @@ +9 diff --git a/hack/make/.build-deb/control b/hack/make/.build-deb/control new file mode 100644 index 00000000..3f783463 --- /dev/null +++ b/hack/make/.build-deb/control @@ -0,0 +1,26 @@ +Source: docker-engine +Maintainer: Docker +Homepage: https://dockerproject.org +Vcs-Browser: https://github.com/docker/docker +Vcs-Git: git://github.com/docker/docker.git + +Package: docker-engine +Architecture: linux-any +Depends: iptables, ${misc:Depends}, ${perl:Depends}, ${shlibs:Depends} +Recommends: aufs-tools, + ca-certificates, + cgroupfs-mount | cgroup-lite, + git, + xz-utils, + ${apparmor:Recommends} +Conflicts: docker (<< 1.5~), docker.io, lxc-docker, lxc-docker-virtual-package +Description: Docker: the open-source application container engine + Docker is an open source project to pack, ship and run any application as a + lightweight container + . + Docker containers are both hardware-agnostic and platform-agnostic. This means + they can run anywhere, from your laptop to the largest EC2 compute instance and + everything in between - and they don't require you to use a particular + language, framework or packaging system. That makes them great building blocks + for deploying and scaling web apps, databases, and backend services without + depending on a particular stack or provider. diff --git a/hack/make/.build-deb/docker-engine.bash-completion b/hack/make/.build-deb/docker-engine.bash-completion new file mode 100644 index 00000000..6ea11193 --- /dev/null +++ b/hack/make/.build-deb/docker-engine.bash-completion @@ -0,0 +1 @@ +contrib/completion/bash/docker diff --git a/hack/make/.build-deb/docker-engine.docker.default b/hack/make/.build-deb/docker-engine.docker.default new file mode 120000 index 00000000..4278533d --- /dev/null +++ b/hack/make/.build-deb/docker-engine.docker.default @@ -0,0 +1 @@ +../../../contrib/init/sysvinit-debian/docker.default \ No newline at end of file diff --git a/hack/make/.build-deb/docker-engine.docker.init b/hack/make/.build-deb/docker-engine.docker.init new file mode 120000 index 00000000..8cb89d30 --- /dev/null +++ b/hack/make/.build-deb/docker-engine.docker.init @@ -0,0 +1 @@ +../../../contrib/init/sysvinit-debian/docker \ No newline at end of file diff --git a/hack/make/.build-deb/docker-engine.docker.upstart b/hack/make/.build-deb/docker-engine.docker.upstart new file mode 120000 index 00000000..7e1b64a3 --- /dev/null +++ b/hack/make/.build-deb/docker-engine.docker.upstart @@ -0,0 +1 @@ +../../../contrib/init/upstart/docker.conf \ No newline at end of file diff --git a/hack/make/.build-deb/docker-engine.install b/hack/make/.build-deb/docker-engine.install new file mode 100644 index 00000000..a8857a96 --- /dev/null +++ b/hack/make/.build-deb/docker-engine.install @@ -0,0 +1,11 @@ +#contrib/syntax/vim/doc/* /usr/share/vim/vimfiles/doc/ +#contrib/syntax/vim/ftdetect/* /usr/share/vim/vimfiles/ftdetect/ +#contrib/syntax/vim/syntax/* /usr/share/vim/vimfiles/syntax/ +contrib/*-integration usr/share/docker-engine/contrib/ +contrib/check-config.sh usr/share/docker-engine/contrib/ +contrib/completion/zsh/_docker usr/share/zsh/vendor-completions/ +contrib/init/systemd/docker.service lib/systemd/system/ 
+contrib/init/systemd/docker.socket lib/systemd/system/ +contrib/mk* usr/share/docker-engine/contrib/ +contrib/nuke-graph-directory.sh usr/share/docker-engine/contrib/ +contrib/syntax/nano/Dockerfile.nanorc usr/share/nano/ diff --git a/hack/make/.build-deb/docker-engine.manpages b/hack/make/.build-deb/docker-engine.manpages new file mode 100644 index 00000000..1aa62186 --- /dev/null +++ b/hack/make/.build-deb/docker-engine.manpages @@ -0,0 +1 @@ +man/man*/* diff --git a/hack/make/.build-deb/docker-engine.postinst b/hack/make/.build-deb/docker-engine.postinst new file mode 100644 index 00000000..eeef6ca8 --- /dev/null +++ b/hack/make/.build-deb/docker-engine.postinst @@ -0,0 +1,20 @@ +#!/bin/sh +set -e + +case "$1" in + configure) + if [ -z "$2" ]; then + if ! getent group docker > /dev/null; then + groupadd --system docker + fi + fi + ;; + abort-*) + # How'd we get here?? + exit 1 + ;; + *) + ;; +esac + +#DEBHELPER# diff --git a/hack/make/.build-deb/docker-engine.udev b/hack/make/.build-deb/docker-engine.udev new file mode 120000 index 00000000..914a3619 --- /dev/null +++ b/hack/make/.build-deb/docker-engine.udev @@ -0,0 +1 @@ +../../../contrib/udev/80-docker.rules \ No newline at end of file diff --git a/hack/make/.build-deb/docs b/hack/make/.build-deb/docs new file mode 100644 index 00000000..b43bf86b --- /dev/null +++ b/hack/make/.build-deb/docs @@ -0,0 +1 @@ +README.md diff --git a/hack/make/.build-deb/rules b/hack/make/.build-deb/rules new file mode 100755 index 00000000..b4c8e2b4 --- /dev/null +++ b/hack/make/.build-deb/rules @@ -0,0 +1,36 @@ +#!/usr/bin/make -f + +VERSION = $(shell cat VERSION) + +override_dh_gencontrol: + # if we're on Ubuntu, we need to Recommends: apparmor + echo 'apparmor:Recommends=$(shell dpkg-vendor --is Ubuntu && echo apparmor)' >> debian/docker-engine.substvars + dh_gencontrol + +override_dh_auto_build: + ./hack/make.sh dynbinary + # ./man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here + +override_dh_auto_test: + ./bundles/$(VERSION)/dynbinary/docker -v + +override_dh_strip: + # the SHA1 of dockerinit is important: don't strip it + # also, Go has lots of problems with stripping, so just don't + +override_dh_auto_install: + mkdir -p debian/docker-engine/usr/bin + cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary/docker)" debian/docker-engine/usr/bin/docker + mkdir -p debian/docker-engine/usr/lib/docker + cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary/dockerinit)" debian/docker-engine/usr/lib/docker/dockerinit + +override_dh_installinit: + # use "docker" as our service name, not "docker-engine" + dh_installinit --name=docker + +override_dh_installudev: + # match our existing priority + dh_installudev --priority=z80 + +%: + dh $@ --with=bash-completion $(shell command -v dh_systemd_enable > /dev/null 2>&1 && echo --with=systemd) diff --git a/hack/make/.build-rpm/docker-engine.spec b/hack/make/.build-rpm/docker-engine.spec new file mode 100644 index 00000000..a559523b --- /dev/null +++ b/hack/make/.build-rpm/docker-engine.spec @@ -0,0 +1,187 @@ +Name: docker-engine +Version: %{_version} +Release: %{_release}%{?dist} +Summary: The open-source application container engine +Group: Tools/Docker + +License: ASL 2.0 +Source: %{name}.tar.gz + +URL: https://dockerproject.org +Vendor: Docker +Packager: Docker + +# docker builds in a checksum of dockerinit into docker, +# # so stripping the binaries breaks docker +%global __os_install_post %{_rpmconfigdir}/brp-compress +%global debug_package %{nil} + +# is_systemd 
conditional +%if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 +%global is_systemd 1 +%endif + +# required packages for build +# most are already in the container (see contrib/builder/rpm/generate.sh) +# only require systemd on those systems +%if 0%{?is_systemd} +BuildRequires: pkgconfig(systemd) +Requires: systemd-units +%else +Requires(post): chkconfig +Requires(preun): chkconfig +# This is for /sbin/service +Requires(preun): initscripts +%endif + +# required packages on install +Requires: /bin/sh +Requires: iptables +Requires: libcgroup +Requires: tar +Requires: xz +%if 0%{?fedora} >= 21 +# Resolves: rhbz#1165615 +Requires: device-mapper-libs >= 1.02.90-1 +%endif +%if 0%{?oraclelinux} == 6 +# Require Oracle Unbreakable Enterprise Kernel R3 and newer device-mapper +Requires: kernel-uek >= 3.8 +Requires: device-mapper >= 1.02.90-2 +%endif + +# conflicting packages +Conflicts: docker +Conflicts: docker-io + +%description +Docker is an open source project to pack, ship and run any application as a +lightweight container + +Docker containers are both hardware-agnostic and platform-agnostic. This means +they can run anywhere, from your laptop to the largest EC2 compute instance and +everything in between - and they don't require you to use a particular +language, framework or packaging system. That makes them great building blocks +for deploying and scaling web apps, databases, and backend services without +depending on a particular stack or provider. + +%prep +%if 0%{?centos} <= 6 +%setup -n %{name} +%else +%autosetup -n %{name} +%endif + +%build +./hack/make.sh dynbinary +# ./man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here + +%check +./bundles/%{_origversion}/dynbinary/docker -v + +%install +# install binary +install -d $RPM_BUILD_ROOT/%{_bindir} +install -p -m 755 bundles/%{_origversion}/dynbinary/docker-%{_origversion} $RPM_BUILD_ROOT/%{_bindir}/docker + +# install dockerinit +install -d $RPM_BUILD_ROOT/%{_libexecdir}/docker +install -p -m 755 bundles/%{_origversion}/dynbinary/dockerinit-%{_origversion} $RPM_BUILD_ROOT/%{_libexecdir}/docker/dockerinit + +# install udev rules +install -d $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d +install -p -m 755 contrib/udev/80-docker.rules $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d/80-docker.rules + +# add init scripts +install -d $RPM_BUILD_ROOT/etc/sysconfig +install -d $RPM_BUILD_ROOT/%{_initddir} + + +%if 0%{?is_systemd} +install -d $RPM_BUILD_ROOT/%{_unitdir} +install -p -m 644 contrib/init/systemd/docker.service $RPM_BUILD_ROOT/%{_unitdir}/docker.service +install -p -m 644 contrib/init/systemd/docker.socket $RPM_BUILD_ROOT/%{_unitdir}/docker.socket +%endif + +install -p -m 644 contrib/init/sysvinit-redhat/docker.sysconfig $RPM_BUILD_ROOT/etc/sysconfig/docker +install -p -m 755 contrib/init/sysvinit-redhat/docker $RPM_BUILD_ROOT/%{_initddir}/docker + +# add bash completions +install -d $RPM_BUILD_ROOT/usr/share/bash-completion/completions +install -d $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions +install -d $RPM_BUILD_ROOT/usr/share/fish/completions +install -p -m 644 contrib/completion/bash/docker $RPM_BUILD_ROOT/usr/share/bash-completion/completions/docker +install -p -m 644 contrib/completion/zsh/_docker $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions/_docker +install -p -m 644 contrib/completion/fish/docker.fish $RPM_BUILD_ROOT/usr/share/fish/completions/docker.fish + +# install manpages +install -d %{buildroot}%{_mandir}/man1 +install -p -m 644 man/man1/*.1 
$RPM_BUILD_ROOT/%{_mandir}/man1 +install -d %{buildroot}%{_mandir}/man5 +install -p -m 644 man/man5/*.5 $RPM_BUILD_ROOT/%{_mandir}/man5 + +# add vimfiles +install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc +install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect +install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax +install -p -m 644 contrib/syntax/vim/doc/dockerfile.txt $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc/dockerfile.txt +install -p -m 644 contrib/syntax/vim/ftdetect/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect/dockerfile.vim +install -p -m 644 contrib/syntax/vim/syntax/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax/dockerfile.vim + +# add nano +install -d $RPM_BUILD_ROOT/usr/share/nano +install -p -m 644 contrib/syntax/nano/Dockerfile.nanorc $RPM_BUILD_ROOT/usr/share/nano/Dockerfile.nanorc + +# list files owned by the package here +%files +/%{_bindir}/docker +/%{_libexecdir}/docker/dockerinit +/%{_sysconfdir}/udev/rules.d/80-docker.rules +%if 0%{?is_systemd} +/%{_unitdir}/docker.service +/%{_unitdir}/docker.socket +%endif +/etc/sysconfig/docker +/%{_initddir}/docker +/usr/share/bash-completion/completions/docker +/usr/share/zsh/vendor-completions/_docker +/usr/share/fish/completions/docker.fish +%doc +/%{_mandir}/man1/* +/%{_mandir}/man5/* +/usr/share/vim/vimfiles/doc/dockerfile.txt +/usr/share/vim/vimfiles/ftdetect/dockerfile.vim +/usr/share/vim/vimfiles/syntax/dockerfile.vim +/usr/share/nano/Dockerfile.nanorc + +%post +%if 0%{?is_systemd} +%systemd_post docker +%else +# This adds the proper /etc/rc*.d links for the script +/sbin/chkconfig --add docker +%endif +if ! getent group docker > /dev/null; then + groupadd --system docker +fi + +%preun +%if 0%{?is_systemd} +%systemd_preun docker +%else +if [ $1 -eq 0 ] ; then + /sbin/service docker stop >/dev/null 2>&1 + /sbin/chkconfig --del docker +fi +%endif + +%postun +%if 0%{?is_systemd} +%systemd_postun_with_restart docker +%else +if [ "$1" -ge "1" ] ; then + /sbin/service docker condrestart >/dev/null 2>&1 || : +fi +%endif + +%changelog diff --git a/hack/make/.dockerinit b/hack/make/.dockerinit new file mode 100644 index 00000000..4a62ee1a --- /dev/null +++ b/hack/make/.dockerinit @@ -0,0 +1,33 @@ +#!/bin/bash +set -e + +IAMSTATIC="true" +source "${MAKEDIR}/.go-autogen" + +# dockerinit still needs to be a static binary, even if docker is dynamic +go build \ + -o "$DEST/dockerinit-$VERSION" \ + "${BUILDFLAGS[@]}" \ + -ldflags " + $LDFLAGS + $LDFLAGS_STATIC + -extldflags \"$EXTLDFLAGS_STATIC\" + " \ + ./dockerinit + +echo "Created binary: $DEST/dockerinit-$VERSION" +ln -sf "dockerinit-$VERSION" "$DEST/dockerinit" + +sha1sum= +if command -v sha1sum &> /dev/null; then + sha1sum=sha1sum +elif command -v shasum &> /dev/null; then + # Mac OS X - why couldn't they just use the same command name and be happy? 
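+	# (shasum computes SHA-1 by default, so its "<hash>  <file>" output lines
+	# match sha1sum's closely enough for the "cut -d' ' -f1" used below)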
+ sha1sum=shasum +else + echo >&2 'error: cannot find sha1sum command or equivalent' + exit 1 +fi + +# sha1 our new dockerinit to ensure separate docker and dockerinit always run in a perfect pair compiled for one another +export DOCKER_INITSHA1=$($sha1sum "$DEST/dockerinit-$VERSION" | cut -d' ' -f1) diff --git a/hack/make/.dockerinit-gccgo b/hack/make/.dockerinit-gccgo new file mode 100644 index 00000000..98908638 --- /dev/null +++ b/hack/make/.dockerinit-gccgo @@ -0,0 +1,31 @@ +#!/bin/bash +set -e + +IAMSTATIC="true" +source "${MAKEDIR}/.go-autogen" + +# dockerinit still needs to be a static binary, even if docker is dynamic +go build --compiler=gccgo \ + -o "$DEST/dockerinit-$VERSION" \ + "${BUILDFLAGS[@]}" \ + --gccgoflags " + -g + -Wl,--no-export-dynamic + $EXTLDFLAGS_STATIC_DOCKER + -lnetgo + " \ + ./dockerinit + +echo "Created binary: $DEST/dockerinit-$VERSION" +ln -sf "dockerinit-$VERSION" "$DEST/dockerinit" + +sha1sum= +if command -v sha1sum &> /dev/null; then + sha1sum=sha1sum +else + echo >&2 'error: cannot find sha1sum command or equivalent' + exit 1 +fi + +# sha1 our new dockerinit to ensure separate docker and dockerinit always run in a perfect pair compiled for one another +export DOCKER_INITSHA1=$($sha1sum "$DEST/dockerinit-$VERSION" | cut -d' ' -f1) diff --git a/hack/make/.ensure-emptyfs b/hack/make/.ensure-emptyfs new file mode 100644 index 00000000..e71a30ae --- /dev/null +++ b/hack/make/.ensure-emptyfs @@ -0,0 +1,23 @@ +#!/bin/bash +set -e + +if ! docker inspect emptyfs &> /dev/null; then + # let's build a "docker save" tarball for "emptyfs" + # see https://github.com/docker/docker/pull/5262 + # and also https://github.com/docker/docker/issues/4242 + dir="$DEST/emptyfs" + mkdir -p "$dir" + ( + cd "$dir" + echo '{"emptyfs":{"latest":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"}}' > repositories + mkdir -p 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 + ( + cd 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 + echo '{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0}' > json + echo '1.0' > VERSION + tar -cf layer.tar --files-from /dev/null + ) + ) + ( set -x; tar -cC "$dir" . | docker load ) + rm -rf "$dir" +fi diff --git a/hack/make/.ensure-frozen-images b/hack/make/.ensure-frozen-images new file mode 100644 index 00000000..deded80e --- /dev/null +++ b/hack/make/.ensure-frozen-images @@ -0,0 +1,38 @@ +#!/bin/bash +set -e + +# this list should match roughly what's in the Dockerfile (minus the explicit image IDs, of course) +images=( + busybox:latest + hello-world:frozen + jess/unshare:latest +) + +if ! docker inspect "${images[@]}" &> /dev/null; then + hardCodedDir='/docker-frozen-images' + if [ -d "$hardCodedDir" ]; then + ( set -x; tar -cC "$hardCodedDir" . 
| docker load ) + else + dir="$DEST/frozen-images" + # extract the exact "RUN download-frozen-image.sh" line from the Dockerfile itself for consistency + # NOTE: this will fail if either "curl" is not installed or if the Dockerfile is not available/readable + awk ' + $1 == "RUN" && $2 == "./contrib/download-frozen-image.sh" { + for (i = 2; i < NF; i++) + printf ( $i == "'"$hardCodedDir"'" ? "'"$dir"'" : $i ) " "; + print $NF; + if (/\\$/) { + inCont = 1; + next; + } + } + inCont { + print; + if (!/\\$/) { + inCont = 0; + } + } + ' Dockerfile | sh -x + ( set -x; tar -cC "$dir" . | docker load ) + fi +fi diff --git a/hack/make/.ensure-httpserver b/hack/make/.ensure-httpserver new file mode 100644 index 00000000..38659eda --- /dev/null +++ b/hack/make/.ensure-httpserver @@ -0,0 +1,15 @@ +#!/bin/bash +set -e + +# Build a Go static web server on top of busybox image +# and compile it for target daemon + +dir="$DEST/httpserver" +mkdir -p "$dir" +( + cd "$dir" + GOOS=linux GOARCH=amd64 go build -o httpserver github.com/docker/docker/contrib/httpserver + cp ../../../../contrib/httpserver/Dockerfile . + docker build -qt httpserver . > /dev/null +) +rm -rf "$dir" diff --git a/hack/make/.go-autogen b/hack/make/.go-autogen new file mode 100644 index 00000000..52e2f571 --- /dev/null +++ b/hack/make/.go-autogen @@ -0,0 +1,54 @@ +#!/bin/bash + +rm -rf autogen + +mkdir -p autogen/dockerversion +cat > autogen/dockerversion/dockerversion.go < autogen/winresources/resources.go < /dev/null +fi diff --git a/hack/make/.go-compile-test-dir b/hack/make/.go-compile-test-dir new file mode 100755 index 00000000..91c438b3 --- /dev/null +++ b/hack/make/.go-compile-test-dir @@ -0,0 +1,35 @@ +#!/bin/bash +set -e + +# Compile phase run by parallel in test-unit. No support for coverpkg + +dir=$1 +in_file="$dir/$(basename "$dir").test" +out_file="$DEST/precompiled/$dir.test" +# we want to use binary_extension() here, but we can't because it's in main.sh and this file gets re-execed +if [ "$(go env GOOS)" = 'windows' ]; then + in_file+='.exe' + out_file+='.exe' +fi +testcover=() +if [ "$HAVE_GO_TEST_COVER" ]; then + # if our current go install has -cover, we want to use it :) + mkdir -p "$DEST/coverprofiles" + coverprofile="docker${dir#.}" + coverprofile="$DEST/coverprofiles/${coverprofile//\//-}" + testcover=( -cover -coverprofile "$coverprofile" ) # missing $coverpkg +fi +if [ "$BUILDFLAGS_FILE" ]; then + readarray -t BUILDFLAGS < "$BUILDFLAGS_FILE" +fi + +if ! ( + cd "$dir" + go test "${testcover[@]}" -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS -c +); then + exit 1 +fi + +mkdir -p "$(dirname "$out_file")" +mv "$in_file" "$out_file" +echo "Precompiled: ${DOCKER_PKG}${dir#.}" diff --git a/hack/make/.integration-daemon-setup b/hack/make/.integration-daemon-setup new file mode 100644 index 00000000..ab9d45c3 --- /dev/null +++ b/hack/make/.integration-daemon-setup @@ -0,0 +1,5 @@ +#!/bin/bash + +bundle .ensure-emptyfs +bundle .ensure-frozen-images +bundle .ensure-httpserver diff --git a/hack/make/.integration-daemon-start b/hack/make/.integration-daemon-start new file mode 100644 index 00000000..dcc09fa9 --- /dev/null +++ b/hack/make/.integration-daemon-start @@ -0,0 +1,73 @@ +#!/bin/bash + +# see test-integration-cli for example usage of this script + +export PATH="$ABS_DEST/../binary:$ABS_DEST/../dynbinary:$ABS_DEST/../gccgo:$ABS_DEST/../dyngccgo:$PATH" + +if ! 
command -v docker &> /dev/null; then + echo >&2 'error: binary or dynbinary must be run before .integration-daemon-start' + false +fi + +# intentionally open a couple bogus file descriptors to help test that they get scrubbed in containers +exec 41>&1 42>&2 + +export DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-vfs} +export DOCKER_EXECDRIVER=${DOCKER_EXECDRIVER:-native} +export DOCKER_USERLANDPROXY=${DOCKER_USERLANDPROXY:-true} + +# example usage: DOCKER_STORAGE_OPTS="dm.basesize=20G,dm.loopdatasize=200G" +storage_params="" +if [ -n "$DOCKER_STORAGE_OPTS" ]; then + IFS=',' + for i in ${DOCKER_STORAGE_OPTS}; do + storage_params="--storage-opt $i $storage_params" + done + unset IFS +fi + +if [ -z "$DOCKER_TEST_HOST" ]; then + # Start apparmor if it is enabled + if [ -e "/sys/module/apparmor/parameters/enabled" ] && [ "$(cat /sys/module/apparmor/parameters/enabled)" == "Y" ]; then + # reset container variable so apparmor profile is applied to process + # see https://github.com/docker/libcontainer/blob/master/apparmor/apparmor.go#L16 + export container="" + ( + set -x + /etc/init.d/apparmor start + ) + fi + + export DOCKER_HOST="unix://$(cd "$DEST" && pwd)/docker.sock" # "pwd" tricks to make sure $DEST is an absolute path, not a relative one + ( set -x; exec \ + docker --daemon --debug \ + --host "$DOCKER_HOST" \ + --storage-driver "$DOCKER_GRAPHDRIVER" \ + --exec-driver "$DOCKER_EXECDRIVER" \ + --pidfile "$DEST/docker.pid" \ + --userland-proxy="$DOCKER_USERLANDPROXY" \ + $storage_params \ + &> "$DEST/docker.log" + ) & + # make sure that if the script exits unexpectedly, we stop this daemon we just started + trap 'bundle .integration-daemon-stop' EXIT +else + export DOCKER_HOST="$DOCKER_TEST_HOST" +fi + +# give it a second to come up so it's "ready" +tries=10 +while ! docker version &> /dev/null; do + (( tries-- )) + if [ $tries -le 0 ]; then + if [ -z "$DOCKER_HOST" ]; then + echo >&2 "error: daemon failed to start" + echo >&2 " check $DEST/docker.log for details" + else + echo >&2 "error: daemon at $DOCKER_HOST fails to 'docker version':" + docker version >&2 || true + fi + false + fi + sleep 2 +done diff --git a/hack/make/.integration-daemon-stop b/hack/make/.integration-daemon-stop new file mode 100644 index 00000000..a72f1810 --- /dev/null +++ b/hack/make/.integration-daemon-stop @@ -0,0 +1,21 @@ +#!/bin/bash + +trap - EXIT # reset EXIT trap applied in .integration-daemon-start + +for pidFile in $(find "$DEST" -name docker.pid); do + pid=$(set -x; cat "$pidFile") + ( set -x; kill "$pid" ) + if ! 
wait "$pid"; then + echo >&2 "warning: PID $pid from $pidFile had a nonzero exit code" + fi +done + +if [ -z "$DOCKER_TEST_HOST" ]; then + # Stop apparmor if it is enabled + if [ -e "/sys/module/apparmor/parameters/enabled" ] && [ "$(cat /sys/module/apparmor/parameters/enabled)" == "Y" ]; then + ( + set -x + /etc/init.d/apparmor stop + ) + fi +fi diff --git a/hack/make/.resources-windows/docker.exe.manifest b/hack/make/.resources-windows/docker.exe.manifest new file mode 100644 index 00000000..674bc942 --- /dev/null +++ b/hack/make/.resources-windows/docker.exe.manifest @@ -0,0 +1,18 @@ + + + Docker + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/hack/make/.resources-windows/docker.ico b/hack/make/.resources-windows/docker.ico new file mode 100644 index 00000000..c6506ec8 Binary files /dev/null and b/hack/make/.resources-windows/docker.ico differ diff --git a/hack/make/.resources-windows/docker.png b/hack/make/.resources-windows/docker.png new file mode 100644 index 00000000..88df0b66 Binary files /dev/null and b/hack/make/.resources-windows/docker.png differ diff --git a/hack/make/.validate b/hack/make/.validate new file mode 100644 index 00000000..7397d0fa --- /dev/null +++ b/hack/make/.validate @@ -0,0 +1,33 @@ +#!/bin/bash + +if [ -z "$VALIDATE_UPSTREAM" ]; then + # this is kind of an expensive check, so let's not do this twice if we + # are running more than one validate bundlescript + + VALIDATE_REPO='https://github.com/docker/docker.git' + VALIDATE_BRANCH='master' + + if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then + VALIDATE_REPO="https://github.com/${TRAVIS_REPO_SLUG}.git" + VALIDATE_BRANCH="${TRAVIS_BRANCH}" + fi + + VALIDATE_HEAD="$(git rev-parse --verify HEAD)" + + git fetch -q "$VALIDATE_REPO" "refs/heads/$VALIDATE_BRANCH" + VALIDATE_UPSTREAM="$(git rev-parse --verify FETCH_HEAD)" + + VALIDATE_COMMIT_LOG="$VALIDATE_UPSTREAM..$VALIDATE_HEAD" + VALIDATE_COMMIT_DIFF="$VALIDATE_UPSTREAM...$VALIDATE_HEAD" + + validate_diff() { + if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then + git diff "$VALIDATE_COMMIT_DIFF" "$@" + fi + } + validate_log() { + if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then + git log "$VALIDATE_COMMIT_LOG" "$@" + fi + } +fi diff --git a/hack/make/README.md b/hack/make/README.md new file mode 100644 index 00000000..6574b0ef --- /dev/null +++ b/hack/make/README.md @@ -0,0 +1,17 @@ +This directory holds scripts called by `make.sh` in the parent directory. + +Each script is named after the bundle it creates. 
+They should not be called directly - instead, pass it as argument to make.sh, for example: + +``` +./hack/make.sh test +./hack/make.sh binary ubuntu + +# Or to run all bundles: +./hack/make.sh +``` + +To add a bundle: + +* Create a shell-compatible file here +* Add it to $DEFAULT_BUNDLES in make.sh diff --git a/hack/make/binary b/hack/make/binary new file mode 100644 index 00000000..d7643364 --- /dev/null +++ b/hack/make/binary @@ -0,0 +1,23 @@ +#!/bin/bash +set -e + +BINARY_NAME="docker-$VERSION" +BINARY_EXTENSION="$(binary_extension)" +BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION" + +source "${MAKEDIR}/.go-autogen" + +echo "Building: $DEST/$BINARY_FULLNAME" +go build \ + -o "$DEST/$BINARY_FULLNAME" \ + "${BUILDFLAGS[@]}" \ + -ldflags " + $LDFLAGS + $LDFLAGS_STATIC_DOCKER + " \ + ./docker + +echo "Created binary: $DEST/$BINARY_FULLNAME" +ln -sf "$BINARY_FULLNAME" "$DEST/docker$BINARY_EXTENSION" + +hash_files "$DEST/$BINARY_FULLNAME" diff --git a/hack/make/build-deb b/hack/make/build-deb new file mode 100644 index 00000000..deab30c2 --- /dev/null +++ b/hack/make/build-deb @@ -0,0 +1,71 @@ +#!/bin/bash +set -e + +# subshell so that we can export PATH and TZ without breaking other things +( + export TZ=UTC # make sure our "date" variables are UTC-based + bundle .integration-daemon-start + + # TODO consider using frozen images for the dockercore/builder-deb tags + + tilde='~' # ouch Bash 4.2 vs 4.3, you keel me + debVersion="${VERSION//-/$tilde}" # using \~ or '~' here works in 4.3, but not 4.2; just ~ causes $HOME to be inserted, hence the $tilde + # if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better + if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + gitUnix="$(git log -1 --pretty='%at')" + gitDate="$(date --date "@$gitUnix" +'%Y%m%d.%H%M%S')" + gitCommit="$(git log -1 --pretty='%h')" + gitVersion="git${gitDate}.0.${gitCommit}" + # gitVersion is now something like 'git20150128.112847.0.17e840a' + debVersion="$debVersion~$gitVersion" + + # $ dpkg --compare-versions 1.5.0 gt 1.5.0~rc1 && echo true || echo false + # true + # $ dpkg --compare-versions 1.5.0~rc1 gt 1.5.0~git20150128.112847.17e840a && echo true || echo false + # true + # $ dpkg --compare-versions 1.5.0~git20150128.112847.17e840a gt 1.5.0~dev~git20150128.112847.17e840a && echo true || echo false + # true + + # ie, 1.5.0 > 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 1.5.0~dev~git20150128.112847.17e840a + fi + + debSource="$(awk -F ': ' '$1 == "Source" { print $2; exit }' hack/make/.build-deb/control)" + debMaintainer="$(awk -F ': ' '$1 == "Maintainer" { print $2; exit }' hack/make/.build-deb/control)" + debDate="$(date --rfc-2822)" + + # if go-md2man is available, pre-generate the man pages + ./man/md2man-all.sh -q || true + # TODO decide if it's worth getting go-md2man in _each_ builder environment to avoid this + + # TODO add a configurable knob for _which_ debs to build so we don't have to modify the file or build all of them every time we need to test + for dir in contrib/builder/deb/*/; do + version="$(basename "$dir")" + suite="${version##*-}" + + image="dockercore/builder-deb:$version" + if ! docker inspect "$image" &> /dev/null; then + ( set -x && docker build -t "$image" "$dir" ) + fi + + mkdir -p "$DEST/$version" + cat > "$DEST/$version/Dockerfile.build" <<-EOF + FROM $image + WORKDIR /usr/src/docker + COPY . 
/usr/src/docker + EOF + if [ "$DOCKER_EXPERIMENTAL" ]; then + echo 'ENV DOCKER_EXPERIMENTAL 1' >> "$DEST/$version/Dockerfile.build" + fi + cat >> "$DEST/$version/Dockerfile.build" <<-EOF + RUN ln -sfv hack/make/.build-deb debian + RUN { echo '$debSource (${debVersion}-0~${suite}) $suite; urgency=low'; echo; echo ' * Version: $VERSION'; echo; echo " -- $debMaintainer $debDate"; } > debian/changelog && cat >&2 debian/changelog + RUN dpkg-buildpackage -uc -us + EOF + tempImage="docker-temp/build-deb:$version" + ( set -x && docker build -t "$tempImage" -f "$DEST/$version/Dockerfile.build" . ) + docker run --rm "$tempImage" bash -c 'cd .. && tar -c *_*' | tar -xvC "$DEST/$version" + docker rmi "$tempImage" + done + + bundle .integration-daemon-stop +) 2>&1 | tee -a "$DEST/test.log" diff --git a/hack/make/build-rpm b/hack/make/build-rpm new file mode 100644 index 00000000..9606bbc5 --- /dev/null +++ b/hack/make/build-rpm @@ -0,0 +1,78 @@ +#!/bin/bash +set -e + +# subshell so that we can export PATH and TZ without breaking other things +( + export TZ=UTC # make sure our "date" variables are UTC-based + + source "$(dirname "$BASH_SOURCE")/.integration-daemon-start" + + # TODO consider using frozen images for the dockercore/builder-rpm tags + + rpmName=docker-engine + rpmVersion="${VERSION%%-*}" + rpmRelease=1 + + # rpmRelease versioning is as follows + # Docker 1.7.0: version=1.7.0, release=1 + # Docker 1.7.0-rc1: version=1.7.0, release=0.1.rc1 + # Docker 1.7.0-dev nightly: version=1.7.0, release=0.0.YYYYMMDD.HHMMSS.gitHASH + + # if we have a "-rc*" suffix, set appropriate release + if [[ "$VERSION" == *-rc* ]]; then + rcVersion=${VERSION#*-rc} + rpmRelease="0.${rcVersion}.rc${rcVersion}" + fi + + # if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better + if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + gitUnix="$(git log -1 --pretty='%at')" + gitDate="$(date --date "@$gitUnix" +'%Y%m%d.%H%M%S')" + gitCommit="$(git log -1 --pretty='%h')" + gitVersion="${gitDate}.git${gitCommit}" + # gitVersion is now something like '20150128.112847.17e840a' + rpmRelease="0.0.$gitVersion" + fi + + rpmPackager="$(awk -F ': ' '$1 == "Packager" { print $2; exit }' hack/make/.build-rpm/${rpmName}.spec)" + rpmDate="$(date +'%a %b %d %Y')" + + # if go-md2man is available, pre-generate the man pages + ./man/md2man-all.sh -q || true + # TODO decide if it's worth getting go-md2man in _each_ builder environment to avoid this + + # TODO add a configurable knob for _which_ rpms to build so we don't have to modify the file or build all of them every time we need to test + for dir in contrib/builder/rpm/*/; do + version="$(basename "$dir")" + suite="${version##*-}" + + image="dockercore/builder-rpm:$version" + if ! docker inspect "$image" &> /dev/null; then + ( set -x && docker build -t "$image" "$dir" ) + fi + + mkdir -p "$DEST/$version" + cat > "$DEST/$version/Dockerfile.build" <<-EOF + FROM $image + COPY . 
/usr/src/${rpmName} + EOF + if [ "$DOCKER_EXPERIMENTAL" ]; then + echo 'ENV DOCKER_EXPERIMENTAL 1' >> "$DEST/$version/Dockerfile.build" + fi + cat >> "$DEST/$version/Dockerfile.build" <<-EOF + RUN mkdir -p /root/rpmbuild/SOURCES + WORKDIR /root/rpmbuild + RUN ln -sfv /usr/src/${rpmName}/hack/make/.build-rpm SPECS + RUN tar -cz -C /usr/src -f /root/rpmbuild/SOURCES/${rpmName}.tar.gz ${rpmName} + WORKDIR /root/rpmbuild/SPECS + RUN { echo '* $rpmDate $rpmPackager $rpmVersion-$rpmRelease'; echo '* Version: $VERSION'; } >> ${rpmName}.spec && tail >&2 ${rpmName}.spec + RUN rpmbuild -ba --define '_release $rpmRelease' --define '_version $rpmVersion' --define '_origversion $VERSION' ${rpmName}.spec + EOF + tempImage="docker-temp/build-rpm:$version" + ( set -x && docker build -t "$tempImage" -f $DEST/$version/Dockerfile.build . ) + docker run --rm "$tempImage" bash -c 'cd /root/rpmbuild && tar -c *RPMS' | tar -xvC "$DEST/$version" + docker rmi "$tempImage" + done + + source "$(dirname "$BASH_SOURCE")/.integration-daemon-stop" +) 2>&1 | tee -a $DEST/test.log diff --git a/hack/make/cover b/hack/make/cover new file mode 100644 index 00000000..624943b8 --- /dev/null +++ b/hack/make/cover @@ -0,0 +1,20 @@ +#!/bin/bash +set -e + +bundle_cover() { + coverprofiles=( "$DEST/../"*"/coverprofiles/"* ) + for p in "${coverprofiles[@]}"; do + echo + ( + set -x + go tool cover -func="$p" + ) + done +} + +if [ "$HAVE_GO_TEST_COVER" ]; then + bundle_cover 2>&1 | tee "$DEST/report.log" +else + echo >&2 'warning: the current version of go does not support -cover' + echo >&2 ' skipping test coverage report' +fi diff --git a/hack/make/cross b/hack/make/cross new file mode 100644 index 00000000..b5eab681 --- /dev/null +++ b/hack/make/cross @@ -0,0 +1,33 @@ +#!/bin/bash +set -e + +# explicit list of os/arch combos that support being a daemon +declare -A daemonSupporting +daemonSupporting=( + [linux/amd64]=1 +) + +# if we have our linux/amd64 version compiled, let's symlink it in +if [ -x "$DEST/../binary/docker-$VERSION" ]; then + mkdir -p "$DEST/linux/amd64" + ( + cd "$DEST/linux/amd64" + ln -s ../../../binary/* ./ + ) + echo "Created symlinks:" "$DEST/linux/amd64/"* +fi + +for platform in $DOCKER_CROSSPLATFORMS; do + ( + export DEST="$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION + mkdir -p "$DEST" + ABS_DEST="$(cd "$DEST" && pwd -P)" + export GOOS=${platform%/*} + export GOARCH=${platform##*/} + if [ -z "${daemonSupporting[$platform]}" ]; then + export LDFLAGS_STATIC_DOCKER="" # we just need a simple client for these platforms + export BUILDFLAGS=( "${ORIG_BUILDFLAGS[@]/ daemon/}" ) # remove the "daemon" build tag from platforms that aren't supported + fi + source "${MAKEDIR}/binary" + ) +done diff --git a/hack/make/dynbinary b/hack/make/dynbinary new file mode 100644 index 00000000..e5fc0be2 --- /dev/null +++ b/hack/make/dynbinary @@ -0,0 +1,20 @@ +#!/bin/bash +set -e + +if [ -z "$DOCKER_CLIENTONLY" ]; then + source "${MAKEDIR}/.dockerinit" + + hash_files "$DEST/dockerinit-$VERSION" +else + # DOCKER_CLIENTONLY must be truthy, so we don't need to bother with dockerinit :) + export DOCKER_INITSHA1="" +fi +# DOCKER_INITSHA1 is exported so that other bundlescripts can easily access it later without recalculating it + +( + export IAMSTATIC="false" + export LDFLAGS_STATIC_DOCKER='' + export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary + export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # we're not building a "static" binary here + source 
"${MAKEDIR}/binary" +) diff --git a/hack/make/dyngccgo b/hack/make/dyngccgo new file mode 100644 index 00000000..df76ac7f --- /dev/null +++ b/hack/make/dyngccgo @@ -0,0 +1,21 @@ +#!/bin/bash +set -e + +if [ -z "$DOCKER_CLIENTONLY" ]; then + source "${MAKEDIR}/.dockerinit-gccgo" + + hash_files "$DEST/dockerinit-$VERSION" +else + # DOCKER_CLIENTONLY must be truthy, so we don't need to bother with dockerinit :) + export DOCKER_INITSHA1="" +fi +# DOCKER_INITSHA1 is exported so that other bundlescripts can easily access it later without recalculating it + +( + export IAMSTATIC="false" + export EXTLDFLAGS_STATIC_DOCKER='' + export LDFLAGS_STATIC_DOCKER='' + export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary + export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # we're not building a "static" binary here + source "${MAKEDIR}/gccgo" +) diff --git a/hack/make/gccgo b/hack/make/gccgo new file mode 100644 index 00000000..972934ab --- /dev/null +++ b/hack/make/gccgo @@ -0,0 +1,27 @@ +#!/bin/bash +set -e + +BINARY_NAME="docker-$VERSION" +BINARY_EXTENSION="$(binary_extension)" +BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION" + +source "${MAKEDIR}/.go-autogen" + +if [[ "${BUILDFLAGS[@]}" =~ 'netgo ' ]]; then + EXTLDFLAGS_STATIC_DOCKER+=' -lnetgo' +fi +go build -compiler=gccgo \ + -o "$DEST/$BINARY_FULLNAME" \ + "${BUILDFLAGS[@]}" \ + -gccgoflags " + -g + $EXTLDFLAGS_STATIC_DOCKER + -Wl,--no-export-dynamic + -ldl + " \ + ./docker + +echo "Created binary: $DEST/$BINARY_FULLNAME" +ln -sf "$BINARY_FULLNAME" "$DEST/docker$BINARY_EXTENSION" + +hash_files "$DEST/$BINARY_FULLNAME" diff --git a/hack/make/release-deb b/hack/make/release-deb new file mode 100755 index 00000000..0d77bcd7 --- /dev/null +++ b/hack/make/release-deb @@ -0,0 +1,150 @@ +#!/bin/bash +set -e + +# This script creates the apt repos for the .deb files generated by hack/make/build-deb +# +# The following can then be used as apt sources: +# deb http://apt.dockerproject.org/repo $distro-$release $version +# +# For example: +# deb http://apt.dockerproject.org/repo ubuntu-trusy main +# deb http://apt.dockerproject.org/repo ubuntu-vivid testing +# deb http://apt.dockerproject.org/repo debian-wheezy experimental +# deb http://apt.dockerproject.org/repo debian-jessie main +# +# ... and so on and so forth for the builds created by hack/make/build-deb + +: ${DOCKER_RELEASE_DIR:=$DEST} +APTDIR=$DOCKER_RELEASE_DIR/apt/repo + +# setup the apt repo (if it does not exist) +mkdir -p "$APTDIR/conf" "$APTDIR/db" + +# supported arches/sections +arches=( amd64 i386 ) +components=( main testing experimental ) + +# create/update distributions file +if [ ! -f "$APTDIR/conf/distributions" ]; then + for suite in $(exec contrib/reprepro/suites.sh); do + cat <<-EOF + Origin: Docker + Suite: $suite + Codename: $suite + Architectures: ${arches[*]} + Components: ${components[*]} + Description: Docker APT Repository + + EOF + done > "$APTDIR/conf/distributions" +fi + +# create/update distributions file +if [ ! -f "$APTDIR/conf/apt-ftparchive.conf" ]; then + cat <<-EOF > "$APTDIR/conf/apt-ftparchive.conf" + Dir { + ArchiveDir "${APTDIR}"; + CacheDir "${APTDIR}/db"; + }; + + Default { + Packages::Compress ". gzip bzip2"; + Sources::Compress ". gzip bzip2"; + Contents::Compress ". 
gzip bzip2"; + }; + + TreeDefault { + BinCacheDB "packages-\$(SECTION)-\$(ARCH).db"; + Directory "pool/\$(SECTION)"; + Packages "\$(DIST)/\$(SECTION)/binary-\$(ARCH)/Packages"; + SrcDirectory "pool/\$(SECTION)"; + Sources "\$(DIST)/\$(SECTION)/source/Sources"; + Contents "\$(DIST)/\$(SECTION)/Contents-\$(ARCH)"; + FileList "$APTDIR/\$(DIST)/\$(SECTION)/filelist"; + }; + EOF + + for suite in $(exec contrib/reprepro/suites.sh); do + cat <<-EOF + Tree "dists/${suite}" { + Sections "main testing experimental"; + Architectures "${arches[*]}"; + } + + EOF + done >> "$APTDIR/conf/apt-ftparchive.conf" +fi + +if [ ! -f "$APTDIR/conf/docker-engine-release.conf" ]; then + cat <<-EOF > "$APTDIR/conf/docker-engine-release.conf" + APT::FTPArchive::Release::Origin "Docker"; + APT::FTPArchive::Release::Components "${components[*]}"; + APT::FTPArchive::Release::Label "Docker APT Repository"; + APT::FTPArchive::Release::Architectures "${arches[*]}"; + EOF +fi + +# set the component and priority for the version being released +component="main" +priority=700 + +if [[ "$VERSION" == *-rc* ]]; then + component="testing" + priority=650 +fi + +if [ $DOCKER_EXPERIMENTAL ] || [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + component="experimental" + priority=600 +fi + +# release the debs +for dir in contrib/builder/deb/*/; do + version="$(basename "$dir")" + codename="${version//debootstrap-}" + + # add the deb for each component for the distro version with reprepro + DEBFILE=( "bundles/$VERSION/build-deb/$version/docker-engine"*.deb ) + + # if we have a $GPG_PASSPHRASE we may as well + # dpkg-sign before reprepro + if [ ! -z "$GPG_PASSPHRASE" ]; then + dpkg-sig -g "--passphrase $GPG_PASSPHRASE" \ + -k releasedocker --sign builder "${DEBFILE[@]}" + fi + + reprepro -v --keepunreferencedfiles \ + -S docker-engine -P "$priority" -C "$component" \ + -b "$APTDIR" includedeb "$codename" "${DEBFILE[@]}" + + # update the filelist for this codename/component + find "$APTDIR/pool/$component" \ + -name *~${codename#*-}*.deb > "$APTDIR/dists/$codename/$component/filelist" +done + + +# run the apt-ftparchive commands so we can have pinning +apt-ftparchive generate "$APTDIR/conf/apt-ftparchive.conf" + +for dir in contrib/builder/deb/*/; do + version="$(basename "$dir")" + codename="${version//debootstrap-}" + + apt-ftparchive \ + -o "APT::FTPArchive::Release::Codename=$codename" \ + -o "APT::FTPArchive::Release::Suite=$codename" \ + -c "$APTDIR/conf/docker-engine-release.conf" \ + release \ + "$APTDIR/dists/$codename" > "$APTDIR/dists/$codename/Release" + + for arch in "${arches[@]}"; do + apt-ftparchive \ + -o "APT::FTPArchive::Release::Codename=$codename" \ + -o "APT::FTPArchive::Release::Suite=$codename" \ + -o "APT::FTPArchive::Release::Component=$component" \ + -o "APT::FTPArchive::Release::Architecture=$arch" \ + -c "$APTDIR/conf/docker-engine-release.conf" \ + release \ + "$APTDIR/dists/$codename/$component/binary-$arch" > "$APTDIR/dists/$codename/$component/binary-$arch/Release" + done +done diff --git a/hack/make/release-rpm b/hack/make/release-rpm new file mode 100755 index 00000000..406e28a8 --- /dev/null +++ b/hack/make/release-rpm @@ -0,0 +1,74 @@ +#!/bin/bash +set -e + +# This script creates the yum repos for the .rpm files generated by hack/make/build-rpm +# +# The following can then be used as a yum repo: +# http://yum.dockerproject.org/repo/$release/$distro/$distro-version +# +# For example: +# http://yum.dockerproject.org/repo/main/fedora/22 +# 
http://yum.dockerproject.org/repo/testing/centos/6 +# http://yum.dockerproject.org/repo/experimental/fedora/21 +# http://yum.dockerproject.org/repo/main/centos/7 +# +# ... and so on and so forth for the builds created by hack/make/build-rpm + +: ${DOCKER_RELEASE_DIR:=$DEST} +YUMDIR=$DOCKER_RELEASE_DIR/yum/repo + +# manage the repos for each distribution seperately +distros=( fedora centos oraclelinux ) + +# get the release +release="main" + +if [[ "$VERSION" == *-rc* ]]; then + release="testing" +fi + +if [ $DOCKER_EXPERIMENTAL ] || [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + release="experimental" +fi + +for distro in "${distros[@]}"; do + # Setup the yum repo + REPO=$YUMDIR/$release/$distro + + for dir in contrib/builder/rpm/$distro-*/; do + version="$(basename "$dir")" + suite="${version##*-}" + + # if the directory does not exist, intialize the yum repo + if [[ ! -d $REPO/$suite/Packages ]]; then + mkdir -p "$REPO/$suite/Packages" + + createrepo --pretty "$REPO/$suite" + fi + + # path to rpms + RPMFILE=( "bundles/$VERSION/build-rpm/$version/RPMS/x86_64/docker-engine"*.rpm "bundles/$VERSION/build-rpm/$version/SRPMS/docker-engine"*.rpm ) + + # if we have a $GPG_PASSPHRASE we may as well + # sign the rpms before adding to repo + if [ ! -z $GPG_PASSPHRASE ]; then + # export our key to rpm import + gpg --armor --export releasedocker > /tmp/gpg + rpm --import /tmp/gpg + + # sign the rpms + rpm \ + --define '_gpg_name releasedocker' \ + --define '_signature gpg' \ + --define '__gpg_check_password_cmd /bin/true' \ + --define '__gpg_sign_cmd %{__gpg} gpg --batch --no-armor --passphrase '$GPG_PASSPHRASE' --no-secmem-warning -u "%{_gpg_name}" --sign --detach-sign --output %{__signature_filename} %{__plaintext_filename}' \ + --resign "${RPMFILE[@]}" + fi + + # copy the rpms to the packages folder + cp "$RPMFILE" "$REPO/$suite/Packages" + + # update the repo + createrepo --pretty --update "$REPO/$suite" + done +done diff --git a/hack/make/sign-repos b/hack/make/sign-repos new file mode 100755 index 00000000..de227535 --- /dev/null +++ b/hack/make/sign-repos @@ -0,0 +1,50 @@ +#!/bin/bash + +# This script signs the deliverables from release-deb and release-rpm +# with a designated GPG key. + +: ${DOCKER_RELEASE_DIR:=$DEST} +APTDIR=$DOCKER_RELEASE_DIR/apt/repo +YUMDIR=$DOCKER_RELEASE_DIR/yum/repo + +if [ -z "$GPG_PASSPHRASE" ]; then + echo >&2 'you need to set GPG_PASSPHRASE in order to sign artifacts' + exit 1 +fi + +if [ ! -d $APTDIR ] && [ ! 
-d $YUMDIR ]; then + echo >&2 'release-rpm or release-deb must be run before sign-repos' + exit 1 +fi + +sign_packages(){ + # sign apt repo metadata + if [ -d $APTDIR ]; then + # create file with public key + gpg --armor --export releasedocker > "$DOCKER_RELEASE_DIR/apt/gpg" + + # sign the repo metadata + for F in $(find $APTDIR -name Release); do + gpg -u releasedocker --passphrase "$GPG_PASSPHRASE" \ + --armor --sign --detach-sign \ + --batch --yes \ + --output "$F.gpg" "$F" + done + fi + + # sign yum repo metadata + if [ -d $YUMDIR ]; then + # create file with public key + gpg --armor --export releasedocker > "$DOCKER_RELEASE_DIR/yum/gpg" + + # sign the repo metadata + for F in $(find $YUMDIR -name repomd.xml ); do + gpg -u releasedocker --passphrase "$GPG_PASSPHRASE" \ + --armor --sign --detach-sign \ + --batch --yes \ + --output "$F.asc" "$F" + done + fi +} + +sign_packages diff --git a/hack/make/test-docker-py b/hack/make/test-docker-py new file mode 100644 index 00000000..83fd6207 --- /dev/null +++ b/hack/make/test-docker-py @@ -0,0 +1,18 @@ +#!/bin/bash +set -e + +# subshell so that we can export PATH without breaking other things +( + bundle .integration-daemon-start + + dockerPy='/docker-py' + [ -d "$dockerPy" ] || { + dockerPy="$DEST/docker-py" + git clone https://github.com/docker/docker-py.git "$dockerPy" + } + + # exporting PYTHONPATH to import "docker" from our local docker-py + test_env PYTHONPATH="$dockerPy" NOT_ON_HOST=true python "$dockerPy/tests/integration_test.py" + + bundle .integration-daemon-stop +) 2>&1 | tee -a "$DEST/test.log" diff --git a/hack/make/test-integration-cli b/hack/make/test-integration-cli new file mode 100644 index 00000000..3b4488e1 --- /dev/null +++ b/hack/make/test-integration-cli @@ -0,0 +1,18 @@ +#!/bin/bash +set -e + +bundle_test_integration_cli() { + TESTFLAGS="$TESTFLAGS -check.v" + go_test_dir ./integration-cli +} + +# subshell so that we can export PATH without breaking other things +( + bundle .integration-daemon-start + + bundle .integration-daemon-setup + + bundle_test_integration_cli + + bundle .integration-daemon-stop +) 2>&1 | tee -a "$DEST/test.log" diff --git a/hack/make/test-unit b/hack/make/test-unit new file mode 100644 index 00000000..1053f615 --- /dev/null +++ b/hack/make/test-unit @@ -0,0 +1,86 @@ +#!/bin/bash +set -e + +: ${PARALLEL_JOBS:=$(nproc 2>/dev/null || echo 1)} # if nproc fails (usually because we don't have it), let's not parallelize by default + +RED=$'\033[31m' +GREEN=$'\033[32m' +TEXTRESET=$'\033[0m' # reset the foreground colour + +# Run Docker's test suite, including sub-packages, and store their output as a bundle +# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. +# You can use this to select certain tests to run, eg. +# +# TESTFLAGS='-test.run ^TestBuild$' ./hack/make.sh test-unit +# +bundle_test_unit() { + { + date + + # Run all the tests if no TESTDIRS were specified. 
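+		# (a hypothetical narrowed run, mirroring the TESTFLAGS examples above:
+		#   TESTDIRS='./pkg/archive' ./hack/make.sh test-unit )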
+		if [ -z "$TESTDIRS" ]; then
+			TESTDIRS=$(find_dirs '*_test.go')
+		fi
+		(
+			export LDFLAGS
+			export TESTFLAGS
+			export HAVE_GO_TEST_COVER
+
+			# some hack to export array variables
+			export BUILDFLAGS_FILE="$DEST/buildflags-file"
+			( IFS=$'\n'; echo "${BUILDFLAGS[*]}" ) > "$BUILDFLAGS_FILE"
+
+			if command -v parallel &> /dev/null; then
+				# accommodate parallel to be able to access variables
+				export SHELL="$BASH"
+				export HOME="$(mktemp -d)"
+				mkdir -p "$HOME/.parallel"
+				touch "$HOME/.parallel/ignored_vars"
+
+				echo "$TESTDIRS" | parallel --jobs "$PARALLEL_JOBS" --env _ "${MAKEDIR}/.go-compile-test-dir"
+				rm -rf "$HOME"
+			else
+				# aww, no "parallel" available - fall back to boring
+				for test_dir in $TESTDIRS; do
+					"${MAKEDIR}/.go-compile-test-dir" "$test_dir" || true
+					# don't let one directory that fails to build tank _all_ our tests!
+				done
+			fi
+			rm -f "$BUILDFLAGS_FILE"
+		)
+		echo "$TESTDIRS" | go_run_test_dir
+	}
+}
+
+go_run_test_dir() {
+	TESTS_FAILED=()
+	while read dir; do
+		echo
+		echo '+ go test' $TESTFLAGS "${DOCKER_PKG}${dir#.}"
+		precompiled="$ABS_DEST/precompiled/$dir.test$(binary_extension)"
+		if ! ( cd "$dir" && test_env "$precompiled" $TESTFLAGS ); then
+			TESTS_FAILED+=("$dir")
+			echo
+			echo "${RED}Tests failed: $dir${TEXTRESET}"
+			sleep 1 # give it a second, so observers watching can take note
+		fi
+	done
+
+	echo
+	echo
+	echo
+
+	# if some tests fail, we want the bundlescript to fail, but we want to
+	# try running ALL the tests first, hence TESTS_FAILED
+	if [ "${#TESTS_FAILED[@]}" -gt 0 ]; then
+		echo "${RED}Test failures in: ${TESTS_FAILED[@]}${TEXTRESET}"
+		echo
+		false
+	else
+		echo "${GREEN}Test success${TEXTRESET}"
+		echo
+		true
+	fi
+}
+
+bundle_test_unit 2>&1 | tee -a "$DEST/test.log"
diff --git a/hack/make/tgz b/hack/make/tgz
new file mode 100644
index 00000000..a512b832
--- /dev/null
+++ b/hack/make/tgz
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+CROSS="$DEST/../cross"
+
+set -e
+
+if [ ! 
-d "$CROSS/linux/amd64" ]; then + echo >&2 'error: binary and cross must be run before tgz' + false +fi + +for d in "$CROSS/"*/*; do + GOARCH="$(basename "$d")" + GOOS="$(basename "$(dirname "$d")")" + BINARY_NAME="docker-$VERSION" + BINARY_EXTENSION="$(export GOOS && binary_extension)" + BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION" + mkdir -p "$DEST/$GOOS/$GOARCH" + TGZ="$DEST/$GOOS/$GOARCH/$BINARY_NAME.tgz" + + mkdir -p "$DEST/build" + + mkdir -p "$DEST/build/usr/local/bin" + cp -L "$d/$BINARY_FULLNAME" "$DEST/build/usr/local/bin/docker$BINARY_EXTENSION" + + tar --numeric-owner --owner 0 -C "$DEST/build" -czf "$TGZ" usr + + hash_files "$TGZ" + + rm -rf "$DEST/build" + + echo "Created tgz: $TGZ" +done diff --git a/hack/make/ubuntu b/hack/make/ubuntu new file mode 100644 index 00000000..0421dc36 --- /dev/null +++ b/hack/make/ubuntu @@ -0,0 +1,190 @@ +#!/bin/bash + +PKGVERSION="${VERSION//-/'~'}" +# if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better +if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + GIT_UNIX="$(git log -1 --pretty='%at')" + GIT_DATE="$(date --date "@$GIT_UNIX" +'%Y%m%d.%H%M%S')" + GIT_COMMIT="$(git log -1 --pretty='%h')" + GIT_VERSION="git${GIT_DATE}.0.${GIT_COMMIT}" + # GIT_VERSION is now something like 'git20150128.112847.0.17e840a' + PKGVERSION="$PKGVERSION~$GIT_VERSION" +fi + +# $ dpkg --compare-versions 1.5.0 gt 1.5.0~rc1 && echo true || echo false +# true +# $ dpkg --compare-versions 1.5.0~rc1 gt 1.5.0~git20150128.112847.17e840a && echo true || echo false +# true +# $ dpkg --compare-versions 1.5.0~git20150128.112847.17e840a gt 1.5.0~dev~git20150128.112847.17e840a && echo true || echo false +# true + +# ie, 1.5.0 > 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 1.5.0~dev~git20150128.112847.17e840a + +PACKAGE_ARCHITECTURE="$(dpkg-architecture -qDEB_HOST_ARCH)" +PACKAGE_URL="https://www.docker.com/" +PACKAGE_MAINTAINER="support@docker.com" +PACKAGE_DESCRIPTION="Linux container runtime +Docker complements LXC with a high-level API which operates at the process +level. It runs unix processes with strong guarantees of isolation and +repeatability across servers. +Docker is a great building block for automating distributed systems: +large-scale web deployments, database clusters, continuous deployment systems, +private PaaS, service-oriented architectures, etc." +PACKAGE_LICENSE="Apache-2.0" + +# Build docker as an ubuntu package using FPM and REPREPRO (sue me). +# bundle_binary must be called first. 
+bundle_ubuntu() { + DIR="$ABS_DEST/build" + + # Include our udev rules + mkdir -p "$DIR/etc/udev/rules.d" + cp contrib/udev/80-docker.rules "$DIR/etc/udev/rules.d/" + + # Include our init scripts + mkdir -p "$DIR/etc/init" + cp contrib/init/upstart/docker.conf "$DIR/etc/init/" + mkdir -p "$DIR/etc/init.d" + cp contrib/init/sysvinit-debian/docker "$DIR/etc/init.d/" + mkdir -p "$DIR/etc/default" + cp contrib/init/sysvinit-debian/docker.default "$DIR/etc/default/docker" + mkdir -p "$DIR/lib/systemd/system" + cp contrib/init/systemd/docker.{service,socket} "$DIR/lib/systemd/system/" + + # Include contributed completions + mkdir -p "$DIR/etc/bash_completion.d" + cp contrib/completion/bash/docker "$DIR/etc/bash_completion.d/" + mkdir -p "$DIR/usr/share/zsh/vendor-completions" + cp contrib/completion/zsh/_docker "$DIR/usr/share/zsh/vendor-completions/" + mkdir -p "$DIR/etc/fish/completions" + cp contrib/completion/fish/docker.fish "$DIR/etc/fish/completions/" + + # Include contributed man pages + man/md2man-all.sh -q + manRoot="$DIR/usr/share/man" + mkdir -p "$manRoot" + for manDir in man/man?; do + manBase="$(basename "$manDir")" # "man1" + for manFile in "$manDir"/*; do + manName="$(basename "$manFile")" # "docker-build.1" + mkdir -p "$manRoot/$manBase" + gzip -c "$manFile" > "$manRoot/$manBase/$manName.gz" + done + done + + # Copy the binary + # This will fail if the binary bundle hasn't been built + mkdir -p "$DIR/usr/bin" + cp "$DEST/../binary/docker-$VERSION" "$DIR/usr/bin/docker" + + # Generate postinst/prerm/postrm scripts + cat > "$DEST/postinst" <<'EOF' +#!/bin/sh +set -e +set -u + +if [ "$1" = 'configure' ] && [ -z "$2" ]; then + if ! getent group docker > /dev/null; then + groupadd --system docker + fi +fi + +if ! { [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; }; then + # we only need to do this if upstart isn't in charge + update-rc.d docker defaults > /dev/null || true +fi +if [ -n "$2" ]; then + _dh_action=restart +else + _dh_action=start +fi +service docker $_dh_action 2>/dev/null || true + +#DEBHELPER# +EOF + cat > "$DEST/prerm" <<'EOF' +#!/bin/sh +set -e +set -u + +service docker stop 2>/dev/null || true + +#DEBHELPER# +EOF + cat > "$DEST/postrm" <<'EOF' +#!/bin/sh +set -e +set -u + +if [ "$1" = "purge" ] ; then + update-rc.d docker remove > /dev/null || true +fi + +# In case this system is running systemd, we make systemd reload the unit files +# to pick up changes. 
+if [ -d /run/systemd/system ] ; then + systemctl --system daemon-reload > /dev/null || true +fi + +#DEBHELPER# +EOF + # TODO swaths of these were borrowed from debhelper's auto-inserted stuff, because we're still using fpm - we need to use debhelper instead, and somehow reconcile Ubuntu that way + chmod +x "$DEST/postinst" "$DEST/prerm" "$DEST/postrm" + + ( + # switch directories so we create *.deb in the right folder + cd "$DEST" + + # create lxc-docker-VERSION package + fpm -s dir -C "$DIR" \ + --name "lxc-docker-$VERSION" --version "$PKGVERSION" \ + --after-install "$ABS_DEST/postinst" \ + --before-remove "$ABS_DEST/prerm" \ + --after-remove "$ABS_DEST/postrm" \ + --architecture "$PACKAGE_ARCHITECTURE" \ + --prefix / \ + --depends iptables \ + --deb-recommends aufs-tools \ + --deb-recommends ca-certificates \ + --deb-recommends git \ + --deb-recommends xz-utils \ + --deb-recommends 'cgroupfs-mount | cgroup-lite' \ + --deb-suggests apparmor \ + --description "$PACKAGE_DESCRIPTION" \ + --maintainer "$PACKAGE_MAINTAINER" \ + --conflicts docker \ + --conflicts docker.io \ + --conflicts lxc-docker-virtual-package \ + --provides lxc-docker \ + --provides lxc-docker-virtual-package \ + --replaces lxc-docker \ + --replaces lxc-docker-virtual-package \ + --url "$PACKAGE_URL" \ + --license "$PACKAGE_LICENSE" \ + --config-files /etc/udev/rules.d/80-docker.rules \ + --config-files /etc/init/docker.conf \ + --config-files /etc/init.d/docker \ + --config-files /etc/default/docker \ + --deb-compression gz \ + -t deb . + # TODO replace "Suggests: cgroup-lite" with "Recommends: cgroupfs-mount | cgroup-lite" once cgroupfs-mount is available + + # create empty lxc-docker wrapper package + fpm -s empty \ + --name lxc-docker --version "$PKGVERSION" \ + --architecture "$PACKAGE_ARCHITECTURE" \ + --depends lxc-docker-$VERSION \ + --description "$PACKAGE_DESCRIPTION" \ + --maintainer "$PACKAGE_MAINTAINER" \ + --url "$PACKAGE_URL" \ + --license "$PACKAGE_LICENSE" \ + --deb-compression gz \ + -t deb + ) + + # clean up after ourselves so we have a clean output directory + rm "$DEST/postinst" "$DEST/prerm" "$DEST/postrm" + rm -r "$DIR" +} + +bundle_ubuntu diff --git a/hack/make/validate-dco b/hack/make/validate-dco new file mode 100644 index 00000000..5ac98728 --- /dev/null +++ b/hack/make/validate-dco @@ -0,0 +1,54 @@ +#!/bin/bash + +source "${MAKEDIR}/.validate" + +adds=$(validate_diff --numstat | awk '{ s += $1 } END { print s }') +dels=$(validate_diff --numstat | awk '{ s += $2 } END { print s }') +#notDocs="$(validate_diff --numstat | awk '$3 !~ /^docs\// { print $3 }')" + +: ${adds:=0} +: ${dels:=0} + +# "Username may only contain alphanumeric characters or dashes and cannot begin with a dash" +githubUsernameRegex='[a-zA-Z0-9][a-zA-Z0-9-]+' + +# https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work +dcoPrefix='Signed-off-by:' +dcoRegex="^(Docker-DCO-1.1-)?$dcoPrefix ([^<]+) <([^<>@]+@[^<>]+)>( \\(github: ($githubUsernameRegex)\\))?$" + +check_dco() { + grep -qE "$dcoRegex" +} + +if [ $adds -eq 0 -a $dels -eq 0 ]; then + echo '0 adds, 0 deletions; nothing to validate! :)' +else + commits=( $(validate_log --format='format:%H%n') ) + badCommits=() + for commit in "${commits[@]}"; do + if [ -z "$(git log -1 --format='format:' --name-status "$commit")" ]; then + # no content (ie, Merge commit, etc) + continue + fi + if ! git log -1 --format='format:%B' "$commit" | check_dco; then + badCommits+=( "$commit" ) + fi + done + if [ ${#badCommits[@]} -eq 0 ]; then + echo "Congratulations! 
All commits are properly signed with the DCO!" + else + { + echo "These commits do not have a proper '$dcoPrefix' marker:" + for commit in "${badCommits[@]}"; do + echo " - $commit" + done + echo + echo 'Please amend each commit to include a properly formatted DCO marker.' + echo + echo 'Visit the following URL for information about the Docker DCO:' + echo ' https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work' + echo + } >&2 + false + fi +fi diff --git a/hack/make/validate-gofmt b/hack/make/validate-gofmt new file mode 100644 index 00000000..7ad9e855 --- /dev/null +++ b/hack/make/validate-gofmt @@ -0,0 +1,30 @@ +#!/bin/bash + +source "${MAKEDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' || true) ) +unset IFS + +badFiles=() +for f in "${files[@]}"; do + # we use "git show" here to validate that what's committed is formatted + if [ "$(git show "$VALIDATE_HEAD:$f" | gofmt -s -l)" ]; then + badFiles+=( "$f" ) + fi +done + +if [ ${#badFiles[@]} -eq 0 ]; then + echo 'Congratulations! All Go source files are properly formatted.' +else + { + echo "These files are not properly gofmt'd:" + for f in "${badFiles[@]}"; do + echo " - $f" + done + echo + echo 'Please reformat the above files using "gofmt -s -w" and commit the result.' + echo + } >&2 + false +fi diff --git a/hack/make/validate-lint b/hack/make/validate-lint new file mode 100644 index 00000000..0c5b9f29 --- /dev/null +++ b/hack/make/validate-lint @@ -0,0 +1,66 @@ +#!/bin/bash + +source "${MAKEDIR}/.validate" + +# We will eventually get to the point when packages should be the complete list +# of subpackages, vendoring excluded, as given by: +# +# packages=( $(go list ./... 2> /dev/null | grep -vE "^github.com/docker/docker/vendor" || true ) ) + +packages=( + builder + builder/command + builder/parser + builder/parser/dumper + daemon/events + daemon/execdriver/native/template + daemon/network + docker + dockerinit + integration-cli + pkg/chrootarchive + pkg/directory + pkg/fileutils + pkg/homedir + pkg/listenbuffer + pkg/mflag/example + pkg/mount + pkg/namesgenerator + pkg/nat + pkg/promise + pkg/pubsub + pkg/random + pkg/reexec + pkg/symlink + pkg/timeutils + pkg/tlsconfig + pkg/urlutil + pkg/version + registry + utils +) + +errors=() +for p in "${packages[@]}"; do + # Run golint on package/*.go file explicitly to validate all go files + # and not just the ones for the current platform. + failedLint=$(golint "$p"/*.go) + if [ "$failedLint" ]; then + errors+=( "$failedLint" ) + fi +done + +if [ ${#errors[@]} -eq 0 ]; then + echo 'Congratulations! All Go source files have been linted.' +else + { + echo "Errors from golint:" + for err in "${errors[@]}"; do + echo "$err" + done + echo + echo 'Please fix the above errors. You can test via "golint" and commit the result.' 
+ echo + } >&2 + false +fi diff --git a/hack/make/validate-pkg b/hack/make/validate-pkg new file mode 100644 index 00000000..d5843417 --- /dev/null +++ b/hack/make/validate-pkg @@ -0,0 +1,32 @@ +#!/bin/bash +set -e + +source "${MAKEDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'pkg/*.go' || true) ) +unset IFS + +badFiles=() +for f in "${files[@]}"; do + IFS=$'\n' + badImports=( $(go list -e -f '{{ join .Deps "\n" }}' "$f" | sort -u | grep -vE '^github.com/docker/docker/pkg/' | grep -E '^github.com/docker/docker' || true) ) + unset IFS + + for import in "${badImports[@]}"; do + badFiles+=( "$f imports $import" ) + done +done + +if [ ${#badFiles[@]} -eq 0 ]; then + echo 'Congratulations! "./pkg/..." is safely isolated from internal code.' +else + { + echo 'These files import internal code: (either directly or indirectly)' + for f in "${badFiles[@]}"; do + echo " - $f" + done + echo + } >&2 + false +fi diff --git a/hack/make/validate-test b/hack/make/validate-test new file mode 100644 index 00000000..d9d05f3b --- /dev/null +++ b/hack/make/validate-test @@ -0,0 +1,35 @@ +#!/bin/bash + +# Make sure we're not using gos' Testing package any more in integration-cli + +source "${MAKEDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'integration-cli/*.go' || true) ) +unset IFS + +badFiles=() +for f in "${files[@]}"; do + # skip check_test.go since it *does* use the testing package + if [ "$f" = "integration-cli/check_test.go" ]; then + continue + fi + + # we use "git show" here to validate that what's committed is formatted + if git show "$VALIDATE_HEAD:$f" | grep -q testing.T; then + badFiles+=( "$f" ) + fi +done + +if [ ${#badFiles[@]} -eq 0 ]; then + echo 'Congratulations! No testing.T found.' +else + { + echo "These files use the wrong testing infrastructure:" + for f in "${badFiles[@]}"; do + echo " - $f" + done + echo + } >&2 + false +fi diff --git a/hack/make/validate-toml b/hack/make/validate-toml new file mode 100644 index 00000000..18f26ee7 --- /dev/null +++ b/hack/make/validate-toml @@ -0,0 +1,30 @@ +#!/bin/bash + +source "${MAKEDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'MAINTAINERS' || true) ) +unset IFS + +badFiles=() +for f in "${files[@]}"; do + # we use "git show" here to validate that what's committed is formatted + if ! git show "$VALIDATE_HEAD:$f" | tomlv /proc/self/fd/0 ; then + badFiles+=( "$f" ) + fi +done + +if [ ${#badFiles[@]} -eq 0 ]; then + echo 'Congratulations! All toml source files changed here have valid syntax.' +else + { + echo "These files are not valid toml:" + for f in "${badFiles[@]}"; do + echo " - $f" + done + echo + echo 'Please reformat the above files as valid toml' + echo + } >&2 + false +fi diff --git a/hack/make/validate-vet b/hack/make/validate-vet new file mode 100644 index 00000000..febe93e5 --- /dev/null +++ b/hack/make/validate-vet @@ -0,0 +1,32 @@ +#!/bin/bash + +source "${MAKEDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' || true) ) +unset IFS + +errors=() +for f in "${files[@]}"; do + # we use "git show" here to validate that what's committed passes go vet + failedVet=$(go vet "$f") + if [ "$failedVet" ]; then + errors+=( "$failedVet" ) + fi +done + + +if [ ${#errors[@]} -eq 0 ]; then + echo 'Congratulations! All Go source files have been vetted.' 
+else + { + echo "Errors from go vet:" + for err in "${errors[@]}"; do + echo " - $err" + done + echo + echo 'Please fix the above errors. You can test via "go vet" and commit the result.' + echo + } >&2 + false +fi diff --git a/hack/release.sh b/hack/release.sh new file mode 100755 index 00000000..b56d69e8 --- /dev/null +++ b/hack/release.sh @@ -0,0 +1,401 @@ +#!/usr/bin/env bash +set -e + +# This script looks for bundles built by make.sh, and releases them on a +# public S3 bucket. +# +# Bundles should be available for the VERSION string passed as argument. +# +# The correct way to call this script is inside a container built by the +# official Dockerfile at the root of the Docker source code. The Dockerfile, +# make.sh and release.sh should all be from the same source code revision. + +set -o pipefail + +# Print a usage message and exit. +usage() { + cat >&2 <<'EOF' +To run, I need: +- to be in a container generated by the Dockerfile at the top of the Docker + repository; +- to be provided with the name of an S3 bucket, in environment variable + AWS_S3_BUCKET; +- to be provided with AWS credentials for this S3 bucket, in environment + variables AWS_ACCESS_KEY and AWS_SECRET_KEY; +- the passphrase to unlock the GPG key which will sign the deb packages + (passed as environment variable GPG_PASSPHRASE); +- a generous amount of good will and nice manners. +The canonical way to run me is to run the image produced by the Dockerfile: e.g.:" + +docker run -e AWS_S3_BUCKET=test.docker.com \ + -e AWS_ACCESS_KEY=... \ + -e AWS_SECRET_KEY=... \ + -e GPG_PASSPHRASE=... \ + -i -t --privileged \ + docker ./hack/release.sh +EOF + exit 1 +} + +[ "$AWS_S3_BUCKET" ] || usage +[ "$AWS_ACCESS_KEY" ] || usage +[ "$AWS_SECRET_KEY" ] || usage +[ "$GPG_PASSPHRASE" ] || usage +[ -d /go/src/github.com/docker/docker ] || usage +cd /go/src/github.com/docker/docker +[ -x hack/make.sh ] || usage + +RELEASE_BUNDLES=( + binary + cross + tgz + ubuntu +) + +if [ "$1" != '--release-regardless-of-test-failure' ]; then + RELEASE_BUNDLES=( + test-unit + "${RELEASE_BUNDLES[@]}" + test-integration-cli + ) +fi + +VERSION=$(< VERSION) +BUCKET=$AWS_S3_BUCKET + +# These are the 2 keys we've used to sign the deb's +# release (get.docker.com) +# GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9" +# test (test.docker.com) +# GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6" + +setup_s3() { + echo "Setting up S3" + # Try creating the bucket. Ignore errors (it might already exist). + s3cmd mb "s3://$BUCKET" 2>/dev/null || true + # Check access to the bucket. + # s3cmd has no useful exit status, so we cannot check that. + # Instead, we check if it outputs anything on standard output. + # (When there are problems, it uses standard error instead.) + s3cmd info "s3://$BUCKET" | grep -q . + # Make the bucket accessible through website endpoints. + s3cmd ws-create --ws-index index --ws-error error "s3://$BUCKET" +} + +# write_to_s3 uploads the contents of standard input to the specified S3 url. +write_to_s3() { + DEST=$1 + F=`mktemp` + cat > "$F" + s3cmd --acl-public --mime-type='text/plain' put "$F" "$DEST" + rm -f "$F" +} + +s3_url() { + case "$BUCKET" in + get.docker.com|test.docker.com|experimental.docker.com) + echo "https://$BUCKET" + ;; + *) + s3cmd ws-info s3://$BUCKET | awk -v 'FS=: +' '/http:\/\/'$BUCKET'/ { gsub(/\/+$/, "", $2); print $2 }' + ;; + esac +} + +build_all() { + echo "Building release" + if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then + echo >&2 + echo >&2 'The build or tests appear to have failed.' 
+ echo >&2 + echo >&2 'You, as the release maintainer, now have a couple options:' + echo >&2 '- delay release and fix issues' + echo >&2 '- delay release and fix issues' + echo >&2 '- did we mention how important this is? issues need fixing :)' + echo >&2 + echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,' + echo >&2 ' really knows all the hairy problems at hand with the current release' + echo >&2 ' issues) may bypass this checking by running this script again with the' + echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip' + echo >&2 ' running the test suite, and will only build the binaries and packages. Please' + echo >&2 ' avoid using this if at all possible.' + echo >&2 + echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass' + echo >&2 ' should be used. If there are release issues, we should always err on the' + echo >&2 ' side of caution.' + echo >&2 + exit 1 + fi +} + +upload_release_build() { + src="$1" + dst="$2" + latest="$3" + + echo + echo "Uploading $src" + echo " to $dst" + echo + s3cmd --follow-symlinks --preserve --acl-public put "$src" "$dst" + if [ "$latest" ]; then + echo + echo "Copying to $latest" + echo + s3cmd --acl-public cp "$dst" "$latest" + fi + + # get hash files too (see hash_files() in hack/make.sh) + for hashAlgo in md5 sha256; do + if [ -e "$src.$hashAlgo" ]; then + echo + echo "Uploading $src.$hashAlgo" + echo " to $dst.$hashAlgo" + echo + s3cmd --follow-symlinks --preserve --acl-public --mime-type='text/plain' put "$src.$hashAlgo" "$dst.$hashAlgo" + if [ "$latest" ]; then + echo + echo "Copying to $latest.$hashAlgo" + echo + s3cmd --acl-public cp "$dst.$hashAlgo" "$latest.$hashAlgo" + fi + fi + done +} + +release_build() { + echo "Releasing binaries" + GOOS=$1 + GOARCH=$2 + + binDir=bundles/$VERSION/cross/$GOOS/$GOARCH + tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH + binary=docker-$VERSION + tgz=docker-$VERSION.tgz + + latestBase= + if [ -z "$NOLATEST" ]; then + latestBase=docker-latest + fi + + # we need to map our GOOS and GOARCH to uname values + # see https://en.wikipedia.org/wiki/Uname + # ie, GOOS=linux -> "uname -s"=Linux + + s3Os=$GOOS + case "$s3Os" in + darwin) + s3Os=Darwin + ;; + freebsd) + s3Os=FreeBSD + ;; + linux) + s3Os=Linux + ;; + windows) + s3Os=Windows + binary+='.exe' + if [ "$latestBase" ]; then + latestBase+='.exe' + fi + ;; + *) + echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'" + exit 1 + ;; + esac + + s3Arch=$GOARCH + case "$s3Arch" in + amd64) + s3Arch=x86_64 + ;; + 386) + s3Arch=i386 + ;; + arm) + s3Arch=armel + # someday, we might potentially support mutliple GOARM values, in which case we might get armhf here too + ;; + *) + echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'" + exit 1 + ;; + esac + + s3Dir=s3://$BUCKET/builds/$s3Os/$s3Arch + latest= + latestTgz= + if [ "$latestBase" ]; then + latest="$s3Dir/$latestBase" + latestTgz="$s3Dir/$latestBase.tgz" + fi + + if [ ! -x "$binDir/$binary" ]; then + echo >&2 "error: can't find $binDir/$binary - was it compiled properly?" + exit 1 + fi + if [ ! -f "$tgzDir/$tgz" ]; then + echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?" + exit 1 + fi + + upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest" + upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz" +} + +# Upload the 'ubuntu' bundle to S3: +# 1. A full APT repository is published at $BUCKET/ubuntu/ +# 2. 
Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/index +release_ubuntu() { + echo "Releasing ubuntu" + [ -e "bundles/$VERSION/ubuntu" ] || { + echo >&2 './hack/make.sh must be run before release_ubuntu' + exit 1 + } + + local debfiles=( "bundles/$VERSION/ubuntu/"*.deb ) + + # Sign our packages + dpkg-sig -g "--passphrase $GPG_PASSPHRASE" -k releasedocker --sign builder "${debfiles[@]}" + + # Setup the APT repo + APTDIR=bundles/$VERSION/ubuntu/apt + mkdir -p "$APTDIR/conf" "$APTDIR/db" + s3cmd sync "s3://$BUCKET/ubuntu/db/" "$APTDIR/db/" || true + cat > "$APTDIR/conf/distributions" < "bundles/$VERSION/ubuntu/gpg" + s3cmd --acl-public put "bundles/$VERSION/ubuntu/gpg" "s3://$BUCKET/gpg" + + local gpgFingerprint=36A1D7869245C8950F966E92D8576A8BA88D21E9 + local s3Headers= + if [[ $BUCKET == test* ]]; then + gpgFingerprint=740B314AE3941731B942C66ADF4FD13717AAD7D6 + elif [[ $BUCKET == experimental* ]]; then + gpgFingerprint=E33FF7BF5C91D50A6F91FFFD4CC38D40F9A96B49 + s3Headers='--add-header=Cache-Control:no-cache' + fi + + # Upload repo + s3cmd --acl-public "$s3Headers" sync "$APTDIR/" "s3://$BUCKET/ubuntu/" + cat <&2 './hack/make.sh must be run before release_binaries' + exit 1 + } + + for d in bundles/$VERSION/cross/*/*; do + GOARCH="$(basename "$d")" + GOOS="$(basename "$(dirname "$d")")" + release_build "$GOOS" "$GOARCH" + done + + # TODO create redirect from builds/*/i686 to builds/*/i386 + + cat </dev/null || { + gpg --gen-key --batch < +

+ Layer +
+
+ Images are composed of layers. Image layer is a general + term which may be used to refer to one or both of the following: + +
  1. The metadata for the layer, described in the JSON format.
  2. The filesystem changes described by a layer.
+ + To refer to the former you may use the term Layer JSON or + Layer Metadata. To refer to the latter you may use the term + Image Filesystem Changeset or Image Diff. +
+
+ Image JSON +
+
+ Each layer has an associated JSON structure which describes some + basic information about the image such as date created, author, and the + ID of its parent image as well as execution/runtime configuration like + its entry point, default arguments, CPU/memory shares, networking, and + volumes. +
+
+ Image Filesystem Changeset +
+
+ Each layer has an archive of the files which have been added, changed, + or deleted relative to its parent layer. Using a layer-based or union + filesystem such as AUFS, or by computing the diff from filesystem + snapshots, the filesystem changeset can be used to present a series of + image layers as if they were one cohesive filesystem. +
+
+ Image ID +
+
+ Each layer is given an ID upon its creation. It is + represented as a hexadecimal encoding of 256 bits, e.g., + a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9. + Image IDs should be sufficiently random so as to be globally unique. + 32 bytes read from /dev/urandom is sufficient for all + practical purposes. Alternatively, an image ID may be derived as a + cryptographic hash of image contents as the result is considered + indistinguishable from random. The choice is left up to implementors. +
+
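For illustration, here is a minimal Go sketch (not code from this repository) that generates an ID in this format from 32 random bytes:

```
package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

func main() {
	// 32 random bytes hex-encode to the 64-character representation of
	// a 256-bit ID, as described above.
	id := make([]byte, 32)
	if _, err := rand.Read(id); err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(id))
}
```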
+ Image Parent +
+
+ Most layer metadata structs contain a parent field which + refers to the Image from which another directly descends. An image + contains a separate JSON metadata file and set of changes relative to + the filesystem of its parent image. Image Ancestor and + Image Descendant are also common terms. +
+
+ Image Checksum +
+
+ Layer metadata structs contain a cryptographic hash of the contents of + the layer's filesystem changeset. Though the set of changes exists as a + simple Tar archive, two archives with identical filenames and content + will have different SHA digests if the last-access or last-modified + times of any entries differ. For this reason, image checksums are + generated using the TarSum algorithm which produces a cryptographic + hash of file contents and selected headers only. Details of this + algorithm are described in the separate TarSum specification. +
+
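To see why a plain digest of the archive bytes is not enough, the following Go sketch (illustrative only; this is *not* the TarSum algorithm) builds two archives with identical filenames and content but different modification times, and shows that their raw SHA-256 digests differ:

```
package main

import (
	"archive/tar"
	"bytes"
	"crypto/sha256"
	"fmt"
	"time"
)

// buildTar returns a one-file archive; only the entry's modification
// time varies between calls. Error handling is elided for brevity.
func buildTar(mtime time.Time) []byte {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	content := []byte("identical content")
	tw.WriteHeader(&tar.Header{
		Name:    "file",
		Mode:    0644,
		Size:    int64(len(content)),
		ModTime: mtime,
	})
	tw.Write(content)
	tw.Close()
	return buf.Bytes()
}

func main() {
	a := buildTar(time.Unix(0, 0))
	b := buildTar(time.Unix(1, 0))
	// Same names, same content, different mtimes: the digests differ,
	// which is the problem TarSum avoids by hashing file contents and
	// selected headers only.
	fmt.Printf("%x\n%x\n", sha256.Sum256(a), sha256.Sum256(b))
}
```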
+ Tag +
+
+ A tag serves to map a descriptive, user-given name to any single image + ID. An image name suffix (the name component after :) is + often referred to as a tag as well, though it strictly refers to the + full name of an image. Acceptable values for a tag suffix are + implementation specific, but they SHOULD be limited to the set of + alphanumeric characters [a-zA-Z0-9], punctuation + characters [._-], and MUST NOT contain a : + character. +
+
+ Repository +
+
+ A collection of tags grouped under a common prefix (the name component + before :). For example, in an image tagged with the name + my-app:3.1.4, my-app is the Repository + component of the name. Acceptable values for a repository name are + implementation specific, but they SHOULD be limited to the set of + alphanumeric characters [a-zA-Z0-9] and punctuation + characters [._-]; however, it MAY contain additional + / and : characters for organizational + purposes, with the last : character being interpreted as + dividing the repository component of the name from the tag suffix + component. +
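Applying that rule is a straightforward last-index split. A minimal Go sketch (illustrative only; this is not the name parser Docker itself uses):

```
package main

import (
	"fmt"
	"strings"
)

// splitRepoTag divides an image name at the last ':' character:
// everything before it is the repository component and everything
// after it is the tag suffix.
func splitRepoTag(name string) (repo, tag string) {
	if i := strings.LastIndex(name, ":"); i >= 0 {
		return name[:i], name[i+1:]
	}
	return name, ""
}

func main() {
	repo, tag := splitRepoTag("my-app:3.1.4")
	fmt.Println(repo, tag) // my-app 3.1.4
}
```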

## Image JSON Description

Here is an example image JSON file:

```
{
    "id": "a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9",
    "parent": "c6e3cedcda2e3982a1a6760e178355e8e65f7b80e4e5248743fa3549d284e024",
    "checksum": "tarsum.v1+sha256:e58fcf7418d2390dec8e8fb69d88c06ec07039d651fedc3aa72af9972e7d046b",
    "created": "2014-10-13T21:19:18.674353812Z",
    "author": "Alyssa P. Hacker <alyspdev@example.com>",
    "architecture": "amd64",
    "os": "linux",
    "Size": 271828,
    "config": {
        "User": "alice",
        "Memory": 2048,
        "MemorySwap": 4096,
        "CpuShares": 8,
        "ExposedPorts": {
            "8080/tcp": {}
        },
        "Env": [
            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
            "FOO=docker_is_a_really",
            "BAR=great_tool_you_know"
        ],
        "Entrypoint": [
            "/bin/my-app-binary"
        ],
        "Cmd": [
            "--foreground",
            "--config",
            "/etc/my-app.d/default.cfg"
        ],
        "Volumes": {
            "/var/job-result-data": {},
            "/var/log/my-app-logs": {}
        },
        "WorkingDir": "/home/alice"
    }
}
```

### Image JSON Field Descriptions

+
+ id string +
+
+ Randomly generated, 256-bit, hexadecimal encoded. Uniquely identifies + the image. +
+
+ parent string +
+
+ ID of the parent image. If there is no parent image then this field + should be omitted. A collection of images may share many of the same + ancestor layers. This organizational structure is strictly a tree with + any one layer having either no parent or a single parent and zero or + more descendant layers. Cycles are not allowed and implementations + should be careful to avoid creating them or iterating through a cycle + indefinitely. +
+
+ created string +
+
+ ISO-8601 formatted combined date and time at which the image was + created. +
+
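A quick Go sketch (standard library only) producing a timestamp in this combined date-and-time format, matching the `created` value in the example JSON above:

```
package main

import (
	"fmt"
	"time"
)

func main() {
	// RFC 3339 with nanoseconds is the ISO-8601 profile seen above,
	// e.g. "2014-10-13T21:19:18.674353812Z".
	fmt.Println(time.Now().UTC().Format(time.RFC3339Nano))
}
```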
+ author string +
+
+ Gives the name and/or email address of the person or entity which + created and is responsible for maintaining the image. +
+
+ architecture string +
+
+ The CPU architecture which the binaries in this image are built to run + on. Possible values include: +
  • 386
  • amd64
  • arm
+ More values may be supported in the future and any of these may or may + not be supported by a given container runtime implementation. +
+
+ os string +
+
+ The name of the operating system which the image is built to run on. + Possible values include: +
  • darwin
  • freebsd
  • linux
+ More values may be supported in the future and any of these may or may + not be supported by a given container runtime implementation. +
+
+ checksum string +
+
+ Image Checksum of the filesystem changeset associated with the image + layer. +
+
+ Size integer +
+
+ The size in bytes of the filesystem changeset associated with the image + layer. +
+
+ config struct +
+
+ The execution parameters which should be used as a base when running a + container using the image. This field can be null, in + which case any execution parameters should be specified at creation of + the container. + +

Container RunConfig Field Descriptions

+ +
+
+ User string +
+
+

The username or UID which the process in the container should + run as. This acts as a default value to use when the value is + not specified when creating a container.

+ +

All of the following are valid:

+ +
  • user
  • uid
  • user:group
  • uid:gid
  • uid:group
  • user:gid
+ +

If group/gid is not specified, the + default group and supplementary groups of the given + user/uid in /etc/passwd + from the container are applied.

+
+
+ Memory integer +
+
+ Memory limit (in bytes). This acts as a default value to use + when the value is not specified when creating a container. +
+
+ MemorySwap integer +
+
+ Total memory usage (memory + swap); set to -1 to + disable swap. This acts as a default value to use when the + value is not specified when creating a container. +
+
+ CpuShares integer +
+
+ CPU shares (relative weight vs. other containers). This acts as + a default value to use when the value is not specified when + creating a container. +
+
+ ExposedPorts struct +
+
+ A set of ports to expose from a container running this image. + This JSON structure value is unusual because it is a direct + JSON serialization of the Go type + map[string]struct{} and is represented in JSON as + an object mapping its keys to an empty object. Here is an + example: + +
{
+    "8080": {},
+    "53/udp": {},
+    "2356/tcp": {}
+}
+ + Its keys can be in the format of: +
  • "port/tcp"
  • "port/udp"
  • "port"
+ with the default protocol being "tcp" if not + specified. These values act as defaults and are merged with any specified + when creating a container (see the serialization sketch following this + field list). +
+
+ Env array of strings +
+
+ Entries are in the format of VARNAME="var value". + These values act as defaults and are merged with any specified + when creating a container. +
+
+ Entrypoint array of strings +
+
+ A list of arguments to use as the command to execute when the + container starts. This value acts as a default and is replaced + by an entrypoint specified when creating a container. +
+
+ Cmd array of strings +
+
+ Default arguments to the entry point of the container. These + values act as defaults and are replaced with any specified when + creating a container. If an Entrypoint value is + not specified, then the first entry of the Cmd + array should be interpreted as the executable to run (see the merge + sketch following this field list). +
+
+ Volumes struct +
+
+ A set of directories which should be created as data volumes in + a container running this image. This JSON structure value is + unusual because it is a direct JSON serialization of the Go + type map[string]struct{} and is represented in + JSON as an object mapping its keys to an empty object. Here is + an example: +
{
+    "/var/my-app-data/": {},
+    "/etc/some-config.d/": {}
+}
+
+
+ WorkingDir string +
+
+ Sets the current working directory of the entry point process + in the container. This value acts as a default and is replaced + by a working directory specified when creating a container. +
+
+
+
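Two of the fields above benefit from short illustrations. First, `ExposedPorts` and `Volumes`: a minimal Go sketch, standard library only and not code from this repository, showing how `map[string]struct{}` serializes to an object whose keys map to empty objects:

```
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// ExposedPorts and Volumes are direct serializations of the Go type
	// map[string]struct{}: each key maps to the empty JSON object.
	exposed := map[string]struct{}{
		"8080/tcp": {},
		"53/udp":   {},
	}
	b, err := json.Marshal(exposed)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"53/udp":{},"8080/tcp":{}}
}
```

Second, the `Entrypoint`/`Cmd` merge rules. The following is a rough sketch of how the image defaults combine with values given at container creation; `effectiveCommand` is a hypothetical helper (it assumes an empty slice means "not specified") and is not code from this repository:

```
package main

import "fmt"

// effectiveCommand applies the replacement rules described above: a
// container-supplied entrypoint or cmd replaces the image default, and
// the resulting entrypoint is concatenated with the resulting cmd.
// With no entrypoint at all, cmd[0] becomes the executable to run.
func effectiveCommand(imgEntrypoint, imgCmd, ctrEntrypoint, ctrCmd []string) []string {
	entrypoint := imgEntrypoint
	if len(ctrEntrypoint) > 0 {
		entrypoint = ctrEntrypoint
	}
	cmd := imgCmd
	if len(ctrCmd) > 0 {
		cmd = ctrCmd
	}
	return append(append([]string{}, entrypoint...), cmd...)
}

func main() {
	// Image defaults from the example JSON above, with nothing
	// specified at container creation.
	fmt.Println(effectiveCommand(
		[]string{"/bin/my-app-binary"},
		[]string{"--foreground", "--config", "/etc/my-app.d/default.cfg"},
		nil, nil,
	))
}
```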

Any extra fields in the Image JSON struct are considered implementation
specific and should be ignored by any implementations which are unable to
interpret them.

## Creating an Image Filesystem Changeset

An example of creating an Image Filesystem Changeset follows.

An image root filesystem is first created as an empty directory named with the
ID of the image being created. Here is the initial empty directory structure
for the changeset for an image with ID `c3167915dc9d` ([real IDs are much
longer](#id_desc), but this example uses a truncated one for brevity.
Implementations need not name the rootfs directory in this way but it may be
convenient for keeping a record of a large number of image layers.):

```
c3167915dc9d/
```

Files and directories are then created:

```
c3167915dc9d/
    etc/
        my-app-config
    bin/
        my-app-binary
        my-app-tools
```

The `c3167915dc9d` directory is then committed as a plain Tar archive with
entries for the following files:

```
etc/my-app-config
bin/my-app-binary
bin/my-app-tools
```

The TarSum checksum for the archive file is then computed and placed in the
JSON metadata along with the execution parameters.

To make changes to the filesystem of this container image, create a new
directory named with a new ID, such as `f60c56784b83`, and initialize it with
a snapshot of the parent image's root filesystem, so that the directory is
identical to that of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem
can make this very efficient:

```
f60c56784b83/
    etc/
        my-app-config
    bin/
        my-app-binary
        my-app-tools
```

This example change is going to add a configuration directory at `/etc/my-app.d`
which contains a default config file. There's also a change to the
`my-app-tools` binary to handle the config layout change. The `f60c56784b83`
directory then looks like this:

```
f60c56784b83/
    etc/
        my-app.d/
            default.cfg
    bin/
        my-app-binary
        my-app-tools
```

This reflects the removal of `/etc/my-app-config` and creation of a file and
directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been
replaced with an updated version. Before committing this directory to a
changeset, because it has a parent image, it is first compared with the
directory tree of the parent snapshot, `c3167915dc9d`, looking for files and
directories that have been added, modified, or removed. The following changeset
is found:

```
Added:      /etc/my-app.d/default.cfg
Modified:   /bin/my-app-tools
Deleted:    /etc/my-app-config
```

A Tar Archive is then created which contains *only* this changeset: the added
and modified files and directories in their entirety, and for each deleted item
an entry for an empty file at the same location but with the basename of the
deleted file or directory prefixed with `.wh.`. The filenames prefixed with
`.wh.` are known as "whiteout" files. NOTE: For this reason, it is not possible
to create an image root filesystem which contains a file or directory with a
name beginning with `.wh.`. The resulting Tar archive for `f60c56784b83` has
the following entries:

```
/etc/my-app.d/default.cfg
/bin/my-app-tools
/etc/.wh.my-app-config
```

Any given image is likely to be composed of several of these Image Filesystem
Changeset tar archives.
+ +## Combined Image JSON + Filesystem Changeset Format + +There is also a format for a single archive which contains complete information +about an image, including: + + - repository names/tags + - all image layer JSON files + - all tar archives of each layer filesystem changesets + +For example, here's what the full archive of `library/busybox` is (displayed in +`tree` format): + +``` +. +├── 5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── a7b8b41220991bfc754d7ad445ad27b7f272ab8b4a2c175b9512b97471d02a8a +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── a936027c5ca8bf8f517923169a233e391cbb38469a75de8383b5228dc2d26ceb +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── f60c56784b832dd990022afc120b8136ab3da9528094752ae13fe63a2d28dc8c +│   ├── VERSION +│   ├── json +│   └── layer.tar +└── repositories +``` + +There are one or more directories named with the ID for each layer in a full +image. Each of these directories contains 3 files: + + * `VERSION` - The schema version of the `json` file + * `json` - The JSON metadata for an image layer + * `layer.tar` - The Tar archive of the filesystem changeset for an image + layer. + +The content of the `VERSION` files is simply the semantic version of the JSON +metadata schema: + +``` +1.0 +``` + +And the `repositories` file is another JSON file which describes names/tags: + +``` +{ + "busybox":{ + "latest":"5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e" + } +} +``` + +Every key in this object is the name of a repository, and maps to a collection +of tag suffixes. Each tag maps to the ID of the image represented by that tag. + +## Loading an Image Filesystem Changeset + +Unpacking a bundle of image layer JSON files and their corresponding filesystem +changesets can be done using a series of steps: + +1. Follow the parent IDs of image layers to find the root ancestor (an image +with no parent ID specified). +2. For every image layer, in order from root ancestor and descending down, +extract the contents of that layer's filesystem changeset archive into a +directory which will be used as the root of a container filesystem. + + - Extract all contents of each archive. + - Walk the directory tree once more, removing any files with the prefix + `.wh.` and the corresponding file or directory named without this prefix. + + +## Implementations + +This specification is an admittedly imperfect description of an +imperfectly-understood problem. The Docker project is, in turn, an attempt to +implement this specification. Our goal and our execution toward it will evolve +over time, but our primary concern in this specification and in our +implementation is compatibility and interoperability. 
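As a companion to the loading steps above, here is a simplified Go sketch of applying a single filesystem changeset archive with whiteout handling. It is illustrative only (permissions, ownership, symlinks, and hardlinks are ignored, and `layer.tar`/`rootfs` are hypothetical paths); it is not the extraction code Docker itself uses:

```
package main

import (
	"archive/tar"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
)

// applyLayer unpacks one changeset into root, honoring the whiteout
// convention: an entry whose basename starts with ".wh." deletes the
// correspondingly named file or directory instead of creating one.
func applyLayer(root string, layer io.Reader) error {
	tr := tar.NewReader(layer)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return nil // end of archive
		}
		if err != nil {
			return err
		}
		target := filepath.Join(root, hdr.Name)
		base := filepath.Base(hdr.Name)
		if strings.HasPrefix(base, ".wh.") {
			// Whiteout: remove whatever a lower layer put here.
			victim := filepath.Join(filepath.Dir(target), strings.TrimPrefix(base, ".wh."))
			if err := os.RemoveAll(victim); err != nil {
				return err
			}
			continue
		}
		switch hdr.Typeflag {
		case tar.TypeDir:
			if err := os.MkdirAll(target, 0755); err != nil {
				return err
			}
		case tar.TypeReg:
			if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
				return err
			}
			f, err := os.Create(target)
			if err != nil {
				return err
			}
			if _, err := io.Copy(f, tr); err != nil {
				f.Close()
				return err
			}
			f.Close()
		default:
			// Other entry types are omitted for brevity.
		}
	}
}

func main() {
	f, err := os.Open("layer.tar")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()
	if err := applyLayer("rootfs", f); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```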
diff --git a/integration-cli/check_test.go b/integration-cli/check_test.go new file mode 100644 index 00000000..defa02ea --- /dev/null +++ b/integration-cli/check_test.go @@ -0,0 +1,95 @@ +package main + +import ( + "testing" + + "github.com/go-check/check" +) + +func Test(t *testing.T) { + check.TestingT(t) +} + +func init() { + check.Suite(&DockerSuite{}) +} + +type DockerSuite struct { +} + +func (s *DockerSuite) TearDownTest(c *check.C) { + deleteAllContainers() + deleteAllImages() +} + +func init() { + check.Suite(&DockerRegistrySuite{ + ds: &DockerSuite{}, + }) +} + +type DockerRegistrySuite struct { + ds *DockerSuite + reg *testRegistryV2 + d *Daemon +} + +func (s *DockerRegistrySuite) SetUpTest(c *check.C) { + s.reg = setupRegistry(c) + s.d = NewDaemon(c) +} + +func (s *DockerRegistrySuite) TearDownTest(c *check.C) { + s.reg.Close() + s.ds.TearDownTest(c) + if s.reg != nil { + s.reg.Close() + } + if s.ds != nil { + s.ds.TearDownTest(c) + } + s.d.Stop() +} + +func init() { + check.Suite(&DockerDaemonSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerDaemonSuite struct { + ds *DockerSuite + d *Daemon +} + +func (s *DockerDaemonSuite) SetUpTest(c *check.C) { + s.d = NewDaemon(c) +} + +func (s *DockerDaemonSuite) TearDownTest(c *check.C) { + s.d.Stop() + s.ds.TearDownTest(c) +} + +func init() { + check.Suite(&DockerTrustSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerTrustSuite struct { + ds *DockerSuite + reg *testRegistryV2 + not *testNotary +} + +func (s *DockerTrustSuite) SetUpTest(c *check.C) { + s.reg = setupRegistry(c) + s.not = setupNotary(c) +} + +func (s *DockerTrustSuite) TearDownTest(c *check.C) { + s.reg.Close() + s.not.Close() + s.ds.TearDownTest(c) +} diff --git a/integration-cli/docker_api_attach_test.go b/integration-cli/docker_api_attach_test.go new file mode 100644 index 00000000..06a1c486 --- /dev/null +++ b/integration-cli/docker_api_attach_test.go @@ -0,0 +1,223 @@ +package main + +import ( + "bytes" + "io" + "net/http" + "net/http/httputil" + "strings" + "time" + + "github.com/go-check/check" + "golang.org/x/net/websocket" +) + +func (s *DockerSuite) TestGetContainersAttachWebsocket(c *check.C) { + out, _ := dockerCmd(c, "run", "-dit", "busybox", "cat") + + rwc, err := sockConn(time.Duration(10 * time.Second)) + if err != nil { + c.Fatal(err) + } + + cleanedContainerID := strings.TrimSpace(out) + config, err := websocket.NewConfig( + "/containers/"+cleanedContainerID+"/attach/ws?stream=1&stdin=1&stdout=1&stderr=1", + "http://localhost", + ) + if err != nil { + c.Fatal(err) + } + + ws, err := websocket.NewClient(config, rwc) + if err != nil { + c.Fatal(err) + } + defer ws.Close() + + expected := []byte("hello") + actual := make([]byte, len(expected)) + + outChan := make(chan error) + go func() { + _, err := ws.Read(actual) + outChan <- err + close(outChan) + }() + + inChan := make(chan error) + go func() { + _, err := ws.Write(expected) + inChan <- err + close(inChan) + }() + + select { + case err := <-inChan: + if err != nil { + c.Fatal(err) + } + case <-time.After(5 * time.Second): + c.Fatal("Timeout writing to ws") + } + + select { + case err := <-outChan: + if err != nil { + c.Fatal(err) + } + case <-time.After(5 * time.Second): + c.Fatal("Timeout reading from ws") + } + + if !bytes.Equal(expected, actual) { + c.Fatal("Expected output on websocket to match input") + } +} + +// regression gh14320 +func (s *DockerSuite) TestPostContainersAttachContainerNotFound(c *check.C) { + status, body, err := sockRequest("POST", "/containers/doesnotexist/attach", nil) + 
c.Assert(status, check.Equals, http.StatusNotFound) + c.Assert(err, check.IsNil) + expected := "no such id: doesnotexist\n" + if !strings.Contains(string(body), expected) { + c.Fatalf("Expected response body to contain %q", expected) + } +} + +func (s *DockerSuite) TestGetContainersWsAttachContainerNotFound(c *check.C) { + status, body, err := sockRequest("GET", "/containers/doesnotexist/attach/ws", nil) + c.Assert(status, check.Equals, http.StatusNotFound) + c.Assert(err, check.IsNil) + expected := "no such id: doesnotexist\n" + if !strings.Contains(string(body), expected) { + c.Fatalf("Expected response body to contain %q", expected) + } +} + +func (s *DockerSuite) TestPostContainersAttach(c *check.C) { + out, _ := dockerCmd(c, "run", "-dit", "busybox", "cat") + + r, w := io.Pipe() + defer r.Close() + defer w.Close() + + conn, err := sockConn(time.Duration(10 * time.Second)) + c.Assert(err, check.IsNil) + + containerID := strings.TrimSpace(out) + + req, err := http.NewRequest("POST", "/containers/"+containerID+"/attach?stream=1&stdin=1&stdout=1&stderr=1", bytes.NewReader([]byte{})) + c.Assert(err, check.IsNil) + + client := httputil.NewClientConn(conn, nil) + defer client.Close() + + // Do POST attach request + resp, err := client.Do(req) + c.Assert(resp.StatusCode, check.Equals, http.StatusOK) + // If we check the err, we get a ErrPersistEOF = &http.ProtocolError{ErrorString: "persistent connection closed"} + // This means that the remote requested this be the last request serviced, is this okay? + + // Test read and write to the attached container + expected := []byte("hello") + actual := make([]byte, len(expected)) + + outChan := make(chan error) + go func() { + _, err := r.Read(actual) + outChan <- err + close(outChan) + }() + + inChan := make(chan error) + go func() { + _, err := w.Write(expected) + inChan <- err + close(inChan) + }() + + select { + case err := <-inChan: + c.Assert(err, check.IsNil) + case <-time.After(5 * time.Second): + c.Fatal("Timeout writing to stdout") + } + + select { + case err := <-outChan: + c.Assert(err, check.IsNil) + case <-time.After(5 * time.Second): + c.Fatal("Timeout reading from stdin") + } + + if !bytes.Equal(expected, actual) { + c.Fatal("Expected output to match input") + } + + resp.Body.Close() +} + +func (s *DockerSuite) TestPostContainersAttachStderr(c *check.C) { + out, _ := dockerCmd(c, "run", "-dit", "busybox", "/bin/sh", "-c", "cat >&2") + + r, w := io.Pipe() + defer r.Close() + defer w.Close() + + conn, err := sockConn(time.Duration(10 * time.Second)) + c.Assert(err, check.IsNil) + + containerID := strings.TrimSpace(out) + + req, err := http.NewRequest("POST", "/containers/"+containerID+"/attach?stream=1&stdin=1&stdout=1&stderr=1", bytes.NewReader([]byte{})) + c.Assert(err, check.IsNil) + + client := httputil.NewClientConn(conn, nil) + defer client.Close() + + // Do POST attach request + resp, err := client.Do(req) + c.Assert(resp.StatusCode, check.Equals, http.StatusOK) + // If we check the err, we get a ErrPersistEOF = &http.ProtocolError{ErrorString: "persistent connection closed"} + // This means that the remote requested this be the last request serviced, is this okay? 
+ + // Test read and write to the attached container + expected := []byte("hello") + actual := make([]byte, len(expected)) + + outChan := make(chan error) + go func() { + _, err := r.Read(actual) + outChan <- err + close(outChan) + }() + + inChan := make(chan error) + go func() { + _, err := w.Write(expected) + inChan <- err + close(inChan) + }() + + select { + case err := <-inChan: + c.Assert(err, check.IsNil) + case <-time.After(5 * time.Second): + c.Fatal("Timeout writing to stdout") + } + + select { + case err := <-outChan: + c.Assert(err, check.IsNil) + case <-time.After(5 * time.Second): + c.Fatal("Timeout reading from stdin") + } + + if !bytes.Equal(expected, actual) { + c.Fatal("Expected output to match input") + } + + resp.Body.Close() +} diff --git a/integration-cli/docker_api_containers_test.go b/integration-cli/docker_api_containers_test.go new file mode 100644 index 00000000..d8dc4483 --- /dev/null +++ b/integration-cli/docker_api_containers_test.go @@ -0,0 +1,1733 @@ +package main + +import ( + "archive/tar" + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httputil" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestContainerApiGetAll(c *check.C) { + startCount, err := getContainerCount() + if err != nil { + c.Fatalf("Cannot query container count: %v", err) + } + + name := "getall" + dockerCmd(c, "run", "--name", name, "busybox", "true") + + status, body, err := sockRequest("GET", "/containers/json?all=1", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + var inspectJSON []struct { + Names []string + } + if err = json.Unmarshal(body, &inspectJSON); err != nil { + c.Fatalf("unable to unmarshal response body: %v", err) + } + + if len(inspectJSON) != startCount+1 { + c.Fatalf("Expected %d container(s), %d found (started with: %d)", startCount+1, len(inspectJSON), startCount) + } + + if actual := inspectJSON[0].Names[0]; actual != "/"+name { + c.Fatalf("Container Name mismatch. Expected: %q, received: %q\n", "/"+name, actual) + } +} + +// regression test for empty json field being omitted #13691 +func (s *DockerSuite) TestContainerApiGetJSONNoFieldsOmitted(c *check.C) { + dockerCmd(c, "run", "busybox", "true") + + status, body, err := sockRequest("GET", "/containers/json?all=1", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + // empty Labels field triggered this bug, make sense to check for everything + // cause even Ports for instance can trigger this bug + // better safe than sorry.. 
+ fields := []string{ + "Id", + "Names", + "Image", + "Command", + "Created", + "Ports", + "Labels", + "Status", + } + + // decoding into types.Container do not work since it eventually unmarshal + // and empty field to an empty go map, so we just check for a string + for _, f := range fields { + if !strings.Contains(string(body), f) { + c.Fatalf("Field %s is missing and it shouldn't", f) + } + } +} + +type containerPs struct { + Names []string + Ports []map[string]interface{} +} + +// regression test for non-empty fields from #13901 +func (s *DockerSuite) TestContainerPsOmitFields(c *check.C) { + name := "pstest" + port := 80 + dockerCmd(c, "run", "-d", "--name", name, "--expose", strconv.Itoa(port), "busybox", "top") + + status, body, err := sockRequest("GET", "/containers/json?all=1", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + var resp []containerPs + err = json.Unmarshal(body, &resp) + c.Assert(err, check.IsNil) + + var foundContainer *containerPs + for _, container := range resp { + for _, testName := range container.Names { + if "/"+name == testName { + foundContainer = &container + break + } + } + } + + c.Assert(len(foundContainer.Ports), check.Equals, 1) + c.Assert(foundContainer.Ports[0]["PrivatePort"], check.Equals, float64(port)) + _, ok := foundContainer.Ports[0]["PublicPort"] + c.Assert(ok, check.Not(check.Equals), true) + _, ok = foundContainer.Ports[0]["IP"] + c.Assert(ok, check.Not(check.Equals), true) +} + +func (s *DockerSuite) TestContainerApiGetExport(c *check.C) { + name := "exportcontainer" + dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test") + + status, body, err := sockRequest("GET", "/containers/"+name+"/export", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + found := false + for tarReader := tar.NewReader(bytes.NewReader(body)); ; { + h, err := tarReader.Next() + if err != nil { + if err == io.EOF { + break + } + c.Fatal(err) + } + if h.Name == "test" { + found = true + break + } + } + + if !found { + c.Fatalf("The created test file has not been found in the exported image") + } +} + +func (s *DockerSuite) TestContainerApiGetChanges(c *check.C) { + name := "changescontainer" + dockerCmd(c, "run", "--name", name, "busybox", "rm", "/etc/passwd") + + status, body, err := sockRequest("GET", "/containers/"+name+"/changes", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + changes := []struct { + Kind int + Path string + }{} + if err = json.Unmarshal(body, &changes); err != nil { + c.Fatalf("unable to unmarshal response body: %v", err) + } + + // Check the changelog for removal of /etc/passwd + success := false + for _, elem := range changes { + if elem.Path == "/etc/passwd" && elem.Kind == 2 { + success = true + } + } + if !success { + c.Fatalf("/etc/passwd has been removed but is not present in the diff") + } +} + +func (s *DockerSuite) TestContainerApiStartVolumeBinds(c *check.C) { + name := "testing" + config := map[string]interface{}{ + "Image": "busybox", + "Volumes": map[string]struct{}{"/tmp": {}}, + } + + status, _, err := sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated) + + bindPath := randomUnixTmpDirPath("test") + config = map[string]interface{}{ + "Binds": []string{bindPath + ":/tmp"}, + } + status, _, err = sockRequest("POST", "/containers/"+name+"/start", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, 
http.StatusNoContent) + + pth, err := inspectMountSourceField(name, "/tmp") + if err != nil { + c.Fatal(err) + } + + if pth != bindPath { + c.Fatalf("expected volume host path to be %s, got %s", bindPath, pth) + } +} + +// Test for GH#10618 +func (s *DockerSuite) TestContainerApiStartDupVolumeBinds(c *check.C) { + name := "testdups" + config := map[string]interface{}{ + "Image": "busybox", + "Volumes": map[string]struct{}{"/tmp": {}}, + } + + status, _, err := sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated) + + bindPath1 := randomUnixTmpDirPath("test1") + bindPath2 := randomUnixTmpDirPath("test2") + + config = map[string]interface{}{ + "Binds": []string{bindPath1 + ":/tmp", bindPath2 + ":/tmp"}, + } + status, body, err := sockRequest("POST", "/containers/"+name+"/start", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + + if !strings.Contains(string(body), "Duplicate bind") { + c.Fatalf("Expected failure due to duplicate bind mounts to same path, instead got: %q with error: %v", string(body), err) + } +} + +func (s *DockerSuite) TestContainerApiStartVolumesFrom(c *check.C) { + volName := "voltst" + volPath := "/tmp" + + dockerCmd(c, "run", "-d", "--name", volName, "-v", volPath, "busybox") + + name := "TestContainerApiStartVolumesFrom" + config := map[string]interface{}{ + "Image": "busybox", + "Volumes": map[string]struct{}{volPath: {}}, + } + + status, _, err := sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated) + + config = map[string]interface{}{ + "VolumesFrom": []string{volName}, + } + status, _, err = sockRequest("POST", "/containers/"+name+"/start", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNoContent) + + pth, err := inspectMountSourceField(name, volPath) + if err != nil { + c.Fatal(err) + } + pth2, err := inspectMountSourceField(volName, volPath) + if err != nil { + c.Fatal(err) + } + + if pth != pth2 { + c.Fatalf("expected volume host path to be %s, got %s", pth, pth2) + } +} + +func (s *DockerSuite) TestGetContainerStats(c *check.C) { + var ( + name = "statscontainer" + ) + dockerCmd(c, "run", "-d", "--name", name, "busybox", "top") + + type b struct { + status int + body []byte + err error + } + bc := make(chan b, 1) + go func() { + status, body, err := sockRequest("GET", "/containers/"+name+"/stats", nil) + bc <- b{status, body, err} + }() + + // allow some time to stream the stats from the container + time.Sleep(4 * time.Second) + dockerCmd(c, "rm", "-f", name) + + // collect the results from the stats stream or timeout and fail + // if the stream was not disconnected. 
+ select { + case <-time.After(2 * time.Second): + c.Fatal("stream was not closed after container was removed") + case sr := <-bc: + c.Assert(sr.err, check.IsNil) + c.Assert(sr.status, check.Equals, http.StatusOK) + + dec := json.NewDecoder(bytes.NewBuffer(sr.body)) + var s *types.Stats + // decode only one object from the stream + if err := dec.Decode(&s); err != nil { + c.Fatal(err) + } + } +} + +func (s *DockerSuite) TestGetContainerStatsRmRunning(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id := strings.TrimSpace(out) + + buf := &channelBuffer{make(chan []byte, 1)} + defer buf.Close() + chErr := make(chan error) + go func() { + _, body, err := sockRequestRaw("GET", "/containers/"+id+"/stats?stream=1", nil, "application/json") + if err != nil { + chErr <- err + } + defer body.Close() + _, err = io.Copy(buf, body) + chErr <- err + }() + defer func() { + c.Assert(<-chErr, check.IsNil) + }() + + b := make([]byte, 32) + // make sure we've got some stats + _, err := buf.ReadTimeout(b, 2*time.Second) + c.Assert(err, check.IsNil) + + // Now remove without `-f` and make sure we are still pulling stats + _, _, err = dockerCmdWithError(c, "rm", id) + c.Assert(err, check.Not(check.IsNil), check.Commentf("rm should have failed but didn't")) + _, err = buf.ReadTimeout(b, 2*time.Second) + c.Assert(err, check.IsNil) + dockerCmd(c, "rm", "-f", id) + + _, err = buf.ReadTimeout(b, 2*time.Second) + c.Assert(err, check.Not(check.IsNil)) +} + +// regression test for gh13421 +// previous test was just checking one stat entry so it didn't fail (stats with +// stream false always return one stat) +func (s *DockerSuite) TestGetContainerStatsStream(c *check.C) { + name := "statscontainer" + dockerCmd(c, "run", "-d", "--name", name, "busybox", "top") + + type b struct { + status int + body []byte + err error + } + bc := make(chan b, 1) + go func() { + status, body, err := sockRequest("GET", "/containers/"+name+"/stats", nil) + bc <- b{status, body, err} + }() + + // allow some time to stream the stats from the container + time.Sleep(4 * time.Second) + dockerCmd(c, "rm", "-f", name) + + // collect the results from the stats stream or timeout and fail + // if the stream was not disconnected. + select { + case <-time.After(2 * time.Second): + c.Fatal("stream was not closed after container was removed") + case sr := <-bc: + c.Assert(sr.err, check.IsNil) + c.Assert(sr.status, check.Equals, http.StatusOK) + + s := string(sr.body) + // count occurrences of "read" of types.Stats + if l := strings.Count(s, "read"); l < 2 { + c.Fatalf("Expected more than one stat streamed, got %d", l) + } + } +} + +func (s *DockerSuite) TestGetContainerStatsNoStream(c *check.C) { + name := "statscontainer" + dockerCmd(c, "run", "-d", "--name", name, "busybox", "top") + + type b struct { + status int + body []byte + err error + } + bc := make(chan b, 1) + go func() { + status, body, err := sockRequest("GET", "/containers/"+name+"/stats?stream=0", nil) + bc <- b{status, body, err} + }() + + // allow some time to stream the stats from the container + time.Sleep(4 * time.Second) + dockerCmd(c, "rm", "-f", name) + + // collect the results from the stats stream or timeout and fail + // if the stream was not disconnected. 
+	select {
+	case <-time.After(2 * time.Second):
+		c.Fatal("stream was not closed after container was removed")
+	case sr := <-bc:
+		c.Assert(sr.err, check.IsNil)
+		c.Assert(sr.status, check.Equals, http.StatusOK)
+
+		s := string(sr.body)
+		// count occurrences of "read" of types.Stats
+		if l := strings.Count(s, "read"); l != 1 {
+			c.Fatalf("Expected only one stat streamed, got %d", l)
+		}
+	}
+}
+
+func (s *DockerSuite) TestGetStoppedContainerStats(c *check.C) {
+	// TODO: this test does nothing because we are c.Assert'ing in goroutine
+	var (
+		name = "statscontainer"
+	)
+	dockerCmd(c, "create", "--name", name, "busybox", "top")
+
+	go func() {
+		// We'll never get return for GET stats from sockRequest as of now,
+		// just send request and see if panic or error would happen on daemon side.
+		status, _, err := sockRequest("GET", "/containers/"+name+"/stats", nil)
+		c.Assert(err, check.IsNil)
+		c.Assert(status, check.Equals, http.StatusOK)
+	}()
+
+	// allow some time to send request and let daemon deal with it
+	time.Sleep(1 * time.Second)
+}
+
+func (s *DockerSuite) TestBuildApiDockerfilePath(c *check.C) {
+	// Test to make sure we stop people from trying to leave the
+	// build context when specifying the path to the dockerfile
+	buffer := new(bytes.Buffer)
+	tw := tar.NewWriter(buffer)
+	defer tw.Close()
+
+	dockerfile := []byte("FROM busybox")
+	if err := tw.WriteHeader(&tar.Header{
+		Name: "Dockerfile",
+		Size: int64(len(dockerfile)),
+	}); err != nil {
+		c.Fatalf("failed to write tar file header: %v", err)
+	}
+	if _, err := tw.Write(dockerfile); err != nil {
+		c.Fatalf("failed to write tar file content: %v", err)
+	}
+	if err := tw.Close(); err != nil {
+		c.Fatalf("failed to close tar archive: %v", err)
+	}
+
+	res, body, err := sockRequestRaw("POST", "/build?dockerfile=../Dockerfile", buffer, "application/x-tar")
+	c.Assert(err, check.IsNil)
+	c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError)
+
+	out, err := readBody(body)
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	if !strings.Contains(string(out), "must be within the build context") {
+		c.Fatalf("Didn't complain about leaving build context: %s", out)
+	}
+}
+
+func (s *DockerSuite) TestBuildApiDockerFileRemote(c *check.C) {
+	server, err := fakeStorage(map[string]string{
+		"testD": `FROM busybox
+COPY * /tmp/
+RUN find / -name ba*
+RUN find /tmp/`,
+	})
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer server.Close()
+
+	res, body, err := sockRequestRaw("POST", "/build?dockerfile=baz&remote="+server.URL()+"/testD", nil, "application/json")
+	c.Assert(err, check.IsNil)
+	c.Assert(res.StatusCode, check.Equals, http.StatusOK)
+
+	buf, err := readBody(body)
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	// Make sure Dockerfile exists.
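+	// When the remote URL points at a single Dockerfile rather than a tarball
+	// or git repo, the daemon stores it under the default name and the
+	// 'dockerfile' query parameter is ignored: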
+ // Make sure 'baz' doesn't exist ANYWHERE despite being mentioned in the URL + out := string(buf) + if !strings.Contains(out, "/tmp/Dockerfile") || + strings.Contains(out, "baz") { + c.Fatalf("Incorrect output: %s", out) + } +} + +func (s *DockerSuite) TestBuildApiRemoteTarballContext(c *check.C) { + buffer := new(bytes.Buffer) + tw := tar.NewWriter(buffer) + defer tw.Close() + + dockerfile := []byte("FROM busybox") + if err := tw.WriteHeader(&tar.Header{ + Name: "Dockerfile", + Size: int64(len(dockerfile)), + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write(dockerfile); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + server, err := fakeBinaryStorage(map[string]*bytes.Buffer{ + "testT.tar": buffer, + }) + c.Assert(err, check.IsNil) + + defer server.Close() + + res, b, err := sockRequestRaw("POST", "/build?remote="+server.URL()+"/testT.tar", nil, "application/tar") + c.Assert(err, check.IsNil) + c.Assert(res.StatusCode, check.Equals, http.StatusOK) + b.Close() +} + +func (s *DockerSuite) TestBuildApiRemoteTarballContextWithCustomDockerfile(c *check.C) { + buffer := new(bytes.Buffer) + tw := tar.NewWriter(buffer) + defer tw.Close() + + dockerfile := []byte(`FROM busybox +RUN echo 'wrong'`) + if err := tw.WriteHeader(&tar.Header{ + Name: "Dockerfile", + Size: int64(len(dockerfile)), + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write(dockerfile); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + + custom := []byte(`FROM busybox +RUN echo 'right' +`) + if err := tw.WriteHeader(&tar.Header{ + Name: "custom", + Size: int64(len(custom)), + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write(custom); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + server, err := fakeBinaryStorage(map[string]*bytes.Buffer{ + "testT.tar": buffer, + }) + c.Assert(err, check.IsNil) + + defer server.Close() + url := "/build?dockerfile=custom&remote=" + server.URL() + "/testT.tar" + res, body, err := sockRequestRaw("POST", url, nil, "application/tar") + c.Assert(err, check.IsNil) + c.Assert(res.StatusCode, check.Equals, http.StatusOK) + + defer body.Close() + content, err := readBody(body) + c.Assert(err, check.IsNil) + + if strings.Contains(string(content), "wrong") { + c.Fatalf("Build used the wrong dockerfile.") + } +} + +func (s *DockerSuite) TestBuildApiLowerDockerfile(c *check.C) { + git, err := newFakeGit("repo", map[string]string{ + "dockerfile": `FROM busybox +RUN echo from dockerfile`, + }, false) + if err != nil { + c.Fatal(err) + } + defer git.Close() + + res, body, err := sockRequestRaw("POST", "/build?remote="+git.RepoURL, nil, "application/json") + c.Assert(err, check.IsNil) + c.Assert(res.StatusCode, check.Equals, http.StatusOK) + + buf, err := readBody(body) + if err != nil { + c.Fatal(err) + } + + out := string(buf) + if !strings.Contains(out, "from dockerfile") { + c.Fatalf("Incorrect output: %s", out) + } +} + +func (s *DockerSuite) TestBuildApiBuildGitWithF(c *check.C) { + git, err := newFakeGit("repo", map[string]string{ + "baz": `FROM busybox +RUN echo from baz`, + "Dockerfile": `FROM busybox +RUN echo from Dockerfile`, + }, false) + if err != nil { + c.Fatal(err) + } + defer 
git.Close()
+
+	// Make sure it uses the 'dockerfile' query param value
+	res, body, err := sockRequestRaw("POST", "/build?dockerfile=baz&remote="+git.RepoURL, nil, "application/json")
+	c.Assert(err, check.IsNil)
+	c.Assert(res.StatusCode, check.Equals, http.StatusOK)
+
+	buf, err := readBody(body)
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	out := string(buf)
+	if !strings.Contains(out, "from baz") {
+		c.Fatalf("Incorrect output: %s", out)
+	}
+}
+
+func (s *DockerSuite) TestBuildApiDoubleDockerfile(c *check.C) {
+	testRequires(c, UnixCli) // dockerfile overwrites Dockerfile on Windows
+	git, err := newFakeGit("repo", map[string]string{
+		"Dockerfile": `FROM busybox
+RUN echo from Dockerfile`,
+		"dockerfile": `FROM busybox
+RUN echo from dockerfile`,
+	}, false)
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer git.Close()
+
+	// With no 'dockerfile' query param, the capitalized 'Dockerfile' should win
+	res, body, err := sockRequestRaw("POST", "/build?remote="+git.RepoURL, nil, "application/json")
+	c.Assert(err, check.IsNil)
+	c.Assert(res.StatusCode, check.Equals, http.StatusOK)
+
+	buf, err := readBody(body)
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	out := string(buf)
+	if !strings.Contains(out, "from Dockerfile") {
+		c.Fatalf("Incorrect output: %s", out)
+	}
+}
+
+func (s *DockerSuite) TestBuildApiDockerfileSymlink(c *check.C) {
+	// Test to make sure we stop people from trying to leave the
+	// build context when specifying a symlink as the path to the dockerfile
+	buffer := new(bytes.Buffer)
+	tw := tar.NewWriter(buffer)
+	defer tw.Close()
+
+	if err := tw.WriteHeader(&tar.Header{
+		Name:     "Dockerfile",
+		Typeflag: tar.TypeSymlink,
+		Linkname: "/etc/passwd",
+	}); err != nil {
+		c.Fatalf("failed to write tar file header: %v", err)
+	}
+	if err := tw.Close(); err != nil {
+		c.Fatalf("failed to close tar archive: %v", err)
+	}
+
+	res, body, err := sockRequestRaw("POST", "/build", buffer, "application/x-tar")
+	c.Assert(err, check.IsNil)
+	c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError)
+
+	out, err := readBody(body)
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	// The reason the error is "Cannot locate specified Dockerfile" is because
+	// in the builder, the symlink is resolved within the context, therefore
+	// Dockerfile -> /etc/passwd becomes etc/passwd from the context which is
+	// a nonexistent file.
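+	// For example, a context extracted to /tmp/ctx would resolve the symlink
+	// to /tmp/ctx/etc/passwd rather than to the host's /etc/passwd.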
+ if !strings.Contains(string(out), "Cannot locate specified Dockerfile: Dockerfile") { + c.Fatalf("Didn't complain about leaving build context: %s", out) + } +} + +// #9981 - Allow a docker created volume (ie, one in /var/lib/docker/volumes) to be used to overwrite (via passing in Binds on api start) an existing volume +func (s *DockerSuite) TestPostContainerBindNormalVolume(c *check.C) { + dockerCmd(c, "create", "-v", "/foo", "--name=one", "busybox") + + fooDir, err := inspectMountSourceField("one", "/foo") + if err != nil { + c.Fatal(err) + } + + dockerCmd(c, "create", "-v", "/foo", "--name=two", "busybox") + + bindSpec := map[string][]string{"Binds": {fooDir + ":/foo"}} + status, _, err := sockRequest("POST", "/containers/two/start", bindSpec) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNoContent) + + fooDir2, err := inspectMountSourceField("two", "/foo") + if err != nil { + c.Fatal(err) + } + + if fooDir2 != fooDir { + c.Fatalf("expected volume path to be %s, got: %s", fooDir, fooDir2) + } +} + +func (s *DockerSuite) TestContainerApiPause(c *check.C) { + defer unpauseAllContainers() + out, _ := dockerCmd(c, "run", "-d", "busybox", "sleep", "30") + ContainerID := strings.TrimSpace(out) + + status, _, err := sockRequest("POST", "/containers/"+ContainerID+"/pause", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNoContent) + + pausedContainers, err := getSliceOfPausedContainers() + + if err != nil { + c.Fatalf("error thrown while checking if containers were paused: %v", err) + } + + if len(pausedContainers) != 1 || stringid.TruncateID(ContainerID) != pausedContainers[0] { + c.Fatalf("there should be one paused container and not %d", len(pausedContainers)) + } + + status, _, err = sockRequest("POST", "/containers/"+ContainerID+"/unpause", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNoContent) + + pausedContainers, err = getSliceOfPausedContainers() + + if err != nil { + c.Fatalf("error thrown while checking if containers were paused: %v", err) + } + + if pausedContainers != nil { + c.Fatalf("There should be no paused container.") + } +} + +func (s *DockerSuite) TestContainerApiTop(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "top") + id := strings.TrimSpace(string(out)) + if err := waitRun(id); err != nil { + c.Fatal(err) + } + + type topResp struct { + Titles []string + Processes [][]string + } + var top topResp + status, b, err := sockRequest("GET", "/containers/"+id+"/top?ps_args=aux", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + if err := json.Unmarshal(b, &top); err != nil { + c.Fatal(err) + } + + if len(top.Titles) != 11 { + c.Fatalf("expected 11 titles, found %d: %v", len(top.Titles), top.Titles) + } + + if top.Titles[0] != "USER" || top.Titles[10] != "COMMAND" { + c.Fatalf("expected `USER` at `Titles[0]` and `COMMAND` at Titles[10]: %v", top.Titles) + } + if len(top.Processes) != 2 { + c.Fatalf("expected 2 processes, found %d: %v", len(top.Processes), top.Processes) + } + if top.Processes[0][10] != "/bin/sh -c top" { + c.Fatalf("expected `/bin/sh -c top`, found: %s", top.Processes[0][10]) + } + if top.Processes[1][10] != "top" { + c.Fatalf("expected `top`, found: %s", top.Processes[1][10]) + } +} + +func (s *DockerSuite) TestContainerApiCommit(c *check.C) { + cName := "testapicommit" + dockerCmd(c, "run", "--name="+cName, "busybox", "/bin/sh", "-c", "touch /test") + + name := "TestContainerApiCommit" + status, b, 
err := sockRequest("POST", "/commit?repo="+name+"&tag=testtag&container="+cName, nil)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusCreated)
+
+	type resp struct {
+		ID string
+	}
+	var img resp
+	if err := json.Unmarshal(b, &img); err != nil {
+		c.Fatal(err)
+	}
+
+	cmd, err := inspectField(img.ID, "Config.Cmd")
+	if err != nil {
+		c.Fatal(err)
+	}
+	if cmd != "{[/bin/sh -c touch /test]}" {
+		c.Fatalf("got wrong Cmd from commit: %q", cmd)
+	}
+	// sanity check, make sure the image is what we think it is
+	dockerCmd(c, "run", img.ID, "ls", "/test")
+}
+
+func (s *DockerSuite) TestContainerApiCommitWithLabelInConfig(c *check.C) {
+	cName := "testapicommitwithconfig"
+	dockerCmd(c, "run", "--name="+cName, "busybox", "/bin/sh", "-c", "touch /test")
+
+	config := map[string]interface{}{
+		"Labels": map[string]string{"key1": "value1", "key2": "value2"},
+	}
+
+	name := "TestContainerApiCommitWithConfig"
+	status, b, err := sockRequest("POST", "/commit?repo="+name+"&container="+cName, config)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusCreated)
+
+	type resp struct {
+		ID string
+	}
+	var img resp
+	if err := json.Unmarshal(b, &img); err != nil {
+		c.Fatal(err)
+	}
+
+	label1, err := inspectFieldMap(img.ID, "Config.Labels", "key1")
+	if err != nil {
+		c.Fatal(err)
+	}
+	c.Assert(label1, check.Equals, "value1")
+
+	label2, err := inspectFieldMap(img.ID, "Config.Labels", "key2")
+	if err != nil {
+		c.Fatal(err)
+	}
+	c.Assert(label2, check.Equals, "value2")
+
+	cmd, err := inspectField(img.ID, "Config.Cmd")
+	if err != nil {
+		c.Fatal(err)
+	}
+	if cmd != "{[/bin/sh -c touch /test]}" {
+		c.Fatalf("got wrong Cmd from commit: %q", cmd)
+	}
+
+	// sanity check, make sure the image is what we think it is
+	dockerCmd(c, "run", img.ID, "ls", "/test")
+}
+
+func (s *DockerSuite) TestContainerApiBadPort(c *check.C) {
+	config := map[string]interface{}{
+		"Image": "busybox",
+		"Cmd":   []string{"/bin/sh", "-c", "echo test"},
+		"PortBindings": map[string]interface{}{
+			"8080/tcp": []map[string]interface{}{
+				{
+					"HostIP":   "",
+					"HostPort": "aa80",
+				},
+			},
+		},
+	}
+
+	status, b, err := sockRequest("POST", "/containers/create", config)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusInternalServerError)
+
+	if strings.TrimSpace(string(b)) != `Invalid port specification: "aa80"` {
+		c.Fatalf("Incorrect error msg: %s", string(b))
+	}
+}
+
+func (s *DockerSuite) TestContainerApiCreate(c *check.C) {
+	config := map[string]interface{}{
+		"Image": "busybox",
+		"Cmd":   []string{"/bin/sh", "-c", "touch /test && ls /test"},
+	}
+
+	status, b, err := sockRequest("POST", "/containers/create", config)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusCreated)
+
+	type createResp struct {
+		ID string
+	}
+	var container createResp
+	if err := json.Unmarshal(b, &container); err != nil {
+		c.Fatal(err)
+	}
+
+	out, _ := dockerCmd(c, "start", "-a", container.ID)
+	if strings.TrimSpace(out) != "/test" {
+		c.Fatalf("expected output `/test`, got %q", out)
+	}
+}
+
+func (s *DockerSuite) TestContainerApiCreateEmptyConfig(c *check.C) {
+	config := map[string]interface{}{}
+
+	status, b, err := sockRequest("POST", "/containers/create", config)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusInternalServerError)
+
+	expected := "Config cannot be empty in order to create a container\n"
+	if body := string(b); body != expected {
+ c.Fatalf("Expected to get %q, got %q", expected, body) + } +} + +func (s *DockerSuite) TestContainerApiCreateWithHostName(c *check.C) { + hostName := "test-host" + config := map[string]interface{}{ + "Image": "busybox", + "Hostname": hostName, + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated) + + var container types.ContainerCreateResponse + if err := json.Unmarshal(body, &container); err != nil { + c.Fatal(err) + } + + status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + if err := json.Unmarshal(body, &containerJSON); err != nil { + c.Fatal(err) + } + + if containerJSON.Config.Hostname != hostName { + c.Fatalf("Mismatched Hostname, Expected %s, Actual: %s ", hostName, containerJSON.Config.Hostname) + } +} + +func (s *DockerSuite) TestContainerApiCreateWithDomainName(c *check.C) { + domainName := "test-domain" + config := map[string]interface{}{ + "Image": "busybox", + "Domainname": domainName, + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated) + + var container types.ContainerCreateResponse + if err := json.Unmarshal(body, &container); err != nil { + c.Fatal(err) + } + + status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + if err := json.Unmarshal(body, &containerJSON); err != nil { + c.Fatal(err) + } + + if containerJSON.Config.Domainname != domainName { + c.Fatalf("Mismatched Domainname, Expected %s, Actual: %s ", domainName, containerJSON.Config.Domainname) + } +} + +func (s *DockerSuite) TestContainerApiCreateNetworkMode(c *check.C) { + UtilCreateNetworkMode(c, "host") + UtilCreateNetworkMode(c, "bridge") + UtilCreateNetworkMode(c, "container:web1") +} + +func UtilCreateNetworkMode(c *check.C, networkMode string) { + config := map[string]interface{}{ + "Image": "busybox", + "HostConfig": map[string]interface{}{"NetworkMode": networkMode}, + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated) + + var container types.ContainerCreateResponse + if err := json.Unmarshal(body, &container); err != nil { + c.Fatal(err) + } + + status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + if err := json.Unmarshal(body, &containerJSON); err != nil { + c.Fatal(err) + } + + if containerJSON.HostConfig.NetworkMode != runconfig.NetworkMode(networkMode) { + c.Fatalf("Mismatched NetworkMode, Expected %s, Actual: %s ", networkMode, containerJSON.HostConfig.NetworkMode) + } +} + +func (s *DockerSuite) TestContainerApiCreateWithCpuSharesCpuset(c *check.C) { + config := map[string]interface{}{ + "Image": "busybox", + "CpuShares": 512, + "CpusetCpus": "0,1", + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated) + + var container types.ContainerCreateResponse + if err := json.Unmarshal(body, &container); err != nil { + c.Fatal(err) + } + + status, 
body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + + c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) + + out, err := inspectField(containerJSON.Id, "HostConfig.CpuShares") + c.Assert(err, check.IsNil) + c.Assert(out, check.Equals, "512") + + outCpuset, errCpuset := inspectField(containerJSON.Id, "HostConfig.CpusetCpus") + c.Assert(errCpuset, check.IsNil, check.Commentf("Output: %s", outCpuset)) + c.Assert(outCpuset, check.Equals, "0,1") +} + +func (s *DockerSuite) TestContainerApiVerifyHeader(c *check.C) { + config := map[string]interface{}{ + "Image": "busybox", + } + + create := func(ct string) (*http.Response, io.ReadCloser, error) { + jsonData := bytes.NewBuffer(nil) + if err := json.NewEncoder(jsonData).Encode(config); err != nil { + c.Fatal(err) + } + return sockRequestRaw("POST", "/containers/create", jsonData, ct) + } + + // Try with no content-type + res, body, err := create("") + c.Assert(err, check.IsNil) + c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError) + body.Close() + + // Try with wrong content-type + res, body, err = create("application/xml") + c.Assert(err, check.IsNil) + c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError) + body.Close() + + // now application/json + res, body, err = create("application/json") + c.Assert(err, check.IsNil) + c.Assert(res.StatusCode, check.Equals, http.StatusCreated) + body.Close() +} + +//Issue 14230. daemon should return 500 for invalid port syntax +func (s *DockerSuite) TestContainerApiInvalidPortSyntax(c *check.C) { + config := `{ + "Image": "busybox", + "HostConfig": { + "PortBindings": { + "19039;1230": [ + {} + ] + } + } + }` + + res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + c.Assert(err, check.IsNil) + c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError) + + b, err := readBody(body) + if err != nil { + c.Fatal(err) + } + c.Assert(strings.Contains(string(b[:]), "Invalid port"), check.Equals, true) +} + +// Issue 7941 - test to make sure a "null" in JSON is just ignored. 
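+// (e.g. the "Cpuset":null below must come back from inspect as an empty
+// string, not as the literal string "null")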
+// Without this fix, a null in the JSON would be parsed into a string variable as "null"
+func (s *DockerSuite) TestContainerApiPostCreateNull(c *check.C) {
+	config := `{
+		"Hostname":"",
+		"Domainname":"",
+		"Memory":0,
+		"MemorySwap":0,
+		"CpuShares":0,
+		"Cpuset":null,
+		"AttachStdin":true,
+		"AttachStdout":true,
+		"AttachStderr":true,
+		"ExposedPorts":{},
+		"Tty":true,
+		"OpenStdin":true,
+		"StdinOnce":true,
+		"Env":[],
+		"Cmd":"ls",
+		"Image":"busybox",
+		"Volumes":{},
+		"WorkingDir":"",
+		"Entrypoint":null,
+		"NetworkDisabled":false,
+		"OnBuild":null}`
+
+	res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json")
+	c.Assert(err, check.IsNil)
+	c.Assert(res.StatusCode, check.Equals, http.StatusCreated)
+
+	b, err := readBody(body)
+	if err != nil {
+		c.Fatal(err)
+	}
+	type createResp struct {
+		ID string
+	}
+	var container createResp
+	if err := json.Unmarshal(b, &container); err != nil {
+		c.Fatal(err)
+	}
+
+	out, err := inspectField(container.ID, "HostConfig.CpusetCpus")
+	if err != nil {
+		c.Fatal(err, out)
+	}
+	if out != "" {
+		c.Fatalf("expected empty string, got %q", out)
+	}
+
+	outMemory, errMemory := inspectField(container.ID, "HostConfig.Memory")
+	c.Assert(outMemory, check.Equals, "0")
+	if errMemory != nil {
+		c.Fatal(errMemory, outMemory)
+	}
+	outMemorySwap, errMemorySwap := inspectField(container.ID, "HostConfig.MemorySwap")
+	c.Assert(outMemorySwap, check.Equals, "0")
+	if errMemorySwap != nil {
+		c.Fatal(errMemorySwap, outMemorySwap)
+	}
+}
+
+func (s *DockerSuite) TestCreateWithTooLowMemoryLimit(c *check.C) {
+	config := `{
+		"Image": "busybox",
+		"Cmd": "ls",
+		"OpenStdin": true,
+		"CpuShares": 100,
+		"Memory": 524287
+	}`
+
+	res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json")
+	c.Assert(err, check.IsNil)
+	b, err2 := readBody(body)
+	if err2 != nil {
+		c.Fatal(err2)
+	}
+
+	c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError)
+	c.Assert(strings.Contains(string(b), "Minimum memory limit allowed is 4MB"), check.Equals, true)
+}
+
+func (s *DockerSuite) TestStartWithTooLowMemoryLimit(c *check.C) {
+	out, _ := dockerCmd(c, "create", "busybox")
+
+	containerID := strings.TrimSpace(out)
+
+	config := `{
+		"CpuShares": 100,
+		"Memory": 524287
+	}`
+
+	res, body, err := sockRequestRaw("POST", "/containers/"+containerID+"/start", strings.NewReader(config), "application/json")
+	c.Assert(err, check.IsNil)
+	b, err2 := readBody(body)
+	if err2 != nil {
+		c.Fatal(err2)
+	}
+
+	c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError)
+	c.Assert(strings.Contains(string(b), "Minimum memory limit allowed is 4MB"), check.Equals, true)
+}
+
+func (s *DockerSuite) TestContainerApiRename(c *check.C) {
+	out, _ := dockerCmd(c, "run", "--name", "TestContainerApiRename", "-d", "busybox", "sh")
+
+	containerID := strings.TrimSpace(out)
+	newName := "TestContainerApiRenameNew"
+	statusCode, _, err := sockRequest("POST", "/containers/"+containerID+"/rename?name="+newName, nil)
+	c.Assert(err, check.IsNil)
+	// 204 No Content is expected, not 200
+	c.Assert(statusCode, check.Equals, http.StatusNoContent)
+
+	name, err := inspectField(containerID, "Name")
+	c.Assert(err, check.IsNil)
+	if name != "/"+newName {
+		c.Fatalf("Failed to rename container, expected %v, got %v", newName, name)
+	}
+}
+
+func (s *DockerSuite) TestContainerApiKill(c *check.C) {
+	name := "test-api-kill"
+	dockerCmd(c, "run", "-di", "--name", name, "busybox", "top")
+
+	status, _, err := sockRequest("POST", "/containers/"+name+"/kill", nil)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusNoContent)
+
+	state, err := inspectField(name, "State.Running")
+	if err != nil {
+		c.Fatal(err)
+	}
+	if state != "false" {
+		c.Fatalf("got wrong State from container %s: %q", name, state)
+	}
+}
+
+func (s *DockerSuite) TestContainerApiRestart(c *check.C) {
+	name := "test-api-restart"
+	dockerCmd(c, "run", "-di", "--name", name, "busybox", "top")
+
+	status, _, err := sockRequest("POST", "/containers/"+name+"/restart?t=1", nil)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusNoContent)
+
+	if err := waitInspect(name, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 5); err != nil {
+		c.Fatal(err)
+	}
+}
+
+func (s *DockerSuite) TestContainerApiRestartNotimeoutParam(c *check.C) {
+	name := "test-api-restart-no-timeout-param"
+	out, _ := dockerCmd(c, "run", "-di", "--name", name, "busybox", "top")
+	id := strings.TrimSpace(out)
+	c.Assert(waitRun(id), check.IsNil)
+
+	status, _, err := sockRequest("POST", "/containers/"+name+"/restart", nil)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusNoContent)
+
+	if err := waitInspect(name, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 5); err != nil {
+		c.Fatal(err)
+	}
+}
+
+func (s *DockerSuite) TestContainerApiStart(c *check.C) {
+	name := "testing-start"
+	config := map[string]interface{}{
+		"Image":     "busybox",
+		"Cmd":       []string{"/bin/sh", "-c", "/bin/top"},
+		"OpenStdin": true,
+	}
+
+	status, _, err := sockRequest("POST", "/containers/create?name="+name, config)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusCreated)
+
+	conf := make(map[string]interface{})
+	status, _, err = sockRequest("POST", "/containers/"+name+"/start", conf)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusNoContent)
+
+	// second call to start should give 304
+	status, _, err = sockRequest("POST", "/containers/"+name+"/start", conf)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusNotModified)
+}
+
+func (s *DockerSuite) TestContainerApiStop(c *check.C) {
+	name := "test-api-stop"
+	dockerCmd(c, "run", "-di", "--name", name, "busybox", "top")
+
+	status, _, err := sockRequest("POST", "/containers/"+name+"/stop?t=1", nil)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusNoContent)
+
+	if err := waitInspect(name, "{{ .State.Running }}", "false", 5); err != nil {
+		c.Fatal(err)
+	}
+
+	// second call to stop should give 304
+	status, _, err = sockRequest("POST", "/containers/"+name+"/stop?t=1", nil)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusNotModified)
+}
+
+func (s *DockerSuite) TestContainerApiWait(c *check.C) {
+	name := "test-api-wait"
+	dockerCmd(c, "run", "--name", name, "busybox", "sleep", "5")
+
+	status, body, err := sockRequest("POST", "/containers/"+name+"/wait", nil)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusOK)
+
+	if err := waitInspect(name, "{{ .State.Running }}", "false", 5); err != nil {
+		c.Fatal(err)
+	}
+
+	var waitres types.ContainerWaitResponse
+	if err := json.Unmarshal(body, &waitres); err != nil {
+		c.Fatalf("unable to unmarshal response body: %v", err)
+	}
+
+	if
waitres.StatusCode != 0 { + c.Fatalf("Expected wait response StatusCode to be 0, got %d", waitres.StatusCode) + } +} + +func (s *DockerSuite) TestContainerApiCopy(c *check.C) { + name := "test-container-api-copy" + dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test.txt") + + postData := types.CopyConfig{ + Resource: "/test.txt", + } + + status, body, err := sockRequest("POST", "/containers/"+name+"/copy", postData) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + found := false + for tarReader := tar.NewReader(bytes.NewReader(body)); ; { + h, err := tarReader.Next() + if err != nil { + if err == io.EOF { + break + } + c.Fatal(err) + } + if h.Name == "test.txt" { + found = true + break + } + } + c.Assert(found, check.Equals, true) +} + +func (s *DockerSuite) TestContainerApiCopyResourcePathEmpty(c *check.C) { + name := "test-container-api-copy-resource-empty" + dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test.txt") + + postData := types.CopyConfig{ + Resource: "", + } + + status, body, err := sockRequest("POST", "/containers/"+name+"/copy", postData) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + c.Assert(string(body), check.Matches, "Path cannot be empty\n") +} + +func (s *DockerSuite) TestContainerApiCopyResourcePathNotFound(c *check.C) { + name := "test-container-api-copy-resource-not-found" + dockerCmd(c, "run", "--name", name, "busybox") + + postData := types.CopyConfig{ + Resource: "/notexist", + } + + status, body, err := sockRequest("POST", "/containers/"+name+"/copy", postData) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + c.Assert(string(body), check.Matches, "Could not find the file /notexist in container "+name+"\n") +} + +func (s *DockerSuite) TestContainerApiCopyContainerNotFound(c *check.C) { + postData := types.CopyConfig{ + Resource: "/something", + } + + status, _, err := sockRequest("POST", "/containers/notexists/copy", postData) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNotFound) +} + +func (s *DockerSuite) TestContainerApiDelete(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + dockerCmd(c, "stop", id) + + status, _, err := sockRequest("DELETE", "/containers/"+id, nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNoContent) +} + +func (s *DockerSuite) TestContainerApiDeleteNotExist(c *check.C) { + status, body, err := sockRequest("DELETE", "/containers/doesnotexist", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNotFound) + c.Assert(string(body), check.Matches, "no such id: doesnotexist\n") +} + +func (s *DockerSuite) TestContainerApiDeleteForce(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + status, _, err := sockRequest("DELETE", "/containers/"+id+"?force=1", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNoContent) +} + +func (s *DockerSuite) TestContainerApiDeleteRemoveLinks(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "--name", "tlink1", "busybox", "top") + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + out, _ = dockerCmd(c, "run", "--link", "tlink1:tlink1", "--name", "tlink2", "-d", "busybox", "top") + + id2 := strings.TrimSpace(out) + c.Assert(waitRun(id2), check.IsNil) + 
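+	// Verify the link is present before exercising DELETE with ?link=1.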
+	links, err := inspectFieldJSON(id2, "HostConfig.Links")
+	c.Assert(err, check.IsNil)
+
+	if links != "[\"/tlink1:/tlink2/tlink1\"]" {
+		c.Fatal("expected to have links between containers")
+	}
+
+	status, _, err := sockRequest("DELETE", "/containers/tlink2/tlink1?link=1", nil)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusNoContent)
+
+	linksPostRm, err := inspectFieldJSON(id2, "HostConfig.Links")
+	c.Assert(err, check.IsNil)
+
+	if linksPostRm != "null" {
+		c.Fatal("DELETE with ?link=1 should have removed the link from HostConfig.Links")
+	}
+}
+
+func (s *DockerSuite) TestContainerApiDeleteConflict(c *check.C) {
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
+
+	id := strings.TrimSpace(out)
+	c.Assert(waitRun(id), check.IsNil)
+
+	status, _, err := sockRequest("DELETE", "/containers/"+id, nil)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusConflict)
+}
+
+func (s *DockerSuite) TestContainerApiDeleteRemoveVolume(c *check.C) {
+	testRequires(c, SameHostDaemon)
+
+	out, _ := dockerCmd(c, "run", "-d", "-v", "/testvolume", "busybox", "top")
+
+	id := strings.TrimSpace(out)
+	c.Assert(waitRun(id), check.IsNil)
+
+	source, err := inspectMountSourceField(id, "/testvolume")
+	c.Assert(err, check.IsNil)
+	_, err = os.Stat(source)
+	c.Assert(err, check.IsNil)
+
+	status, _, err := sockRequest("DELETE", "/containers/"+id+"?v=1&force=1", nil)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusNoContent)
+
+	if _, err := os.Stat(source); !os.IsNotExist(err) {
+		c.Fatalf("expected to get ErrNotExist error, got %v", err)
+	}
+}
+
+// Regression test for https://github.com/docker/docker/issues/6231
+func (s *DockerSuite) TestContainersApiChunkedEncoding(c *check.C) {
+	out, _ := dockerCmd(c, "create", "-v", "/foo", "busybox", "true")
+	id := strings.TrimSpace(out)
+
+	conn, err := sockConn(10 * time.Second)
+	if err != nil {
+		c.Fatal(err)
+	}
+	client := httputil.NewClientConn(conn, nil)
+	defer client.Close()
+
+	bindCfg := strings.NewReader(`{"Binds": ["/tmp:/foo"]}`)
+	req, err := http.NewRequest("POST", "/containers/"+id+"/start", bindCfg)
+	if err != nil {
+		c.Fatal(err)
+	}
+	req.Header.Set("Content-Type", "application/json")
+	// This is a cheat to make the http request do chunked encoding.
+	// Otherwise (just setting the Transfer-Encoding to chunked) net/http would
+	// overwrite it:
+	// https://golang.org/src/pkg/net/http/request.go?s=11980:12172
+	req.ContentLength = -1
+
+	resp, err := client.Do(req)
+	if err != nil {
+		c.Fatalf("error starting container with chunked encoding: %v", err)
+	}
+	resp.Body.Close()
+	if resp.StatusCode != 204 {
+		c.Fatalf("expected status code 204, got %d", resp.StatusCode)
+	}
+
+	out, err = inspectFieldJSON(id, "HostConfig.Binds")
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	var binds []string
+	if err := json.NewDecoder(strings.NewReader(out)).Decode(&binds); err != nil {
+		c.Fatal(err)
+	}
+	if len(binds) != 1 {
+		c.Fatalf("got unexpected binds: %v", binds)
+	}
+
+	expected := "/tmp:/foo"
+	if binds[0] != expected {
+		c.Fatalf("got incorrect bind spec, wanted %s, got: %s", expected, binds[0])
+	}
+}
+
+func (s *DockerSuite) TestPostContainerStop(c *check.C) {
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
+
+	containerID := strings.TrimSpace(out)
+	c.Assert(waitRun(containerID), check.IsNil)
+
+	statusCode, _, err := sockRequest("POST", "/containers/"+containerID+"/stop", nil)
+	c.Assert(err, check.IsNil)
+	// 204 No Content is expected, not 200
+	c.Assert(statusCode, check.Equals,
http.StatusNoContent) + + if err := waitInspect(containerID, "{{ .State.Running }}", "false", 5); err != nil { + c.Fatal(err) + } +} + +// #14170 +func (s *DockerSuite) TestPostContainersCreateWithStringOrSliceEntrypoint(c *check.C) { + config := struct { + Image string + Entrypoint string + Cmd []string + }{"busybox", "echo", []string{"hello", "world"}} + _, _, err := sockRequest("POST", "/containers/create?name=echotest", config) + c.Assert(err, check.IsNil) + out, _ := dockerCmd(c, "start", "-a", "echotest") + c.Assert(strings.TrimSpace(out), check.Equals, "hello world") + + config2 := struct { + Image string + Entrypoint []string + Cmd []string + }{"busybox", []string{"echo"}, []string{"hello", "world"}} + _, _, err = sockRequest("POST", "/containers/create?name=echotest2", config2) + c.Assert(err, check.IsNil) + out, _ = dockerCmd(c, "start", "-a", "echotest2") + c.Assert(strings.TrimSpace(out), check.Equals, "hello world") +} + +// #14170 +func (s *DockerSuite) TestPostContainersCreateWithStringOrSliceCmd(c *check.C) { + config := struct { + Image string + Entrypoint string + Cmd string + }{"busybox", "echo", "hello world"} + _, _, err := sockRequest("POST", "/containers/create?name=echotest", config) + c.Assert(err, check.IsNil) + out, _ := dockerCmd(c, "start", "-a", "echotest") + c.Assert(strings.TrimSpace(out), check.Equals, "hello world") + + config2 := struct { + Image string + Cmd []string + }{"busybox", []string{"echo", "hello", "world"}} + _, _, err = sockRequest("POST", "/containers/create?name=echotest2", config2) + c.Assert(err, check.IsNil) + out, _ = dockerCmd(c, "start", "-a", "echotest2") + c.Assert(strings.TrimSpace(out), check.Equals, "hello world") +} + +// regression #14318 +func (s *DockerSuite) TestPostContainersCreateWithStringOrSliceCapAddDrop(c *check.C) { + config := struct { + Image string + CapAdd string + CapDrop string + }{"busybox", "NET_ADMIN", "SYS_ADMIN"} + status, _, err := sockRequest("POST", "/containers/create?name=capaddtest0", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated) + + config2 := struct { + Image string + CapAdd []string + CapDrop []string + }{"busybox", []string{"NET_ADMIN", "SYS_ADMIN"}, []string{"SETGID"}} + status, _, err = sockRequest("POST", "/containers/create?name=capaddtest1", config2) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated) +} + +// #14640 +func (s *DockerSuite) TestPostContainersStartWithoutLinksInHostConfig(c *check.C) { + name := "test-host-config-links" + dockerCmd(c, "create", "--name", name, "busybox", "top") + + hc, err := inspectFieldJSON(name, "HostConfig") + c.Assert(err, check.IsNil) + config := `{"HostConfig":` + hc + `}` + + res, b, err := sockRequestRaw("POST", "/containers/"+name+"/start", strings.NewReader(config), "application/json") + c.Assert(err, check.IsNil) + c.Assert(res.StatusCode, check.Equals, http.StatusNoContent) + b.Close() +} + +// #14640 +func (s *DockerSuite) TestPostContainersStartWithLinksInHostConfig(c *check.C) { + name := "test-host-config-links" + dockerCmd(c, "run", "--name", "foo", "-d", "busybox", "top") + dockerCmd(c, "create", "--name", name, "--link", "foo:bar", "busybox", "top") + + hc, err := inspectFieldJSON(name, "HostConfig") + c.Assert(err, check.IsNil) + config := `{"HostConfig":` + hc + `}` + + res, b, err := sockRequestRaw("POST", "/containers/"+name+"/start", strings.NewReader(config), "application/json") + c.Assert(err, check.IsNil) + c.Assert(res.StatusCode, check.Equals, 
http.StatusNoContent) + b.Close() +} + +// #14640 +func (s *DockerSuite) TestPostContainersStartWithLinksInHostConfigIdLinked(c *check.C) { + name := "test-host-config-links" + out, _ := dockerCmd(c, "run", "--name", "link0", "-d", "busybox", "top") + id := strings.TrimSpace(out) + dockerCmd(c, "create", "--name", name, "--link", id, "busybox", "top") + + hc, err := inspectFieldJSON(name, "HostConfig") + c.Assert(err, check.IsNil) + config := `{"HostConfig":` + hc + `}` + + res, b, err := sockRequestRaw("POST", "/containers/"+name+"/start", strings.NewReader(config), "application/json") + c.Assert(err, check.IsNil) + c.Assert(res.StatusCode, check.Equals, http.StatusNoContent) + b.Close() +} + +// #14915 +func (s *DockerSuite) TestContainersApiCreateNoHostConfig118(c *check.C) { + config := struct { + Image string + }{"busybox"} + status, _, err := sockRequest("POST", "/v1.18/containers/create", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated) +} + +// Ensure an error occurs when you have a container read-only rootfs but you +// extract an archive to a symlink in a writable volume which points to a +// directory outside of the volume. +func (s *DockerSuite) TestPutContainerArchiveErrSymlinkInVolumeToReadOnlyRootfs(c *check.C) { + testRequires(c, SameHostDaemon) // Requires local volume mount bind. + + testVol := getTestDir(c, "test-put-container-archive-err-symlink-in-volume-to-read-only-rootfs-") + defer os.RemoveAll(testVol) + + makeTestContentInDir(c, testVol) + + cID := makeTestContainer(c, testContainerOptions{ + readOnly: true, + volumes: defaultVolumes(testVol), // Our bind mount is at /vol2 + }) + defer deleteContainer(cID) + + // Attempt to extract to a symlink in the volume which points to a + // directory outside the volume. This should cause an error because the + // rootfs is read-only. 
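+	// (the daemon resolves /vol2/symlinkToAbsDir inside the container, sees the
+	// target escape the volume onto the read-only rootfs, and must refuse the copy)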
+	query := make(url.Values, 1)
+	query.Set("path", "/vol2/symlinkToAbsDir")
+	urlPath := fmt.Sprintf("/v1.20/containers/%s/archive?%s", cID, query.Encode())
+
+	statusCode, body, err := sockRequest("PUT", urlPath, nil)
+	c.Assert(err, check.IsNil)
+
+	if !isCpCannotCopyReadOnly(fmt.Errorf(string(body))) {
+		c.Fatalf("expected ErrContainerRootfsReadonly error, but got %d: %s", statusCode, string(body))
+	}
+}
diff --git a/integration-cli/docker_api_events_test.go b/integration-cli/docker_api_events_test.go
new file mode 100644
index 00000000..8ae7bf73
--- /dev/null
+++ b/integration-cli/docker_api_events_test.go
@@ -0,0 +1,29 @@
+package main
+
+import (
+	"net/http"
+	"time"
+
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestEventsApiEmptyOutput(c *check.C) {
+	type apiResp struct {
+		resp *http.Response
+		err  error
+	}
+	chResp := make(chan *apiResp)
+	go func() {
+		resp, body, err := sockRequestRaw("GET", "/events", nil, "")
+		// guard against a nil body when the request itself failed
+		if body != nil {
+			body.Close()
+		}
+		chResp <- &apiResp{resp, err}
+	}()
+
+	select {
+	case r := <-chResp:
+		c.Assert(r.err, check.IsNil)
+		c.Assert(r.resp.StatusCode, check.Equals, http.StatusOK)
+	case <-time.After(3 * time.Second):
+		c.Fatal("timeout waiting for events api to respond, should have responded immediately")
+	}
+}
diff --git a/integration-cli/docker_api_exec_resize_test.go b/integration-cli/docker_api_exec_resize_test.go
new file mode 100644
index 00000000..01061ca6
--- /dev/null
+++ b/integration-cli/docker_api_exec_resize_test.go
@@ -0,0 +1,18 @@
+package main
+
+import (
+	"net/http"
+	"strings"
+
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestExecResizeApiHeightWidthNoInt(c *check.C) {
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
+	cleanedContainerID := strings.TrimSpace(out)
+
+	endpoint := "/exec/" + cleanedContainerID + "/resize?h=foo&w=bar"
+	status, _, err := sockRequest("POST", endpoint, nil)
+	c.Assert(status, check.Equals, http.StatusInternalServerError)
+	c.Assert(err, check.IsNil)
+}
diff --git a/integration-cli/docker_api_exec_test.go b/integration-cli/docker_api_exec_test.go
new file mode 100644
index 00000000..3d99fe6b
--- /dev/null
+++ b/integration-cli/docker_api_exec_test.go
@@ -0,0 +1,47 @@
+// +build !test_no_exec
+
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"github.com/go-check/check"
+)
+
+// Regression test for #9414
+func (s *DockerSuite) TestExecApiCreateNoCmd(c *check.C) {
+	name := "exec_test"
+	dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh")
+
+	status, body, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": nil})
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusInternalServerError)
+
+	if !bytes.Contains(body, []byte("No exec command specified")) {
+		c.Fatalf("Expected message when creating exec command with no Cmd specified")
+	}
+}
+
+func (s *DockerSuite) TestExecApiCreateNoValidContentType(c *check.C) {
+	name := "exec_test"
+	dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh")
+
+	jsonData := bytes.NewBuffer(nil)
+	if err := json.NewEncoder(jsonData).Encode(map[string]interface{}{"Cmd": nil}); err != nil {
+		c.Fatalf("Cannot encode data to json %s", err)
+	}
+
+	res, body, err := sockRequestRaw("POST", fmt.Sprintf("/containers/%s/exec", name), jsonData, "text/plain")
+	c.Assert(err, check.IsNil)
+	c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError)
+
+	b, err := readBody(body)
+	c.Assert(err, check.IsNil)
+
+	if !bytes.Contains(b,
[]byte("Content-Type specified")) { + c.Fatalf("Expected message when creating exec command with invalid Content-Type specified") + } +} diff --git a/integration-cli/docker_api_images_test.go b/integration-cli/docker_api_images_test.go new file mode 100644 index 00000000..339a2f48 --- /dev/null +++ b/integration-cli/docker_api_images_test.go @@ -0,0 +1,133 @@ +package main + +import ( + "encoding/json" + "net/http" + "net/url" + "strings" + + "github.com/docker/docker/api/types" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestApiImagesFilter(c *check.C) { + name := "utest:tag1" + name2 := "utest/docker:tag2" + name3 := "utest:5000/docker:tag3" + for _, n := range []string{name, name2, name3} { + dockerCmd(c, "tag", "busybox", n) + } + type image types.Image + getImages := func(filter string) []image { + v := url.Values{} + v.Set("filter", filter) + status, b, err := sockRequest("GET", "/images/json?"+v.Encode(), nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + var images []image + if err := json.Unmarshal(b, &images); err != nil { + c.Fatal(err) + } + + return images + } + + errMsg := "incorrect number of matches returned" + if images := getImages("utest*/*"); len(images[0].RepoTags) != 2 { + c.Fatal(errMsg) + } + if images := getImages("utest"); len(images[0].RepoTags) != 1 { + c.Fatal(errMsg) + } + if images := getImages("utest*"); len(images[0].RepoTags) != 1 { + c.Fatal(errMsg) + } + if images := getImages("*5000*/*"); len(images[0].RepoTags) != 1 { + c.Fatal(errMsg) + } +} + +func (s *DockerSuite) TestApiImagesSaveAndLoad(c *check.C) { + testRequires(c, Network) + out, err := buildImage("saveandload", "FROM hello-world\nENV FOO bar", false) + if err != nil { + c.Fatal(err) + } + id := strings.TrimSpace(out) + + res, body, err := sockRequestRaw("GET", "/images/"+id+"/get", nil, "") + c.Assert(err, check.IsNil) + c.Assert(res.StatusCode, check.Equals, http.StatusOK) + + defer body.Close() + + dockerCmd(c, "rmi", id) + + res, loadBody, err := sockRequestRaw("POST", "/images/load", body, "application/x-tar") + c.Assert(err, check.IsNil) + c.Assert(res.StatusCode, check.Equals, http.StatusOK) + + defer loadBody.Close() + + inspectOut, _ := dockerCmd(c, "inspect", "--format='{{ .Id }}'", id) + if strings.TrimSpace(string(inspectOut)) != id { + c.Fatal("load did not work properly") + } +} + +func (s *DockerSuite) TestApiImagesDelete(c *check.C) { + testRequires(c, Network) + name := "test-api-images-delete" + out, err := buildImage(name, "FROM hello-world\nENV FOO bar", false) + if err != nil { + c.Fatal(err) + } + id := strings.TrimSpace(out) + + dockerCmd(c, "tag", name, "test:tag1") + + status, _, err := sockRequest("DELETE", "/images/"+id, nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusConflict) + + status, _, err = sockRequest("DELETE", "/images/test:noexist", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNotFound) //Status Codes:404 – no such image + + status, _, err = sockRequest("DELETE", "/images/test:tag1", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) +} + +func (s *DockerSuite) TestApiImagesHistory(c *check.C) { + testRequires(c, Network) + name := "test-api-images-history" + out, err := buildImage(name, "FROM hello-world\nENV FOO bar", false) + c.Assert(err, check.IsNil) + + id := strings.TrimSpace(out) + + status, body, err := sockRequest("GET", "/images/"+id+"/history", nil) + c.Assert(err, check.IsNil) + c.Assert(status, 
check.Equals, http.StatusOK) + + var historydata []types.ImageHistory + if err = json.Unmarshal(body, &historydata); err != nil { + c.Fatalf("Error on unmarshal: %s", err) + } + + c.Assert(len(historydata), check.Not(check.Equals), 0) + c.Assert(historydata[0].Tags[0], check.Equals, "test-api-images-history:latest") +} + +// #14846 +func (s *DockerSuite) TestApiImagesSearchJSONContentType(c *check.C) { + testRequires(c, Network) + + res, b, err := sockRequestRaw("GET", "/images/search?term=test", nil, "application/json") + c.Assert(err, check.IsNil) + b.Close() + c.Assert(res.StatusCode, check.Equals, http.StatusOK) + c.Assert(res.Header.Get("Content-Type"), check.Equals, "application/json") +} diff --git a/integration-cli/docker_api_info_test.go b/integration-cli/docker_api_info_test.go new file mode 100644 index 00000000..40842891 --- /dev/null +++ b/integration-cli/docker_api_info_test.go @@ -0,0 +1,36 @@ +package main + +import ( + "net/http" + "strings" + + "github.com/go-check/check" +) + +func (s *DockerSuite) TestInfoApi(c *check.C) { + endpoint := "/info" + + status, body, err := sockRequest("GET", endpoint, nil) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) + + // always shown fields + stringsToCheck := []string{ + "ID", + "Containers", + "Images", + "ExecutionDriver", + "LoggingDriver", + "OperatingSystem", + "NCPU", + "MemTotal", + "KernelVersion", + "Driver"} + + out := string(body) + for _, linePrefix := range stringsToCheck { + if !strings.Contains(out, linePrefix) { + c.Errorf("couldn't find string %v in output", linePrefix) + } + } +} diff --git a/integration-cli/docker_api_inspect_test.go b/integration-cli/docker_api_inspect_test.go new file mode 100644 index 00000000..2287e7d4 --- /dev/null +++ b/integration-cli/docker_api_inspect_test.go @@ -0,0 +1,50 @@ +package main + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + + "github.com/go-check/check" +) + +func (s *DockerSuite) TestInspectApiContainerResponse(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + + cleanedContainerID := strings.TrimSpace(out) + keysBase := []string{"Id", "State", "Created", "Path", "Args", "Config", "Image", "NetworkSettings", + "ResolvConfPath", "HostnamePath", "HostsPath", "LogPath", "Name", "Driver", "ExecDriver", "MountLabel", "ProcessLabel", "GraphDriver"} + + cases := []struct { + version string + keys []string + }{ + {"1.20", append(keysBase, "Mounts")}, + {"1.19", append(keysBase, "Volumes", "VolumesRW")}, + } + + for _, cs := range cases { + endpoint := fmt.Sprintf("/v%s/containers/%s/json", cs.version, cleanedContainerID) + + status, body, err := sockRequest("GET", endpoint, nil) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) + + var inspectJSON map[string]interface{} + if err = json.Unmarshal(body, &inspectJSON); err != nil { + c.Fatalf("unable to unmarshal body for version %s: %v", cs.version, err) + } + + for _, key := range cs.keys { + if _, ok := inspectJSON[key]; !ok { + c.Fatalf("%s does not exist in response for version %s", key, cs.version) + } + } + + //Issue #6830: type not properly converted to JSON/back + if _, ok := inspectJSON["Path"].(bool); ok { + c.Fatalf("Path of `true` should not be converted to boolean `true` via JSON marshalling") + } + } +} diff --git a/integration-cli/docker_api_logs_test.go b/integration-cli/docker_api_logs_test.go new file mode 100644 index 00000000..d4784475 --- /dev/null +++ b/integration-cli/docker_api_logs_test.go @@ -0,0 +1,83 @@ +package 
main
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestLogsApiWithStdout(c *check.C) {
+	out, _ := dockerCmd(c, "run", "-d", "-t", "busybox", "/bin/sh", "-c", "while true; do echo hello; sleep 1; done")
+	id := strings.TrimSpace(out)
+	if err := waitRun(id); err != nil {
+		c.Fatal(err)
+	}
+
+	type logOut struct {
+		out string
+		res *http.Response
+		err error
+	}
+	chLog := make(chan logOut)
+
+	go func() {
+		res, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&timestamps=1", id), nil, "")
+		if err != nil {
+			chLog <- logOut{"", nil, err}
+			return
+		}
+		defer body.Close()
+		out, err := bufio.NewReader(body).ReadString('\n')
+		if err != nil {
+			chLog <- logOut{"", nil, err}
+			return
+		}
+		chLog <- logOut{strings.TrimSpace(out), res, err}
+	}()
+
+	select {
+	case l := <-chLog:
+		c.Assert(l.err, check.IsNil)
+		c.Assert(l.res.StatusCode, check.Equals, http.StatusOK)
+		if !strings.HasSuffix(l.out, "hello") {
+			c.Fatalf("expected log output to contain 'hello', but it does not")
+		}
+	case <-time.After(2 * time.Second):
+		c.Fatal("timeout waiting for logs to exit")
+	}
+}
+
+func (s *DockerSuite) TestLogsApiNoStdoutNorStderr(c *check.C) {
+	name := "logs_test"
+	dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh")
+
+	status, body, err := sockRequest("GET", fmt.Sprintf("/containers/%s/logs", name), nil)
+	c.Assert(status, check.Equals, http.StatusBadRequest)
+	c.Assert(err, check.IsNil)
+
+	expected := "Bad parameters: you must choose at least one stream"
+	if !bytes.Contains(body, []byte(expected)) {
+		c.Fatalf("Expected %s, got %s", expected, string(body))
+	}
+}
+
+// Regression test for #12704
+func (s *DockerSuite) TestLogsApiFollowEmptyOutput(c *check.C) {
+	name := "logs_test"
+	t0 := time.Now()
+	dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "sleep", "10")
+
+	_, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name), bytes.NewBuffer(nil), "")
+	t1 := time.Now()
+	c.Assert(err, check.IsNil)
+	body.Close()
+	elapsed := t1.Sub(t0).Seconds()
+	if elapsed > 5.0 {
+		c.Fatalf("HTTP response was not immediate (elapsed %.1fs)", elapsed)
+	}
+}
diff --git a/integration-cli/docker_api_network_test.go b/integration-cli/docker_api_network_test.go
new file mode 100644
index 00000000..44d2b31b
--- /dev/null
+++ b/integration-cli/docker_api_network_test.go
@@ -0,0 +1,72 @@
+// +build experimental
+
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"github.com/go-check/check"
+)
+
+func isNetworkAvailable(c *check.C, name string) bool {
+	status, body, err := sockRequest("GET", "/networks", nil)
+	c.Assert(status, check.Equals, http.StatusOK)
+	c.Assert(err, check.IsNil)
+
+	var inspectJSON []struct {
+		Name string
+		ID   string
+		Type string
+	}
+	if err = json.Unmarshal(body, &inspectJSON); err != nil {
+		c.Fatalf("unable to unmarshal response body: %v", err)
+	}
+	for _, n := range inspectJSON {
+		if n.Name == name {
+			return true
+		}
+	}
+	return false
+
+}
+
+func (s *DockerSuite) TestNetworkApiGetAll(c *check.C) {
+	defaults := []string{"bridge", "host", "none"}
+	for _, nn := range defaults {
+		if !isNetworkAvailable(c, nn) {
+			c.Fatalf("Missing default network: %s", nn)
+		}
+	}
+}
+
+func (s *DockerSuite) TestNetworkApiCreateDelete(c *check.C) {
+	name := "testnetwork"
+	config := map[string]interface{}{
+		"name":         name,
+		"network_type": "bridge",
+	}
+
+	status, resp,
err := sockRequest("POST", "/networks", config) + c.Assert(status, check.Equals, http.StatusCreated) + c.Assert(err, check.IsNil) + + if !isNetworkAvailable(c, name) { + c.Fatalf("Network %s not found", name) + } + + var id string + err = json.Unmarshal(resp, &id) + if err != nil { + c.Fatal(err) + } + + status, _, err = sockRequest("DELETE", fmt.Sprintf("/networks/%s", id), nil) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) + + if isNetworkAvailable(c, name) { + c.Fatalf("Network %s not deleted", name) + } +} diff --git a/integration-cli/docker_api_resize_test.go b/integration-cli/docker_api_resize_test.go new file mode 100644 index 00000000..c7a577b8 --- /dev/null +++ b/integration-cli/docker_api_resize_test.go @@ -0,0 +1,45 @@ +package main + +import ( + "net/http" + "strings" + + "github.com/go-check/check" +) + +func (s *DockerSuite) TestResizeApiResponse(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + cleanedContainerID := strings.TrimSpace(out) + + endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40" + status, _, err := sockRequest("POST", endpoint, nil) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestResizeApiHeightWidthNoInt(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + cleanedContainerID := strings.TrimSpace(out) + + endpoint := "/containers/" + cleanedContainerID + "/resize?h=foo&w=bar" + status, _, err := sockRequest("POST", endpoint, nil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestResizeApiResponseWhenContainerNotStarted(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + cleanedContainerID := strings.TrimSpace(out) + + // make sure the exited container is not running + dockerCmd(c, "wait", cleanedContainerID) + + endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40" + status, body, err := sockRequest("POST", endpoint, nil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + c.Assert(err, check.IsNil) + + if !strings.Contains(string(body), "Cannot resize container") && !strings.Contains(string(body), cleanedContainerID) { + c.Fatalf("resize should fail with message 'Cannot resize container' but instead received %s", string(body)) + } +} diff --git a/integration-cli/docker_api_service_test.go b/integration-cli/docker_api_service_test.go new file mode 100644 index 00000000..df072197 --- /dev/null +++ b/integration-cli/docker_api_service_test.go @@ -0,0 +1,113 @@ +// +build experimental + +package main + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/go-check/check" +) + +func isServiceAvailable(c *check.C, name string, network string) bool { + status, body, err := sockRequest("GET", "/services", nil) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) + + var inspectJSON []struct { + Name string + ID string + Network string + } + if err = json.Unmarshal(body, &inspectJSON); err != nil { + c.Fatalf("unable to unmarshal response body: %v", err) + } + for _, s := range inspectJSON { + if s.Name == name && s.Network == network { + return true + } + } + return false + +} + +func isServiceNetworkAvailable(c *check.C, name string) bool { + status, body, err := sockRequest("GET", "/networks", nil) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) + + var inspectJSON []struct { + Name string + ID string + Type string + } + if err 
= json.Unmarshal(body, &inspectJSON); err != nil { + c.Fatalf("unable to unmarshal response body: %v", err) + } + for _, n := range inspectJSON { + if n.Name == name { + return true + } + } + return false +} + +func (s *DockerSuite) TestServiceApiCreateDelete(c *check.C) { + name := "testnetwork" + config := map[string]interface{}{ + "name": name, + "network_type": "bridge", + } + + status, resp, err := sockRequest("POST", "/networks", config) + c.Assert(status, check.Equals, http.StatusCreated) + c.Assert(err, check.IsNil) + + if !isServiceNetworkAvailable(c, name) { + c.Fatalf("Network %s not found", name) + } + + var nid string + err = json.Unmarshal(resp, &nid) + if err != nil { + c.Fatal(err) + } + + sname := "service1" + sconfig := map[string]interface{}{ + "name": sname, + "network_name": name, + } + + status, resp, err = sockRequest("POST", "/services", sconfig) + c.Assert(status, check.Equals, http.StatusCreated) + c.Assert(err, check.IsNil) + + if !isServiceAvailable(c, sname, name) { + c.Fatalf("Service %s.%s not found", sname, name) + } + + var id string + err = json.Unmarshal(resp, &id) + if err != nil { + c.Fatal(err) + } + + status, _, err = sockRequest("DELETE", fmt.Sprintf("/services/%s", id), nil) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) + + if isServiceAvailable(c, sname, name) { + c.Fatalf("Service %s.%s not deleted", sname, name) + } + + status, _, err = sockRequest("DELETE", fmt.Sprintf("/networks/%s", nid), nil) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) + + if isNetworkAvailable(c, name) { + c.Fatalf("Network %s not deleted", name) + } +} diff --git a/integration-cli/docker_api_stats_test.go b/integration-cli/docker_api_stats_test.go new file mode 100644 index 00000000..f019e00d --- /dev/null +++ b/integration-cli/docker_api_stats_test.go @@ -0,0 +1,120 @@ +package main + +import ( + "encoding/json" + "fmt" + "os/exec" + "runtime" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestCliStatsNoStreamGetCpu(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true;do echo 'Hello'; usleep 100000; done") + + id := strings.TrimSpace(out) + err := waitRun(id) + c.Assert(err, check.IsNil) + + resp, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=false", id), nil, "") + c.Assert(err, check.IsNil) + c.Assert(resp.ContentLength > 0, check.Equals, true, check.Commentf("should not use chunked encoding")) + c.Assert(resp.Header.Get("Content-Type"), check.Equals, "application/json") + + var v *types.Stats + err = json.NewDecoder(body).Decode(&v) + c.Assert(err, check.IsNil) + body.Close() + + // CPU percent over the sampled interval: the container's usage delta divided by the + // system-wide delta, scaled by the number of CPUs and expressed as a percentage. + var cpuPercent = 0.0 + cpuDelta := float64(v.CpuStats.CpuUsage.TotalUsage - v.PreCpuStats.CpuUsage.TotalUsage) + systemDelta := float64(v.CpuStats.SystemUsage - v.PreCpuStats.SystemUsage) + cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CpuStats.CpuUsage.PercpuUsage)) * 100.0 + if cpuPercent == 0 { + c.Fatalf("docker stats with no-stream failed to get cpu usage: was %v", cpuPercent) + } +} + +func (s *DockerSuite) TestStoppedContainerStatsGoroutines(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo 1") + id := strings.TrimSpace(out) + + getGoRoutines := func() int { + _, body, err := sockRequestRaw("GET", "/info", nil, "") + c.Assert(err, check.IsNil) + info := types.Info{} + err = json.NewDecoder(body).Decode(&info) + 
c.Assert(err, check.IsNil) + body.Close() + return info.NGoroutines + } + + // When the HTTP connection is closed, the number of goroutines should not increase. + routines := getGoRoutines() + _, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats", id), nil, "") + c.Assert(err, check.IsNil) + body.Close() + + t := time.After(30 * time.Second) + for { + select { + case <-t: + c.Assert(getGoRoutines() <= routines, check.Equals, true) + return + default: + if n := getGoRoutines(); n <= routines { + return + } + time.Sleep(200 * time.Millisecond) + } + } +} + +func (s *DockerSuite) TestApiNetworkStats(c *check.C) { + testRequires(c, SameHostDaemon) + // Run a long-lived container so it is still up while we ping it + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id := strings.TrimSpace(out) + err := waitRun(id) + c.Assert(err, check.IsNil) + + // Retrieve the container address + contIP := findContainerIP(c, id) + numPings := 10 + + // Get the container networking stats before and after pinging the container + nwStatsPre := getNetworkStats(c, id) + countParam := "-c" + if runtime.GOOS == "windows" { + countParam = "-n" // Ping count parameter is -n on Windows + } + pingout, err := exec.Command("ping", contIP, countParam, strconv.Itoa(numPings)).Output() + pingouts := string(pingout) + c.Assert(err, check.IsNil) + nwStatsPost := getNetworkStats(c, id) + + // Verify the stats contain at least the expected number of packets (account for ARP) + expRxPkts := 1 + nwStatsPre.RxPackets + uint64(numPings) + expTxPkts := 1 + nwStatsPre.TxPackets + uint64(numPings) + c.Assert(nwStatsPost.TxPackets >= expTxPkts, check.Equals, true, + check.Commentf("Reported fewer TxPackets than expected. Expected >= %d. Found %d. %s", expTxPkts, nwStatsPost.TxPackets, pingouts)) + c.Assert(nwStatsPost.RxPackets >= expRxPkts, check.Equals, true, + check.Commentf("Reported fewer RxPackets than expected. Expected >= %d. Found %d. %s", expRxPkts, nwStatsPost.RxPackets, pingouts)) +} + +func getNetworkStats(c *check.C, id string) types.Network { + var st *types.Stats + + _, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=false", id), nil, "") + c.Assert(err, check.IsNil) + + err = json.NewDecoder(body).Decode(&st) + c.Assert(err, check.IsNil) + body.Close() + + return st.Network +} diff --git a/integration-cli/docker_api_test.go b/integration-cli/docker_api_test.go new file mode 100644 index 00000000..6cbf3019 --- /dev/null +++ b/integration-cli/docker_api_test.go @@ -0,0 +1,73 @@ +package main + +import ( + "net/http" + "net/http/httputil" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestApiOptionsRoute(c *check.C) { + status, _, err := sockRequest("OPTIONS", "/", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) +} + +func (s *DockerSuite) TestApiGetEnabledCors(c *check.C) { + res, body, err := sockRequestRaw("GET", "/version", nil, "") + c.Assert(err, check.IsNil) + c.Assert(res.StatusCode, check.Equals, http.StatusOK) + body.Close() + // TODO: @runcom incomplete tests; why did the old integration tests have these headers + // while none of the headers below appear in this response? 
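+ // One plausible explanation (editor's assumption, not verified here): the daemon only sets these CORS headers when started with --api-enable-cors (or --api-cors-header), and the daemon under test is not.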
+ //c.Log(res.Header) + //c.Assert(res.Header.Get("Access-Control-Allow-Origin"), check.Equals, "*") + //c.Assert(res.Header.Get("Access-Control-Allow-Headers"), check.Equals, "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth") +} + +func (s *DockerSuite) TestApiVersionStatusCode(c *check.C) { + conn, err := sockConn(10 * time.Second) + c.Assert(err, check.IsNil) + + client := httputil.NewClientConn(conn, nil) + defer client.Close() + + req, err := http.NewRequest("GET", "/v999.0/version", nil) + c.Assert(err, check.IsNil) + req.Header.Set("User-Agent", "Docker-Client/999.0 (os)") + + res, err := client.Do(req) + // httputil.ClientConn may return ErrPersistEOF when the daemon closes the + // connection after responding; only unexpected errors should fail the test. + if err != nil && err != httputil.ErrPersistEOF { + c.Fatal(err) + } + c.Assert(res.StatusCode, check.Equals, http.StatusBadRequest) +} + +func (s *DockerSuite) TestApiClientVersionNewerThanServer(c *check.C) { + v := strings.Split(string(api.Version), ".") + vMinInt, err := strconv.Atoi(v[1]) + c.Assert(err, check.IsNil) + vMinInt++ + v[1] = strconv.Itoa(vMinInt) + version := strings.Join(v, ".") + + status, body, err := sockRequest("GET", "/v"+version+"/version", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusBadRequest) + c.Assert(len(string(body)), check.Not(check.Equals), 0) // Expected not empty body +} + +func (s *DockerSuite) TestApiClientVersionOldNotSupported(c *check.C) { + v := strings.Split(string(api.MinVersion), ".") + vMinInt, err := strconv.Atoi(v[1]) + c.Assert(err, check.IsNil) + vMinInt-- + v[1] = strconv.Itoa(vMinInt) + version := strings.Join(v, ".") + + status, body, err := sockRequest("GET", "/v"+version+"/version", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusBadRequest) + c.Assert(len(string(body)), check.Not(check.Equals), 0) // Expected not empty body +} diff --git a/integration-cli/docker_api_version_test.go b/integration-cli/docker_api_version_test.go new file mode 100644 index 00000000..b756794c --- /dev/null +++ b/integration-cli/docker_api_version_test.go @@ -0,0 +1,25 @@ +package main + +import ( + "encoding/json" + "net/http" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/autogen/dockerversion" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestGetVersion(c *check.C) { + status, body, err := sockRequest("GET", "/version", nil) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) + + var v types.Version + if err := json.Unmarshal(body, &v); err != nil { + c.Fatal(err) + } + + if v.Version != dockerversion.VERSION { + c.Fatalf("Version mismatch: got %s, expected %s", v.Version, dockerversion.VERSION) + } +} diff --git a/integration-cli/docker_cli_attach_test.go b/integration-cli/docker_cli_attach_test.go new file mode 100644 index 00000000..f45f775e --- /dev/null +++ b/integration-cli/docker_cli_attach_test.go @@ -0,0 +1,176 @@ +package main + +import ( + "bufio" + "fmt" + "io" + "os/exec" + "strings" + "sync" + "time" + + "github.com/go-check/check" +) + +const attachWait = 5 * time.Second + +func (s *DockerSuite) TestAttachMultipleAndRestart(c *check.C) { + + endGroup := &sync.WaitGroup{} + startGroup := &sync.WaitGroup{} + endGroup.Add(3) + startGroup.Add(3) + + if err := waitForContainer("attacher", "-d", "busybox", "/bin/sh", "-c", "while true; do sleep 1; echo hello; done"); err != nil { + c.Fatal(err) + } + + startDone := make(chan struct{}) + endDone := make(chan struct{}) + + go func() { + endGroup.Wait() + close(endDone) + }() + + go func() { + startGroup.Wait() + close(startDone) + }() + + for i := 0; i < 3; i++ { + go func() { + cmd := exec.Command(dockerBinary, "attach", "attacher") + + defer func() { 
cmd.Wait() + endGroup.Done() + }() + + out, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + + buf := make([]byte, 1024) + + if _, err := out.Read(buf); err != nil && err != io.EOF { + c.Fatal(err) + } + + startGroup.Done() + + if !strings.Contains(string(buf), "hello") { + c.Fatalf("unexpected output %s expected hello\n", string(buf)) + } + }() + } + + select { + case <-startDone: + case <-time.After(attachWait): + c.Fatalf("Attaches did not initialize properly") + } + + dockerCmd(c, "kill", "attacher") + + select { + case <-endDone: + case <-time.After(attachWait): + c.Fatalf("Attaches did not finish properly") + } + +} + +func (s *DockerSuite) TestAttachTtyWithoutStdin(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "-ti", "busybox") + + id := strings.TrimSpace(out) + if err := waitRun(id); err != nil { + c.Fatal(err) + } + + defer func() { + cmd := exec.Command(dockerBinary, "kill", id) + if out, _, err := runCommandWithOutput(cmd); err != nil { + c.Fatalf("failed to kill container: %v (%v)", out, err) + } + }() + + done := make(chan error) + go func() { + defer close(done) + + cmd := exec.Command(dockerBinary, "attach", id) + if _, err := cmd.StdinPipe(); err != nil { + done <- err + return + } + + expected := "cannot enable tty mode" + if out, _, err := runCommandWithOutput(cmd); err == nil { + done <- fmt.Errorf("attach should have failed") + return + } else if !strings.Contains(out, expected) { + done <- fmt.Errorf("attach failed with error %q: expected %q", out, expected) + return + } + }() + + select { + case err := <-done: + c.Assert(err, check.IsNil) + case <-time.After(attachWait): + c.Fatal("attach is running but should have failed") + } +} + +func (s *DockerSuite) TestAttachDisconnect(c *check.C) { + out, _ := dockerCmd(c, "run", "-di", "busybox", "/bin/cat") + id := strings.TrimSpace(out) + + cmd := exec.Command(dockerBinary, "attach", id) + stdin, err := cmd.StdinPipe() + if err != nil { + c.Fatal(err) + } + defer stdin.Close() + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + defer stdout.Close() + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + defer cmd.Process.Kill() + + if _, err := stdin.Write([]byte("hello\n")); err != nil { + c.Fatal(err) + } + out, err = bufio.NewReader(stdout).ReadString('\n') + if err != nil { + c.Fatal(err) + } + if strings.TrimSpace(out) != "hello" { + c.Fatalf("expected 'hello', got %q", out) + } + + if err := stdin.Close(); err != nil { + c.Fatal(err) + } + + // Expect container to still be running after stdin is closed + running, err := inspectField(id, "State.Running") + if err != nil { + c.Fatal(err) + } + if running != "true" { + c.Fatal("expected container to still be running") + } + +} diff --git a/integration-cli/docker_cli_attach_unix_test.go b/integration-cli/docker_cli_attach_unix_test.go new file mode 100644 index 00000000..9718dc07 --- /dev/null +++ b/integration-cli/docker_cli_attach_unix_test.go @@ -0,0 +1,279 @@ +// +build !windows + +package main + +import ( + "bufio" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/pkg/stringid" + "github.com/go-check/check" + "github.com/kr/pty" +) + +// #9860 +func (s *DockerSuite) TestAttachClosedOnContainerStop(c *check.C) { + + out, _ := dockerCmd(c, "run", "-dti", "busybox", "sleep", "2") + + id := strings.TrimSpace(out) + if err := waitRun(id); err != nil { + c.Fatal(err) + } + + errChan := make(chan error) + go func() { + defer close(errChan) + + _, tty, err := 
pty.Open() + if err != nil { + errChan <- err + return + } + attachCmd := exec.Command(dockerBinary, "attach", id) + attachCmd.Stdin = tty + attachCmd.Stdout = tty + attachCmd.Stderr = tty + + if err := attachCmd.Run(); err != nil { + errChan <- err + return + } + }() + + dockerCmd(c, "wait", id) + + select { + case err := <-errChan: + c.Assert(err, check.IsNil) + case <-time.After(attachWait): + c.Fatal("timed out without attach returning") + } + +} + +func (s *DockerSuite) TestAttachAfterDetach(c *check.C) { + + name := "detachtest" + + cpty, tty, err := pty.Open() + if err != nil { + c.Fatalf("Could not open pty: %v", err) + } + cmd := exec.Command(dockerBinary, "run", "-ti", "--name", name, "busybox") + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + + errChan := make(chan error) + go func() { + errChan <- cmd.Run() + close(errChan) + }() + + time.Sleep(500 * time.Millisecond) + if err := waitRun(name); err != nil { + c.Fatal(err) + } + cpty.Write([]byte{16}) + time.Sleep(100 * time.Millisecond) + cpty.Write([]byte{17}) + + select { + case err := <-errChan: + c.Assert(err, check.IsNil) + case <-time.After(5 * time.Second): + c.Fatal("timeout while detaching") + } + + cpty, tty, err = pty.Open() + if err != nil { + c.Fatalf("Could not open pty: %v", err) + } + + cmd = exec.Command(dockerBinary, "attach", name) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + + bytes := make([]byte, 10) + var nBytes int + readErr := make(chan error, 1) + + go func() { + time.Sleep(500 * time.Millisecond) + cpty.Write([]byte("\n")) + time.Sleep(500 * time.Millisecond) + + nBytes, err = cpty.Read(bytes) + cpty.Close() + readErr <- err + }() + + select { + case err := <-readErr: + c.Assert(err, check.IsNil) + case <-time.After(2 * time.Second): + c.Fatal("timeout waiting for attach read") + } + + if err := cmd.Wait(); err != nil { + c.Fatal(err) + } + + if !strings.Contains(string(bytes[:nBytes]), "/ #") { + c.Fatalf("failed to get a new prompt. 
got %s", string(bytes[:nBytes])) + } + +} + +// TestAttachDetach checks that attach in tty mode can be detached using the long container ID +func (s *DockerSuite) TestAttachDetach(c *check.C) { + out, _ := dockerCmd(c, "run", "-itd", "busybox", "cat") + id := strings.TrimSpace(out) + if err := waitRun(id); err != nil { + c.Fatal(err) + } + + cpty, tty, err := pty.Open() + if err != nil { + c.Fatal(err) + } + defer cpty.Close() + + cmd := exec.Command(dockerBinary, "attach", id) + cmd.Stdin = tty + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + defer stdout.Close() + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + if err := waitRun(id); err != nil { + c.Fatalf("error waiting for container to start: %v", err) + } + + if _, err := cpty.Write([]byte("hello\n")); err != nil { + c.Fatal(err) + } + out, err = bufio.NewReader(stdout).ReadString('\n') + if err != nil { + c.Fatal(err) + } + if strings.TrimSpace(out) != "hello" { + c.Fatalf("expected 'hello', got %q", out) + } + + // escape sequence + if _, err := cpty.Write([]byte{16}); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write([]byte{17}); err != nil { + c.Fatal(err) + } + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + running, err := inspectField(id, "State.Running") + if err != nil { + c.Fatal(err) + } + if running != "true" { + c.Fatal("expected container to still be running") + } + + go func() { + dockerCmd(c, "kill", id) + }() + + select { + case <-ch: + case <-time.After(10 * time.Millisecond): + c.Fatal("timed out waiting for container to exit") + } + +} + +// TestAttachDetachTruncatedID checks that attach in tty mode can be detached +func (s *DockerSuite) TestAttachDetachTruncatedID(c *check.C) { + out, _ := dockerCmd(c, "run", "-itd", "busybox", "cat") + id := stringid.TruncateID(strings.TrimSpace(out)) + if err := waitRun(id); err != nil { + c.Fatal(err) + } + + cpty, tty, err := pty.Open() + if err != nil { + c.Fatal(err) + } + defer cpty.Close() + + cmd := exec.Command(dockerBinary, "attach", id) + cmd.Stdin = tty + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + defer stdout.Close() + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + + if _, err := cpty.Write([]byte("hello\n")); err != nil { + c.Fatal(err) + } + out, err = bufio.NewReader(stdout).ReadString('\n') + if err != nil { + c.Fatal(err) + } + if strings.TrimSpace(out) != "hello" { + c.Fatalf("expected 'hello', got %q", out) + } + + // escape sequence + if _, err := cpty.Write([]byte{16}); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write([]byte{17}); err != nil { + c.Fatal(err) + } + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + running, err := inspectField(id, "State.Running") + if err != nil { + c.Fatal(err) + } + if running != "true" { + c.Fatal("expected container to still be running") + } + + go func() { + dockerCmd(c, "kill", id) + }() + + select { + case <-ch: + case <-time.After(10 * time.Millisecond): + c.Fatal("timed out waiting for container to exit") + } + +} diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go new file mode 100644 index 00000000..6dc24df9 --- /dev/null +++ b/integration-cli/docker_cli_build_test.go @@ -0,0 +1,5420 @@ +package main + +import ( + "archive/tar" + "bufio" + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "reflect" + 
"regexp" + "runtime" + "strconv" + "strings" + "text/template" + "time" + + "github.com/docker/docker/builder/command" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/stringutils" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestBuildJSONEmptyRun(c *check.C) { + name := "testbuildjsonemptyrun" + + _, err := buildImage( + name, + ` + FROM busybox + RUN [] + `, + true) + + if err != nil { + c.Fatal("error when dealing with a RUN statement with empty JSON array") + } + +} + +func (s *DockerSuite) TestBuildEmptyWhitespace(c *check.C) { + name := "testbuildemptywhitespace" + + _, err := buildImage( + name, + ` + FROM busybox + COPY + quux \ + bar + `, + true) + + if err == nil { + c.Fatal("no error when dealing with a COPY statement with no content on the same line") + } + +} + +func (s *DockerSuite) TestBuildShCmdJSONEntrypoint(c *check.C) { + name := "testbuildshcmdjsonentrypoint" + + _, err := buildImage( + name, + ` + FROM busybox + ENTRYPOINT ["/bin/echo"] + CMD echo test + `, + true) + + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--rm", name) + + if strings.TrimSpace(out) != "/bin/sh -c echo test" { + c.Fatalf("CMD did not contain /bin/sh -c : %s", out) + } + +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementUser(c *check.C) { + name := "testbuildenvironmentreplacement" + + _, err := buildImage(name, ` + FROM scratch + ENV user foo + USER ${user} + `, true) + if err != nil { + c.Fatal(err) + } + + res, err := inspectFieldJSON(name, "Config.User") + if err != nil { + c.Fatal(err) + } + + if res != `"foo"` { + c.Fatal("User foo from environment not in Config.User on image") + } + +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementVolume(c *check.C) { + name := "testbuildenvironmentreplacement" + + _, err := buildImage(name, ` + FROM scratch + ENV volume /quux + VOLUME ${volume} + `, true) + if err != nil { + c.Fatal(err) + } + + res, err := inspectFieldJSON(name, "Config.Volumes") + if err != nil { + c.Fatal(err) + } + + var volumes map[string]interface{} + + if err := json.Unmarshal([]byte(res), &volumes); err != nil { + c.Fatal(err) + } + + if _, ok := volumes["/quux"]; !ok { + c.Fatal("Volume /quux from environment not in Config.Volumes on image") + } + +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementExpose(c *check.C) { + name := "testbuildenvironmentreplacement" + + _, err := buildImage(name, ` + FROM scratch + ENV port 80 + EXPOSE ${port} + `, true) + if err != nil { + c.Fatal(err) + } + + res, err := inspectFieldJSON(name, "Config.ExposedPorts") + if err != nil { + c.Fatal(err) + } + + var exposedPorts map[string]interface{} + + if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil { + c.Fatal(err) + } + + if _, ok := exposedPorts["80/tcp"]; !ok { + c.Fatal("Exposed port 80 from environment not in Config.ExposedPorts on image") + } + +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementWorkdir(c *check.C) { + name := "testbuildenvironmentreplacement" + + _, err := buildImage(name, ` + FROM busybox + ENV MYWORKDIR /work + RUN mkdir ${MYWORKDIR} + WORKDIR ${MYWORKDIR} + `, true) + + if err != nil { + c.Fatal(err) + } + +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementAddCopy(c *check.C) { + name := "testbuildenvironmentreplacement" + + ctx, err := fakeContext(` + FROM scratch + ENV baz foo + ENV quux bar + ENV dot . 
+ ENV fee fff + ENV gee ggg + + ADD ${baz} ${dot} + COPY ${quux} ${dot} + ADD ${zzz:-${fee}} ${dot} + COPY ${zzz:-${gee}} ${dot} + `, + map[string]string{ + "foo": "test1", + "bar": "test2", + "fff": "test3", + "ggg": "test4", + }) + + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementEnv(c *check.C) { + name := "testbuildenvironmentreplacement" + + _, err := buildImage(name, + ` + FROM busybox + ENV foo zzz + ENV bar ${foo} + ENV abc1='$foo' + ENV env1=$foo env2=${foo} env3="$foo" env4="${foo}" + RUN [ "$abc1" = '$foo' ] && (echo "$abc1" | grep -q foo) + ENV abc2="\$foo" + RUN [ "$abc2" = '$foo' ] && (echo "$abc2" | grep -q foo) + ENV abc3 '$foo' + RUN [ "$abc3" = '$foo' ] && (echo "$abc3" | grep -q foo) + ENV abc4 "\$foo" + RUN [ "$abc4" = '$foo' ] && (echo "$abc4" | grep -q foo) + `, true) + + if err != nil { + c.Fatal(err) + } + + res, err := inspectFieldJSON(name, "Config.Env") + if err != nil { + c.Fatal(err) + } + + envResult := []string{} + + if err = unmarshalJSON([]byte(res), &envResult); err != nil { + c.Fatal(err) + } + + found := false + envCount := 0 + + for _, env := range envResult { + parts := strings.SplitN(env, "=", 2) + if parts[0] == "bar" { + found = true + if parts[1] != "zzz" { + c.Fatalf("Could not find replaced var for env `bar`: got %q instead of `zzz`", parts[1]) + } + } else if strings.HasPrefix(parts[0], "env") { + envCount++ + // env1..env4 are all spellings of $foo and must have expanded to zzz + if parts[1] != "zzz" { + c.Fatalf("%s should be 'zzz' but instead it's %q", parts[0], parts[1]) + } + } + } + + if !found { + c.Fatal("Never found the `bar` env variable") + } + + if envCount != 4 { + c.Fatalf("Didn't find all env vars - only saw %d\n%s", envCount, envResult) + } + +} + +func (s *DockerSuite) TestBuildHandleEscapes(c *check.C) { + name := "testbuildhandleescapes" + + _, err := buildImage(name, + ` + FROM scratch + ENV FOO bar + VOLUME ${FOO} + `, true) + + if err != nil { + c.Fatal(err) + } + + var result map[string]map[string]struct{} + + res, err := inspectFieldJSON(name, "Config.Volumes") + if err != nil { + c.Fatal(err) + } + + if err = unmarshalJSON([]byte(res), &result); err != nil { + c.Fatal(err) + } + + if _, ok := result["bar"]; !ok { + c.Fatal("Could not find volume bar set from env foo in volumes table") + } + + deleteImages(name) + + _, err = buildImage(name, + ` + FROM scratch + ENV FOO bar + VOLUME \${FOO} + `, true) + + if err != nil { + c.Fatal(err) + } + + res, err = inspectFieldJSON(name, "Config.Volumes") + if err != nil { + c.Fatal(err) + } + + if err = unmarshalJSON([]byte(res), &result); err != nil { + c.Fatal(err) + } + + if _, ok := result["${FOO}"]; !ok { + c.Fatal("Could not find volume ${FOO} set from env foo in volumes table") + } + + deleteImages(name) + + // this test in particular provides *7* backslashes and expects 6 to come back. + // Like above, the first escape is swallowed and the rest are treated as + // literals, this one is just less obvious because of all the character noise. 
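+ // (Editor's note, an informal reading: each doubled backslash collapses to a single literal backslash and the trailing \$ keeps ${FOO} from being expanded, so the volume key checked below carries the surviving backslashes plus the literal ${FOO} text.)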
+ + _, err = buildImage(name, + ` + FROM scratch + ENV FOO bar + VOLUME \\\\\\\${FOO} + `, true) + + if err != nil { + c.Fatal(err) + } + + res, err = inspectFieldJSON(name, "Config.Volumes") + if err != nil { + c.Fatal(err) + } + + if err = unmarshalJSON([]byte(res), &result); err != nil { + c.Fatal(err) + } + + if _, ok := result[`\\\${FOO}`]; !ok { + c.Fatal(`Could not find volume \\\${FOO} set from env foo in volumes table`, result) + } + +} + +func (s *DockerSuite) TestBuildOnBuildLowercase(c *check.C) { + name := "testbuildonbuildlowercase" + name2 := "testbuildonbuildlowercase2" + + _, err := buildImage(name, + ` + FROM busybox + onbuild run echo quux + `, true) + + if err != nil { + c.Fatal(err) + } + + _, out, err := buildImageWithOut(name2, fmt.Sprintf(` + FROM %s + `, name), true) + + if err != nil { + c.Fatal(err) + } + + if !strings.Contains(out, "quux") { + c.Fatalf("Did not receive the expected echo text, got %s", out) + } + + if strings.Contains(out, "ONBUILD ONBUILD") { + c.Fatalf("Output unexpectedly contained 'ONBUILD ONBUILD': got %s", out) + } + +} + +func (s *DockerSuite) TestBuildEnvEscapes(c *check.C) { + name := "testbuildenvescapes" + _, err := buildImage(name, + ` + FROM busybox + ENV TEST foo + CMD echo \$ + `, + true) + + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "-t", name) + + if strings.TrimSpace(out) != "$" { + c.Fatalf("CMD echo \\$ should have printed a literal '$': was %q", strings.TrimSpace(out)) + } + +} + +func (s *DockerSuite) TestBuildEnvOverwrite(c *check.C) { + name := "testbuildenvoverwrite" + + _, err := buildImage(name, + ` + FROM busybox + ENV TEST foo + CMD echo ${TEST} + `, + true) + + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "-e", "TEST=bar", "-t", name) + + if strings.TrimSpace(out) != "bar" { + c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out)) + } + +} + +func (s *DockerSuite) TestBuildOnBuildForbiddenMaintainerInSourceImage(c *check.C) { + name := "testbuildonbuildforbiddenmaintainerinsourceimage" + + out, _ := dockerCmd(c, "create", "busybox", "true") + + cleanedContainerID := strings.TrimSpace(out) + + dockerCmd(c, "commit", "--run", "{\"OnBuild\":[\"MAINTAINER docker.io\"]}", cleanedContainerID, "onbuild") + + _, err := buildImage(name, + `FROM onbuild`, + true) + if err != nil { + if !strings.Contains(err.Error(), "maintainer isn't allowed as an ONBUILD trigger") { + c.Fatalf("Wrong error %v, must be about MAINTAINER and ONBUILD in source image", err) + } + } else { + c.Fatal("Error must not be nil") + } + +} + +func (s *DockerSuite) TestBuildOnBuildForbiddenFromInSourceImage(c *check.C) { + name := "testbuildonbuildforbiddenfrominsourceimage" + + out, _ := dockerCmd(c, "create", "busybox", "true") + + cleanedContainerID := strings.TrimSpace(out) + + dockerCmd(c, "commit", "--run", "{\"OnBuild\":[\"FROM busybox\"]}", cleanedContainerID, "onbuild") + + _, err := buildImage(name, + `FROM onbuild`, + true) + if err != nil { + if !strings.Contains(err.Error(), "from isn't allowed as an ONBUILD trigger") { + c.Fatalf("Wrong error %v, must be about FROM and ONBUILD in source image", err) + } + } else { + c.Fatal("Error must not be nil") + } + +} + +func (s *DockerSuite) TestBuildOnBuildForbiddenChainedInSourceImage(c *check.C) { + name := "testbuildonbuildforbiddenchainedinsourceimage" + + out, _ := dockerCmd(c, "create", "busybox", "true") + + cleanedContainerID := strings.TrimSpace(out) + + 
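// Commit an image whose OnBuild metadata itself begins with ONBUILD; the build below must reject this chaining. + 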
dockerCmd(c, "commit", "--run", "{\"OnBuild\":[\"ONBUILD RUN ls\"]}", cleanedContainerID, "onbuild") + + _, err := buildImage(name, + `FROM onbuild`, + true) + if err != nil { + if !strings.Contains(err.Error(), "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") { + c.Fatalf("Wrong error %v, must be about chaining ONBUILD in source image", err) + } + } else { + c.Fatal("Error must not be nil") + } + +} + +func (s *DockerSuite) TestBuildOnBuildCmdEntrypointJSON(c *check.C) { + name1 := "onbuildcmd" + name2 := "onbuildgenerated" + + _, err := buildImage(name1, ` +FROM busybox +ONBUILD CMD ["hello world"] +ONBUILD ENTRYPOINT ["echo"] +ONBUILD RUN ["true"]`, + false) + + if err != nil { + c.Fatal(err) + } + + _, err = buildImage(name2, fmt.Sprintf(`FROM %s`, name1), false) + + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "-t", name2) + + if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) { + c.Fatal("did not get echo output from onbuild", out) + } + +} + +func (s *DockerSuite) TestBuildOnBuildEntrypointJSON(c *check.C) { + name1 := "onbuildcmd" + name2 := "onbuildgenerated" + + _, err := buildImage(name1, ` +FROM busybox +ONBUILD ENTRYPOINT ["echo"]`, + false) + + if err != nil { + c.Fatal(err) + } + + _, err = buildImage(name2, fmt.Sprintf("FROM %s\nCMD [\"hello world\"]\n", name1), false) + + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "-t", name2) + + if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) { + c.Fatal("got malformed output from onbuild", out) + } + +} + +func (s *DockerSuite) TestBuildCacheAdd(c *check.C) { + name := "testbuildtwoimageswithadd" + server, err := fakeStorage(map[string]string{ + "robots.txt": "hello", + "index.html": "world", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + if _, err := buildImage(name, + fmt.Sprintf(`FROM scratch + ADD %s/robots.txt /`, server.URL()), + true); err != nil { + c.Fatal(err) + } + if err != nil { + c.Fatal(err) + } + deleteImages(name) + _, out, err := buildImageWithOut(name, + fmt.Sprintf(`FROM scratch + ADD %s/index.html /`, server.URL()), + true) + if err != nil { + c.Fatal(err) + } + if strings.Contains(out, "Using cache") { + c.Fatal("2nd build used cache on ADD, it shouldn't") + } + +} + +func (s *DockerSuite) TestBuildLastModified(c *check.C) { + name := "testbuildlastmodified" + + server, err := fakeStorage(map[string]string{ + "file": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + var out, out2 string + + dFmt := `FROM busybox +ADD %s/file / +RUN ls -le /file` + + dockerfile := fmt.Sprintf(dFmt, server.URL()) + + if _, out, err = buildImageWithOut(name, dockerfile, false); err != nil { + c.Fatal(err) + } + + originMTime := regexp.MustCompile(`root.*/file.*\n`).FindString(out) + // Make sure our regexp is correct + if strings.Index(originMTime, "/file") < 0 { + c.Fatalf("Missing ls info on 'file':\n%s", out) + } + + // Build it again and make sure the mtime of the file didn't change. 
+ // Wait a few seconds to make sure the time changed enough to notice + time.Sleep(2 * time.Second) + + if _, out2, err = buildImageWithOut(name, dockerfile, false); err != nil { + c.Fatal(err) + } + + newMTime := regexp.MustCompile(`root.*/file.*\n`).FindString(out2) + if newMTime != originMTime { + c.Fatalf("MTime changed:\nOrigin:%s\nNew:%s", originMTime, newMTime) + } + + // Now 'touch' the file and make sure the timestamp DID change this time + // Create a new fakeStorage instead of just using Add() to help windows + server, err = fakeStorage(map[string]string{ + "file": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + dockerfile = fmt.Sprintf(dFmt, server.URL()) + + if _, out2, err = buildImageWithOut(name, dockerfile, false); err != nil { + c.Fatal(err) + } + + newMTime = regexp.MustCompile(`root.*/file.*\n`).FindString(out2) + if newMTime == originMTime { + c.Fatalf("MTime didn't change:\nOrigin:%s\nNew:%s", originMTime, newMTime) + } + +} + +func (s *DockerSuite) TestBuildSixtySteps(c *check.C) { + name := "foobuildsixtysteps" + ctx, err := fakeContext("FROM scratch\n"+strings.Repeat("ADD foo /\n", 60), + map[string]string{ + "foo": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildAddSingleFileToRoot(c *check.C) { + name := "testaddimg" + ctx, err := fakeContext(fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +ADD test_file / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// Issue #3960: "ADD src ." 
hangs +func (s *DockerSuite) TestBuildAddSingleFileToWorkdir(c *check.C) { + name := "testaddsinglefiletoworkdir" + ctx, err := fakeContext(`FROM busybox +ADD test_file .`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + errChan := make(chan error) + go func() { + _, err := buildImageFromContext(name, ctx, true) + errChan <- err + close(errChan) + }() + select { + case <-time.After(5 * time.Second): + c.Fatal("Build with adding to workdir timed out") + case err := <-errChan: + c.Assert(err, check.IsNil) + } +} + +func (s *DockerSuite) TestBuildAddSingleFileToExistDir(c *check.C) { + name := "testaddsinglefiletoexistdir" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +ADD test_file /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopyAddMultipleFiles(c *check.C) { + server, err := fakeStorage(map[string]string{ + "robots.txt": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + name := "testcopymultiplefilestofile" + ctx, err := fakeContext(fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_file1 test_file2 /exists/ +ADD test_file3 test_file4 %s/robots.txt /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file1 | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/test_file2 | awk '{print $3":"$4}') = 'root:root' ] + +RUN [ $(ls -l /exists/test_file3 | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/test_file4 | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/robots.txt | awk '{print $3":"$4}') = 'root:root' ] + +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +`, server.URL()), + map[string]string{ + "test_file1": "test1", + "test_file2": "test2", + "test_file3": "test3", + "test_file4": "test4", + }) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildAddMultipleFilesToFile(c *check.C) { + name := "testaddmultiplefilestofile" + ctx, err := fakeContext(`FROM scratch + ADD file1.txt file2.txt test + `, + map[string]string{ + "file1.txt": "test1", + "file2.txt": "test1", + }) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + + expected := "When using ADD with more than one source file, the destination must be a directory and end with a /" + if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { + c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) + } + +} + +func (s *DockerSuite) TestBuildJSONAddMultipleFilesToFile(c *check.C) { + name := 
"testjsonaddmultiplefilestofile" + ctx, err := fakeContext(`FROM scratch + ADD ["file1.txt", "file2.txt", "test"] + `, + map[string]string{ + "file1.txt": "test1", + "file2.txt": "test1", + }) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + + expected := "When using ADD with more than one source file, the destination must be a directory and end with a /" + if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { + c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) + } + +} + +func (s *DockerSuite) TestBuildAddMultipleFilesToFileWild(c *check.C) { + name := "testaddmultiplefilestofilewild" + ctx, err := fakeContext(`FROM scratch + ADD file*.txt test + `, + map[string]string{ + "file1.txt": "test1", + "file2.txt": "test1", + }) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + + expected := "When using ADD with more than one source file, the destination must be a directory and end with a /" + if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { + c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) + } + +} + +func (s *DockerSuite) TestBuildJSONAddMultipleFilesToFileWild(c *check.C) { + name := "testjsonaddmultiplefilestofilewild" + ctx, err := fakeContext(`FROM scratch + ADD ["file*.txt", "test"] + `, + map[string]string{ + "file1.txt": "test1", + "file2.txt": "test1", + }) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + + expected := "When using ADD with more than one source file, the destination must be a directory and end with a /" + if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { + c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) + } + +} + +func (s *DockerSuite) TestBuildCopyMultipleFilesToFile(c *check.C) { + name := "testcopymultiplefilestofile" + ctx, err := fakeContext(`FROM scratch + COPY file1.txt file2.txt test + `, + map[string]string{ + "file1.txt": "test1", + "file2.txt": "test1", + }) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + + expected := "When using COPY with more than one source file, the destination must be a directory and end with a /" + if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { + c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) + } + +} + +func (s *DockerSuite) TestBuildJSONCopyMultipleFilesToFile(c *check.C) { + name := "testjsoncopymultiplefilestofile" + ctx, err := fakeContext(`FROM scratch + COPY ["file1.txt", "file2.txt", "test"] + `, + map[string]string{ + "file1.txt": "test1", + "file2.txt": "test1", + }) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + + expected := "When using COPY with more than one source file, the destination must be a directory and end with a /" + if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { + c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) + } + +} + +func (s *DockerSuite) TestBuildAddFileWithWhitespace(c *check.C) { + name := "testaddfilewithwhitespace" + ctx, err := fakeContext(`FROM busybox +RUN mkdir "/test dir" +RUN mkdir "/test_dir" +ADD [ "test file1", "/test_file1" ] +ADD [ "test_file2", "/test file2" ] +ADD [ "test file3", "/test file3" ] +ADD [ "test dir/test_file4", "/test_dir/test_file4" ] +ADD [ "test_dir/test_file5", "/test dir/test_file5" ] +ADD [ "test dir/test_file6", 
"/test dir/test_file6" ] +RUN [ $(cat "/test_file1") = 'test1' ] +RUN [ $(cat "/test file2") = 'test2' ] +RUN [ $(cat "/test file3") = 'test3' ] +RUN [ $(cat "/test_dir/test_file4") = 'test4' ] +RUN [ $(cat "/test dir/test_file5") = 'test5' ] +RUN [ $(cat "/test dir/test_file6") = 'test6' ]`, + map[string]string{ + "test file1": "test1", + "test_file2": "test2", + "test file3": "test3", + "test dir/test_file4": "test4", + "test_dir/test_file5": "test5", + "test dir/test_file6": "test6", + }) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopyFileWithWhitespace(c *check.C) { + name := "testcopyfilewithwhitespace" + ctx, err := fakeContext(`FROM busybox +RUN mkdir "/test dir" +RUN mkdir "/test_dir" +COPY [ "test file1", "/test_file1" ] +COPY [ "test_file2", "/test file2" ] +COPY [ "test file3", "/test file3" ] +COPY [ "test dir/test_file4", "/test_dir/test_file4" ] +COPY [ "test_dir/test_file5", "/test dir/test_file5" ] +COPY [ "test dir/test_file6", "/test dir/test_file6" ] +RUN [ $(cat "/test_file1") = 'test1' ] +RUN [ $(cat "/test file2") = 'test2' ] +RUN [ $(cat "/test file3") = 'test3' ] +RUN [ $(cat "/test_dir/test_file4") = 'test4' ] +RUN [ $(cat "/test dir/test_file5") = 'test5' ] +RUN [ $(cat "/test dir/test_file6") = 'test6' ]`, + map[string]string{ + "test file1": "test1", + "test_file2": "test2", + "test file3": "test3", + "test dir/test_file4": "test4", + "test_dir/test_file5": "test5", + "test dir/test_file6": "test6", + }) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildAddMultipleFilesToFileWithWhitespace(c *check.C) { + name := "testaddmultiplefilestofilewithwhitespace" + ctx, err := fakeContext(`FROM busybox + ADD [ "test file1", "test file2", "test" ] + `, + map[string]string{ + "test file1": "test1", + "test file2": "test2", + }) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + + expected := "When using ADD with more than one source file, the destination must be a directory and end with a /" + if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { + c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) + } + +} + +func (s *DockerSuite) TestBuildCopyMultipleFilesToFileWithWhitespace(c *check.C) { + name := "testcopymultiplefilestofilewithwhitespace" + ctx, err := fakeContext(`FROM busybox + COPY [ "test file1", "test file2", "test" ] + `, + map[string]string{ + "test file1": "test1", + "test file2": "test2", + }) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + + expected := "When using COPY with more than one source file, the destination must be a directory and end with a /" + if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { + c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) + } + +} + +func (s *DockerSuite) TestBuildCopyWildcard(c *check.C) { + name := "testcopywildcard" + server, err := fakeStorage(map[string]string{ + "robots.txt": "hello", + "index.html": "world", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + ctx, err := fakeContext(fmt.Sprintf(`FROM busybox + COPY file*.txt /tmp/ + RUN ls /tmp/file1.txt /tmp/file2.txt + RUN mkdir /tmp1 + COPY dir* /tmp1/ + RUN ls /tmp1/dirt /tmp1/nested_file 
/tmp1/nested_dir/nest_nest_file + RUN mkdir /tmp2 + ADD dir/*dir %s/robots.txt /tmp2/ + RUN ls /tmp2/nest_nest_file /tmp2/robots.txt + `, server.URL()), + map[string]string{ + "file1.txt": "test1", + "file2.txt": "test2", + "dir/nested_file": "nested file", + "dir/nested_dir/nest_nest_file": "2 times nested", + "dirt": "dirty", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + + // Now make sure we use a cache the 2nd time + id2, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + + if id1 != id2 { + c.Fatal("didn't use the cache") + } + +} + +func (s *DockerSuite) TestBuildCopyWildcardNoFind(c *check.C) { + name := "testcopywildcardnofind" + ctx, err := fakeContext(`FROM busybox + COPY file*.txt /tmp/ + `, nil) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + + _, err = buildImageFromContext(name, ctx, true) + if err == nil { + c.Fatal("should have failed to find a file") + } + if !strings.Contains(err.Error(), "No source files were specified") { + c.Fatalf("Wrong error %v, must be about no source files", err) + } + +} + +func (s *DockerSuite) TestBuildCopyWildcardInName(c *check.C) { + name := "testcopywildcardinname" + ctx, err := fakeContext(`FROM busybox + COPY *.txt /tmp/ + RUN [ "$(cat /tmp/\*.txt)" = 'hi there' ] + `, map[string]string{"*.txt": "hi there"}) + + if err != nil { + // Normally we would do c.Fatal(err) here but given that + // the odds of this failing are so rare, it must be because + // the OS we're running the client on doesn't support * in + // filenames (like windows). So, instead of failing the test + // just let it pass. Then we don't need to explicitly + // say which OSs this works on or not. + return + } + defer ctx.Close() + + _, err = buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatalf("should have built: %q", err) + } +} + +func (s *DockerSuite) TestBuildCopyWildcardCache(c *check.C) { + name := "testcopywildcardcache" + ctx, err := fakeContext(`FROM busybox + COPY file1.txt /tmp/`, + map[string]string{ + "file1.txt": "test1", + }) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + + // Now make sure we use a cache the 2nd time even with wild cards. 
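+ // (Editor's assumption: the builder keys the COPY cache on the checksum of the matched file contents, not on the literal wildcard pattern.)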
+ // Use the same context so the file is the same and the checksum will match + ctx.Add("Dockerfile", `FROM busybox + COPY file*.txt /tmp/`) + + id2, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + + if id1 != id2 { + c.Fatal("didn't use the cache") + } + +} + +func (s *DockerSuite) TestBuildAddSingleFileToNonExistingDir(c *check.C) { + name := "testaddsinglefiletononexistingdir" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +ADD test_file /test_dir/ +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + +} + +func (s *DockerSuite) TestBuildAddDirContentToRoot(c *check.C) { + name := "testadddircontenttoroot" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +ADD test_dir / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildAddDirContentToExistingDir(c *check.C) { + name := "testadddircontenttoexistingdir" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +ADD test_dir/ /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`, + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildAddWholeDirToRoot(c *check.C) { + name := "testaddwholedirtoroot" + ctx, err := fakeContext(fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +ADD test_dir /test_dir +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// Testing #5941 +func (s *DockerSuite) 
TestBuildAddEtcToRoot(c *check.C) { + name := "testaddetctoroot" + ctx, err := fakeContext(`FROM scratch +ADD . /`, + map[string]string{ + "etc/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// Testing #9401 +func (s *DockerSuite) TestBuildAddPreservesFilesSpecialBits(c *check.C) { + name := "testaddpreservesfilesspecialbits" + ctx, err := fakeContext(`FROM busybox +ADD suidbin /usr/bin/suidbin +RUN chmod 4755 /usr/bin/suidbin +RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ] +ADD ./data/ / +RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ]`, + map[string]string{ + "suidbin": "suidbin", + "/data/usr/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopySingleFileToRoot(c *check.C) { + name := "testcopysinglefiletoroot" + ctx, err := fakeContext(fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +COPY test_file / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// Issue #3960: "ADD src ." hangs - adapted for COPY +func (s *DockerSuite) TestBuildCopySingleFileToWorkdir(c *check.C) { + name := "testcopysinglefiletoworkdir" + ctx, err := fakeContext(`FROM busybox +COPY test_file .`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + errChan := make(chan error) + go func() { + _, err := buildImageFromContext(name, ctx, true) + errChan <- err + close(errChan) + }() + select { + case <-time.After(5 * time.Second): + c.Fatal("Build with adding to workdir timed out") + case err := <-errChan: + c.Assert(err, check.IsNil) + } +} + +func (s *DockerSuite) TestBuildCopySingleFileToExistDir(c *check.C) { + name := "testcopysinglefiletoexistdir" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_file /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopySingleFileToNonExistDir(c *check.C) { + name := "testcopysinglefiletononexistdir" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +COPY test_file /test_dir/ +RUN [ $(ls -l / | grep test_dir | awk '{print 
$3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopyDirContentToRoot(c *check.C) { + name := "testcopydircontenttoroot" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +COPY test_dir / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopyDirContentToExistDir(c *check.C) { + name := "testcopydircontenttoexistdir" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_dir/ /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`, + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopyWholeDirToRoot(c *check.C) { + name := "testcopywholedirtoroot" + ctx, err := fakeContext(fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +COPY test_dir /test_dir +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopyEtcToRoot(c *check.C) { + name := "testcopyetctoroot" + ctx, err := fakeContext(`FROM scratch +COPY . 
/`, + map[string]string{ + "etc/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopyDisallowRemote(c *check.C) { + name := "testcopydisallowremote" + _, out, err := buildImageWithOut(name, `FROM scratch +COPY https://index.docker.io/robots.txt /`, + true) + if err == nil || !strings.Contains(out, "Source can't be a URL for COPY") { + c.Fatalf("Error should be about disallowed remote source, got err: %s, out: %q", err, out) + } +} + +func (s *DockerSuite) TestBuildAddBadLinks(c *check.C) { + const ( + dockerfile = ` + FROM scratch + ADD links.tar / + ADD foo.txt /symlink/ + ` + targetFile = "foo.txt" + ) + var ( + name = "test-link-absolute" + ) + ctx, err := fakeContext(dockerfile, nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + tempDir, err := ioutil.TempDir("", "test-link-absolute-temp-") + if err != nil { + c.Fatalf("failed to create temporary directory: %s", err) + } + defer os.RemoveAll(tempDir) + + var symlinkTarget string + if runtime.GOOS == "windows" { + var driveLetter string + if abs, err := filepath.Abs(tempDir); err != nil { + c.Fatal(err) + } else { + driveLetter = abs[:1] + } + tempDirWithoutDrive := tempDir[2:] + symlinkTarget = fmt.Sprintf(`%s:\..\..\..\..\..\..\..\..\..\..\..\..%s`, driveLetter, tempDirWithoutDrive) + } else { + symlinkTarget = fmt.Sprintf("/../../../../../../../../../../../..%s", tempDir) + } + + tarPath := filepath.Join(ctx.Dir, "links.tar") + nonExistingFile := filepath.Join(tempDir, targetFile) + fooPath := filepath.Join(ctx.Dir, targetFile) + + tarOut, err := os.Create(tarPath) + if err != nil { + c.Fatal(err) + } + + tarWriter := tar.NewWriter(tarOut) + + header := &tar.Header{ + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: symlinkTarget, + Mode: 0755, + Uid: 0, + Gid: 0, + } + + err = tarWriter.WriteHeader(header) + if err != nil { + c.Fatal(err) + } + + tarWriter.Close() + tarOut.Close() + + foo, err := os.Create(fooPath) + if err != nil { + c.Fatal(err) + } + defer foo.Close() + + if _, err := foo.WriteString("test"); err != nil { + c.Fatal(err) + } + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + + if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) { + c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) + } + +} + +func (s *DockerSuite) TestBuildAddBadLinksVolume(c *check.C) { + const ( + dockerfileTemplate = ` + FROM busybox + RUN ln -s /../../../../../../../../%s /x + VOLUME /x + ADD foo.txt /x/` + targetFile = "foo.txt" + ) + var ( + name = "test-link-absolute-volume" + dockerfile = "" + ) + + tempDir, err := ioutil.TempDir("", "test-link-absolute-volume-temp-") + if err != nil { + c.Fatalf("failed to create temporary directory: %s", err) + } + defer os.RemoveAll(tempDir) + + dockerfile = fmt.Sprintf(dockerfileTemplate, tempDir) + nonExistingFile := filepath.Join(tempDir, targetFile) + + ctx, err := fakeContext(dockerfile, nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + fooPath := filepath.Join(ctx.Dir, targetFile) + + foo, err := os.Create(fooPath) + if err != nil { + c.Fatal(err) + } + defer foo.Close() + + if _, err := foo.WriteString("test"); err != nil { + c.Fatal(err) + } + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + + if _, err := os.Stat(nonExistingFile); err == nil || 
err != nil && !os.IsNotExist(err) { + c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) + } + +} + +// Issue #5270 - ensure we throw a better error than "unexpected EOF" +// when we can't access files in the context. +func (s *DockerSuite) TestBuildWithInaccessibleFilesInContext(c *check.C) { + testRequires(c, UnixCli) // test uses chown/chmod: not available on windows + + { + name := "testbuildinaccessiblefiles" + ctx, err := fakeContext("FROM scratch\nADD . /foo/", map[string]string{"fileWithoutReadAccess": "foo"}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + // This is used to ensure we detect inaccessible files early during build in the cli client + pathToFileWithoutReadAccess := filepath.Join(ctx.Dir, "fileWithoutReadAccess") + + if err = os.Chown(pathToFileWithoutReadAccess, 0, 0); err != nil { + c.Fatalf("failed to chown file to root: %s", err) + } + if err = os.Chmod(pathToFileWithoutReadAccess, 0700); err != nil { + c.Fatalf("failed to chmod file to 700: %s", err) + } + buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) + buildCmd.Dir = ctx.Dir + out, _, err := runCommandWithOutput(buildCmd) + if err == nil { + c.Fatalf("build should have failed: %s %s", err, out) + } + + // check if we've detected the failure before we started building + if !strings.Contains(out, "no permission to read from ") { + c.Fatalf("output should've contained the string: no permission to read from, but contained: %s", out) + } + + if !strings.Contains(out, "Error checking context") { + c.Fatalf("output should've contained the string: Error checking context") + } + } + { + name := "testbuildinaccessibledirectory" + ctx, err := fakeContext("FROM scratch\nADD . /foo/", map[string]string{"directoryWeCantStat/bar": "foo"}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + // This is used to ensure we detect inaccessible directories early during build in the cli client + pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") + pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") + + if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { + c.Fatalf("failed to chown directory to root: %s", err) + } + if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { + c.Fatalf("failed to chmod directory to 444: %s", err) + } + if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { + c.Fatalf("failed to chmod file to 700: %s", err) + } + + buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) + buildCmd.Dir = ctx.Dir + out, _, err := runCommandWithOutput(buildCmd) + if err == nil { + c.Fatalf("build should have failed: %s %s", err, out) + } + + // check if we've detected the failure before we started building + if !strings.Contains(out, "can't stat") { + c.Fatalf("output should've contained the string: can't stat, but got: %s", out) + } + + if !strings.Contains(out, "Error checking context") { + c.Fatalf("output should've contained the string: Error checking context\ngot:%s", out) + } + + } + { + name := "testlinksok" + ctx, err := fakeContext("FROM scratch\nADD . 
/foo/", nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + target := "../../../../../../../../../../../../../../../../../../../azA" + if err := os.Symlink(filepath.Join(ctx.Dir, "g"), target); err != nil { + c.Fatal(err) + } + defer os.Remove(target) + // This is used to ensure we don't follow links when checking if everything in the context is accessible + // This test doesn't require that we run commands as an unprivileged user + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + } + { + name := "testbuildignoredinaccessible" + ctx, err := fakeContext("FROM scratch\nADD . /foo/", + map[string]string{ + "directoryWeCantStat/bar": "foo", + ".dockerignore": "directoryWeCantStat", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + // This is used to ensure we don't try to add inaccessible files when they are ignored by a .dockerignore pattern + pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") + pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") + if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { + c.Fatalf("failed to chown directory to root: %s", err) + } + if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { + c.Fatalf("failed to chmod directory to 755: %s", err) + } + if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { + c.Fatalf("failed to chmod file to 444: %s", err) + } + + buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) + buildCmd.Dir = ctx.Dir + if out, _, err := runCommandWithOutput(buildCmd); err != nil { + c.Fatalf("build should have worked: %s %s", err, out) + } + + } +} + +func (s *DockerSuite) TestBuildForceRm(c *check.C) { + containerCountBefore, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + name := "testbuildforcerm" + ctx, err := fakeContext("FROM scratch\nRUN true\nRUN thiswillfail", nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + dockerCmdInDir(c, ctx.Dir, "build", "-t", name, "--force-rm", ".") + + containerCountAfter, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + + if containerCountBefore != containerCountAfter { + c.Fatalf("--force-rm shouldn't have left containers behind") + } + +} + +// Test that an infinite sleep during a build is killed if the client disconnects. +// This test is fairly hairy because there are lots of ways to race. +// Strategy: +// * Monitor the output of docker events starting from before +// * Run a 1-year-long sleep from a docker build. +// * When docker events sees container start, close the "docker build" command +// * Wait for docker events to emit a dying event. +func (s *DockerSuite) TestBuildCancelationKillsSleep(c *check.C) { + name := "testbuildcancelation" + + // (Note: one year, will never finish) + ctx, err := fakeContext("FROM busybox\nRUN sleep 31536000", nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + finish := make(chan struct{}) + defer close(finish) + + eventStart := make(chan struct{}) + eventDie := make(chan struct{}) + containerID := make(chan string) + + startEpoch := daemonTime(c).Unix() + // Watch for events since epoch. 
+ eventsCmd := exec.Command( + dockerBinary, "events", + "--since", strconv.FormatInt(startEpoch, 10)) + stdout, err := eventsCmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + if err := eventsCmd.Start(); err != nil { + c.Fatal(err) + } + defer eventsCmd.Process.Kill() + + // Goroutine responsible for watching start/die events from `docker events` + go func() { + cid := <-containerID + + matchStart := regexp.MustCompile(cid + `(.*) start$`) + matchDie := regexp.MustCompile(cid + `(.*) die$`) + + // + // Read lines of `docker events` looking for container start and stop. + // + scanner := bufio.NewScanner(stdout) + for scanner.Scan() { + switch { + case matchStart.MatchString(scanner.Text()): + close(eventStart) + case matchDie.MatchString(scanner.Text()): + close(eventDie) + } + } + }() + + buildCmd := exec.Command(dockerBinary, "build", "-t", name, ".") + buildCmd.Dir = ctx.Dir + + stdoutBuild, err := buildCmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + if err := buildCmd.Start(); err != nil { + c.Fatalf("failed to run build: %s", err) + } + + matchCID := regexp.MustCompile("Running in ") + scanner := bufio.NewScanner(stdoutBuild) + for scanner.Scan() { + line := scanner.Text() + if ok := matchCID.MatchString(line); ok { + containerID <- line[len(line)-12:] + break + } + } + + select { + case <-time.After(5 * time.Second): + c.Fatal("failed to observe build container start in timely fashion") + case <-eventStart: + // Proceeds from here when we see the container fly past in the + // output of "docker events". + // Now we know the container is running. + } + + // Send a kill to the `docker build` command. + // Causes the underlying build to be cancelled due to socket close. + if err := buildCmd.Process.Kill(); err != nil { + c.Fatalf("error killing build command: %s", err) + } + + // Get the exit status of `docker build`, check that it exited because it was killed. + if err := buildCmd.Wait(); err != nil && !isKilled(err) { + c.Fatalf("wait failed during build run: %T %s", err, err) + } + + select { + case <-time.After(5 * time.Second): + // If we don't get here in a timely fashion, it wasn't killed. + c.Fatal("container cancel did not succeed") + case <-eventDie: + // We saw the container shut down in the `docker events` stream, + // as expected. 
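+ // The two 5-second selects above bound both races in this test (observing + // the start event, then the die event), so a daemon that never kills the + // build container fails the test quickly instead of wedging the suite.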
+ } + +} + +func (s *DockerSuite) TestBuildRm(c *check.C) { + name := "testbuildrm" + ctx, err := fakeContext("FROM scratch\nADD foo /\nADD foo /", map[string]string{"foo": "bar"}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + { + containerCountBefore, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--rm", "-t", name, ".") + + if err != nil { + c.Fatal("failed to build the image", out) + } + + containerCountAfter, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + + if containerCountBefore != containerCountAfter { + c.Fatalf("--rm shouldn't have left containers behind") + } + deleteImages(name) + } + + { + containerCountBefore, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", name, ".") + + if err != nil { + c.Fatal("failed to build the image", out) + } + + containerCountAfter, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + + if containerCountBefore != containerCountAfter { + c.Fatalf("--rm shouldn't have left containers behind") + } + deleteImages(name) + } + + { + containerCountBefore, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--rm=false", "-t", name, ".") + + if err != nil { + c.Fatal("failed to build the image", out) + } + + containerCountAfter, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + + if containerCountBefore == containerCountAfter { + c.Fatalf("--rm=false should have left containers behind") + } + deleteImages(name) + + } + +} + +func (s *DockerSuite) TestBuildWithVolumes(c *check.C) { + var ( + result map[string]map[string]struct{} + name = "testbuildvolumes" + emptyMap = make(map[string]struct{}) + expected = map[string]map[string]struct{}{ + "/test1": emptyMap, + "/test2": emptyMap, + "/test3": emptyMap, + "/test4": emptyMap, + "/test5": emptyMap, + "/test6": emptyMap, + "[/test7": emptyMap, + "/test8]": emptyMap, + } + ) + _, err := buildImage(name, + `FROM scratch + VOLUME /test1 + VOLUME /test2 + VOLUME /test3 /test4 + VOLUME ["/test5", "/test6"] + VOLUME [/test7 /test8] + `, + true) + if err != nil { + c.Fatal(err) + } + res, err := inspectFieldJSON(name, "Config.Volumes") + if err != nil { + c.Fatal(err) + } + + err = unmarshalJSON([]byte(res), &result) + if err != nil { + c.Fatal(err) + } + + equal := reflect.DeepEqual(&result, &expected) + + if !equal { + c.Fatalf("Volumes %s, expected %s", result, expected) + } + +} + +func (s *DockerSuite) TestBuildMaintainer(c *check.C) { + name := "testbuildmaintainer" + expected := "dockerio" + _, err := buildImage(name, + `FROM scratch + MAINTAINER dockerio`, + true) + if err != nil { + c.Fatal(err) + } + res, err := inspectField(name, "Author") + if err != nil { + c.Fatal(err) + } + if res != expected { + c.Fatalf("Maintainer %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildUser(c *check.C) { + name := "testbuilduser" + expected := "dockerio" + _, err := buildImage(name, + `FROM busybox + RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd + USER dockerio + RUN [ $(whoami) = 'dockerio' ]`, + true) + if err != nil { + c.Fatal(err) + } + res, err := 
inspectField(name, "Config.User") + if err != nil { + c.Fatal(err) + } + if res != expected { + c.Fatalf("User %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildRelativeWorkdir(c *check.C) { + name := "testbuildrelativeworkdir" + expected := "/test2/test3" + _, err := buildImage(name, + `FROM busybox + RUN [ "$PWD" = '/' ] + WORKDIR test1 + RUN [ "$PWD" = '/test1' ] + WORKDIR /test2 + RUN [ "$PWD" = '/test2' ] + WORKDIR test3 + RUN [ "$PWD" = '/test2/test3' ]`, + true) + if err != nil { + c.Fatal(err) + } + res, err := inspectField(name, "Config.WorkingDir") + if err != nil { + c.Fatal(err) + } + if res != expected { + c.Fatalf("Workdir %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildWorkdirWithEnvVariables(c *check.C) { + name := "testbuildworkdirwithenvvariables" + expected := "/test1/test2" + _, err := buildImage(name, + `FROM busybox + ENV DIRPATH /test1 + ENV SUBDIRNAME test2 + WORKDIR $DIRPATH + WORKDIR $SUBDIRNAME/$MISSING_VAR`, + true) + if err != nil { + c.Fatal(err) + } + res, err := inspectField(name, "Config.WorkingDir") + if err != nil { + c.Fatal(err) + } + if res != expected { + c.Fatalf("Workdir %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildRelativeCopy(c *check.C) { + name := "testbuildrelativecopy" + dockerfile := ` + FROM busybox + WORKDIR /test1 + WORKDIR test2 + RUN [ "$PWD" = '/test1/test2' ] + COPY foo ./ + RUN [ "$(cat /test1/test2/foo)" = 'hello' ] + ADD foo ./bar/baz + RUN [ "$(cat /test1/test2/bar/baz)" = 'hello' ] + COPY foo ./bar/baz2 + RUN [ "$(cat /test1/test2/bar/baz2)" = 'hello' ] + WORKDIR .. + COPY foo ./ + RUN [ "$(cat /test1/foo)" = 'hello' ] + COPY foo /test3/ + RUN [ "$(cat /test3/foo)" = 'hello' ] + WORKDIR /test4 + COPY . . + RUN [ "$(cat /test4/foo)" = 'hello' ] + WORKDIR /test5/test6 + COPY foo ../ + RUN [ "$(cat /test5/foo)" = 'hello' ] + ` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + _, err = buildImageFromContext(name, ctx, false) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildEnv(c *check.C) { + name := "testbuildenv" + expected := "[PATH=/test:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PORT=2375]" + _, err := buildImage(name, + `FROM busybox + ENV PATH /test:$PATH + ENV PORT 2375 + RUN [ $(env | grep PORT) = 'PORT=2375' ]`, + true) + if err != nil { + c.Fatal(err) + } + res, err := inspectField(name, "Config.Env") + if err != nil { + c.Fatal(err) + } + if res != expected { + c.Fatalf("Env %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildContextCleanup(c *check.C) { + testRequires(c, SameHostDaemon) + + name := "testbuildcontextcleanup" + entries, err := ioutil.ReadDir("/var/lib/docker/tmp") + if err != nil { + c.Fatalf("failed to list contents of tmp dir: %s", err) + } + _, err = buildImage(name, + `FROM scratch + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + c.Fatal(err) + } + entriesFinal, err := ioutil.ReadDir("/var/lib/docker/tmp") + if err != nil { + c.Fatalf("failed to list contents of tmp dir: %s", err) + } + if err = compareDirectoryEntries(entries, entriesFinal); err != nil { + c.Fatalf("context should have been deleted, but wasn't") + } + +} + +func (s *DockerSuite) TestBuildContextCleanupFailedBuild(c *check.C) { + testRequires(c, SameHostDaemon) + + name := "testbuildcontextcleanup" + entries, err := ioutil.ReadDir("/var/lib/docker/tmp") + if err != nil { + c.Fatalf("failed to list 
contents of tmp dir: %s", err) + } + _, err = buildImage(name, + `FROM scratch + RUN /non/existing/command`, + true) + if err == nil { + c.Fatalf("expected build to fail, but it didn't") + } + entriesFinal, err := ioutil.ReadDir("/var/lib/docker/tmp") + if err != nil { + c.Fatalf("failed to list contents of tmp dir: %s", err) + } + if err = compareDirectoryEntries(entries, entriesFinal); err != nil { + c.Fatalf("context should have been deleted, but wasn't") + } + +} + +func (s *DockerSuite) TestBuildCmd(c *check.C) { + name := "testbuildcmd" + expected := "{[/bin/echo Hello World]}" + _, err := buildImage(name, + `FROM scratch + CMD ["/bin/echo", "Hello World"]`, + true) + if err != nil { + c.Fatal(err) + } + res, err := inspectField(name, "Config.Cmd") + if err != nil { + c.Fatal(err) + } + if res != expected { + c.Fatalf("Cmd %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildExpose(c *check.C) { + name := "testbuildexpose" + expected := "map[2375/tcp:{}]" + _, err := buildImage(name, + `FROM scratch + EXPOSE 2375`, + true) + if err != nil { + c.Fatal(err) + } + res, err := inspectField(name, "Config.ExposedPorts") + if err != nil { + c.Fatal(err) + } + if res != expected { + c.Fatalf("Exposed ports %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildExposeMorePorts(c *check.C) { + // start building docker file with a large number of ports + portList := make([]string, 50) + line := make([]string, 100) + expectedPorts := make([]int, len(portList)*len(line)) + for i := 0; i < len(portList); i++ { + for j := 0; j < len(line); j++ { + p := i*len(line) + j + 1 + line[j] = strconv.Itoa(p) + expectedPorts[p-1] = p + } + if i == len(portList)-1 { + portList[i] = strings.Join(line, " ") + } else { + portList[i] = strings.Join(line, " ") + ` \` + } + } + + dockerfile := `FROM scratch + EXPOSE {{range .}} {{.}} + {{end}}` + tmpl := template.Must(template.New("dockerfile").Parse(dockerfile)) + buf := bytes.NewBuffer(nil) + tmpl.Execute(buf, portList) + + name := "testbuildexpose" + _, err := buildImage(name, buf.String(), true) + if err != nil { + c.Fatal(err) + } + + // check if all the ports are saved inside Config.ExposedPorts + res, err := inspectFieldJSON(name, "Config.ExposedPorts") + if err != nil { + c.Fatal(err) + } + var exposedPorts map[string]interface{} + if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil { + c.Fatal(err) + } + + for _, p := range expectedPorts { + ep := fmt.Sprintf("%d/tcp", p) + if _, ok := exposedPorts[ep]; !ok { + c.Errorf("Port(%s) is not exposed", ep) + } else { + delete(exposedPorts, ep) + } + } + if len(exposedPorts) != 0 { + c.Errorf("Unexpected extra exposed ports %v", exposedPorts) + } +} + +func (s *DockerSuite) TestBuildExposeOrder(c *check.C) { + buildID := func(name, exposed string) string { + _, err := buildImage(name, fmt.Sprintf(`FROM scratch + EXPOSE %s`, exposed), true) + if err != nil { + c.Fatal(err) + } + id, err := inspectField(name, "Id") + if err != nil { + c.Fatal(err) + } + return id + } + + id1 := buildID("testbuildexpose1", "80 2375") + id2 := buildID("testbuildexpose2", "2375 80") + if id1 != id2 { + c.Errorf("EXPOSE should invalidate the cache only when ports actually changed") + } +} + +func (s *DockerSuite) TestBuildExposeUpperCaseProto(c *check.C) { + name := "testbuildexposeuppercaseproto" + expected := "map[5678/udp:{}]" + _, err := buildImage(name, + `FROM scratch + EXPOSE 5678/UDP`, + true) + if err != nil { + c.Fatal(err) + } + res, err := inspectField(name, 
"Config.ExposedPorts") + if err != nil { + c.Fatal(err) + } + if res != expected { + c.Fatalf("Exposed ports %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildEmptyEntrypointInheritance(c *check.C) { + name := "testbuildentrypointinheritance" + name2 := "testbuildentrypointinheritance2" + + _, err := buildImage(name, + `FROM busybox + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + c.Fatal(err) + } + res, err := inspectField(name, "Config.Entrypoint") + if err != nil { + c.Fatal(err) + } + + expected := "{[/bin/echo]}" + if res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } + + _, err = buildImage(name2, + fmt.Sprintf(`FROM %s + ENTRYPOINT []`, name), + true) + if err != nil { + c.Fatal(err) + } + res, err = inspectField(name2, "Config.Entrypoint") + if err != nil { + c.Fatal(err) + } + + expected = "{[]}" + + if res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } + +} + +func (s *DockerSuite) TestBuildEmptyEntrypoint(c *check.C) { + name := "testbuildentrypoint" + expected := "{[]}" + + _, err := buildImage(name, + `FROM busybox + ENTRYPOINT []`, + true) + if err != nil { + c.Fatal(err) + } + res, err := inspectField(name, "Config.Entrypoint") + if err != nil { + c.Fatal(err) + } + if res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } + +} + +func (s *DockerSuite) TestBuildEntrypoint(c *check.C) { + name := "testbuildentrypoint" + expected := "{[/bin/echo]}" + _, err := buildImage(name, + `FROM scratch + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + c.Fatal(err) + } + res, err := inspectField(name, "Config.Entrypoint") + if err != nil { + c.Fatal(err) + } + if res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } + +} + +// #6445 ensure ONBUILD triggers aren't committed to grandchildren +func (s *DockerSuite) TestBuildOnBuildLimitedInheritence(c *check.C) { + var ( + out2, out3 string + ) + { + name1 := "testonbuildtrigger1" + dockerfile1 := ` + FROM busybox + RUN echo "GRANDPARENT" + ONBUILD RUN echo "ONBUILD PARENT" + ` + ctx, err := fakeContext(dockerfile1, nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + out1, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", name1, ".") + if err != nil { + c.Fatalf("build failed to complete: %s, %v", out1, err) + } + } + { + name2 := "testonbuildtrigger2" + dockerfile2 := ` + FROM testonbuildtrigger1 + ` + ctx, err := fakeContext(dockerfile2, nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + out2, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-t", name2, ".") + if err != nil { + c.Fatalf("build failed to complete: %s, %v", out2, err) + } + } + { + name3 := "testonbuildtrigger3" + dockerfile3 := ` + FROM testonbuildtrigger2 + ` + ctx, err := fakeContext(dockerfile3, nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + out3, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-t", name3, ".") + if err != nil { + c.Fatalf("build failed to complete: %s, %v", out3, err) + } + + } + + // ONBUILD should be run in second build. + if !strings.Contains(out2, "ONBUILD PARENT") { + c.Fatalf("ONBUILD instruction did not run in child of ONBUILD parent") + } + + // ONBUILD should *not* be run in third build. 
+ if strings.Contains(out3, "ONBUILD PARENT") { + c.Fatalf("ONBUILD instruction ran in grandchild of ONBUILD parent") + } + +} + +func (s *DockerSuite) TestBuildWithCache(c *check.C) { + name := "testbuildwithcache" + id1, err := buildImage(name, + `FROM scratch + MAINTAINER dockerio + EXPOSE 5432 + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImage(name, + `FROM scratch + MAINTAINER dockerio + EXPOSE 5432 + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + c.Fatal(err) + } + if id1 != id2 { + c.Fatal("The cache should have been used but wasn't.") + } +} + +func (s *DockerSuite) TestBuildWithoutCache(c *check.C) { + name := "testbuildwithoutcache" + name2 := "testbuildwithoutcache2" + id1, err := buildImage(name, + `FROM scratch + MAINTAINER dockerio + EXPOSE 5432 + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + c.Fatal(err) + } + + id2, err := buildImage(name2, + `FROM scratch + MAINTAINER dockerio + EXPOSE 5432 + ENTRYPOINT ["/bin/echo"]`, + false) + if err != nil { + c.Fatal(err) + } + if id1 == id2 { + c.Fatal("The cache should have been invalidated but wasn't.") + } +} + +func (s *DockerSuite) TestBuildConditionalCache(c *check.C) { + name := "testbuildconditionalcache" + + dockerfile := ` + FROM busybox + ADD foo /tmp/` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatalf("Error building #1: %s", err) + } + + if err := ctx.Add("foo", "bye"); err != nil { + c.Fatalf("Error modifying foo: %s", err) + } + + id2, err := buildImageFromContext(name, ctx, false) + if err != nil { + c.Fatalf("Error building #2: %s", err) + } + if id2 == id1 { + c.Fatal("Should not have used the cache") + } + + id3, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatalf("Error building #3: %s", err) + } + if id3 != id2 { + c.Fatal("Should have used the cache") + } +} + +func (s *DockerSuite) TestBuildAddLocalFileWithCache(c *check.C) { + name := "testbuildaddlocalfilewithcache" + name2 := "testbuildaddlocalfilewithcache2" + dockerfile := ` + FROM busybox + MAINTAINER dockerio + ADD foo /usr/lib/bla/bar + RUN [ "$(cat /usr/lib/bla/bar)" = "hello" ]` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name2, ctx, true) + if err != nil { + c.Fatal(err) + } + if id1 != id2 { + c.Fatal("The cache should have been used but wasn't.") + } +} + +func (s *DockerSuite) TestBuildAddMultipleLocalFileWithCache(c *check.C) { + name := "testbuildaddmultiplelocalfilewithcache" + name2 := "testbuildaddmultiplelocalfilewithcache2" + dockerfile := ` + FROM busybox + MAINTAINER dockerio + ADD foo Dockerfile /usr/lib/bla/ + RUN [ "$(cat /usr/lib/bla/foo)" = "hello" ]` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name2, ctx, true) + if err != nil { + c.Fatal(err) + } + if id1 != id2 { + c.Fatal("The cache should have been used but wasn't.") + } +} + +func (s *DockerSuite) TestBuildAddLocalFileWithoutCache(c *check.C) { + name := "testbuildaddlocalfilewithoutcache" + 
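// The boolean passed to buildImageFromContext toggles cache use; the second + // build below passes false, so it must yield a new image ID even though the + // Dockerfile and context are unchanged. + 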
name2 := "testbuildaddlocalfilewithoutcache2" + dockerfile := ` + FROM busybox + MAINTAINER dockerio + ADD foo /usr/lib/bla/bar + RUN [ "$(cat /usr/lib/bla/bar)" = "hello" ]` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name2, ctx, false) + if err != nil { + c.Fatal(err) + } + if id1 == id2 { + c.Fatal("The cache should have been invalided but hasn't.") + } +} + +func (s *DockerSuite) TestBuildCopyDirButNotFile(c *check.C) { + name := "testbuildcopydirbutnotfile" + name2 := "testbuildcopydirbutnotfile2" + dockerfile := ` + FROM scratch + COPY dir /tmp/` + ctx, err := fakeContext(dockerfile, map[string]string{ + "dir/foo": "hello", + }) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + // Check that adding file with similar name doesn't mess with cache + if err := ctx.Add("dir_file", "hello2"); err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name2, ctx, true) + if err != nil { + c.Fatal(err) + } + if id1 != id2 { + c.Fatal("The cache should have been used but wasn't") + } +} + +func (s *DockerSuite) TestBuildAddCurrentDirWithCache(c *check.C) { + name := "testbuildaddcurrentdirwithcache" + name2 := name + "2" + name3 := name + "3" + name4 := name + "4" + dockerfile := ` + FROM scratch + MAINTAINER dockerio + ADD . /usr/lib/bla` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + // Check that adding file invalidate cache of "ADD ." + if err := ctx.Add("bar", "hello2"); err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name2, ctx, true) + if err != nil { + c.Fatal(err) + } + if id1 == id2 { + c.Fatal("The cache should have been invalided but hasn't.") + } + // Check that changing file invalidate cache of "ADD ." + if err := ctx.Add("foo", "hello1"); err != nil { + c.Fatal(err) + } + id3, err := buildImageFromContext(name3, ctx, true) + if err != nil { + c.Fatal(err) + } + if id2 == id3 { + c.Fatal("The cache should have been invalided but hasn't.") + } + // Check that changing file to same content with different mtime does not + // invalidate cache of "ADD ." + time.Sleep(1 * time.Second) // wait second because of mtime precision + if err := ctx.Add("foo", "hello1"); err != nil { + c.Fatal(err) + } + id4, err := buildImageFromContext(name4, ctx, true) + if err != nil { + c.Fatal(err) + } + if id3 != id4 { + c.Fatal("The cache should have been used but hasn't.") + } +} + +func (s *DockerSuite) TestBuildAddCurrentDirWithoutCache(c *check.C) { + name := "testbuildaddcurrentdirwithoutcache" + name2 := "testbuildaddcurrentdirwithoutcache2" + dockerfile := ` + FROM scratch + MAINTAINER dockerio + ADD . 
/usr/lib/bla` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name2, ctx, false) + if err != nil { + c.Fatal(err) + } + if id1 == id2 { + c.Fatal("The cache should have been invalidated but wasn't.") + } +} + +func (s *DockerSuite) TestBuildAddRemoteFileWithCache(c *check.C) { + name := "testbuildaddremotefilewithcache" + server, err := fakeStorage(map[string]string{ + "baz": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + id1, err := buildImage(name, + fmt.Sprintf(`FROM scratch + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL()), + true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImage(name, + fmt.Sprintf(`FROM scratch + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL()), + true) + if err != nil { + c.Fatal(err) + } + if id1 != id2 { + c.Fatal("The cache should have been used but wasn't.") + } +} + +func (s *DockerSuite) TestBuildAddRemoteFileWithoutCache(c *check.C) { + name := "testbuildaddremotefilewithoutcache" + name2 := "testbuildaddremotefilewithoutcache2" + server, err := fakeStorage(map[string]string{ + "baz": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + id1, err := buildImage(name, + fmt.Sprintf(`FROM scratch + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL()), + true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImage(name2, + fmt.Sprintf(`FROM scratch + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL()), + false) + if err != nil { + c.Fatal(err) + } + if id1 == id2 { + c.Fatal("The cache should have been invalidated but wasn't.") + } +} + +func (s *DockerSuite) TestBuildAddRemoteFileMTime(c *check.C) { + name := "testbuildaddremotefilemtime" + name2 := name + "2" + name3 := name + "3" + + files := map[string]string{"baz": "hello"} + server, err := fakeStorage(files) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + ctx, err := fakeContext(fmt.Sprintf(`FROM scratch + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL()), nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + + id2, err := buildImageFromContext(name2, ctx, true) + if err != nil { + c.Fatal(err) + } + if id1 != id2 { + c.Fatal("The cache should have been used but wasn't - #1") + } + + // Now create a different server with the same contents (causes a different mtime) + // The cache should still be used + + // allow some time for clock to pass as mtime precision is only 1s + time.Sleep(2 * time.Second) + + server2, err := fakeStorage(files) + if err != nil { + c.Fatal(err) + } + defer server2.Close() + + ctx2, err := fakeContext(fmt.Sprintf(`FROM scratch + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server2.URL()), nil) + if err != nil { + c.Fatal(err) + } + defer ctx2.Close() + id3, err := buildImageFromContext(name3, ctx2, true) + if err != nil { + c.Fatal(err) + } + if id1 != id3 { + c.Fatal("The cache should have been used but wasn't") + } +} + +func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithCache(c *check.C) { + name := "testbuildaddlocalandremotefilewithcache" + server, err := fakeStorage(map[string]string{ + "baz": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + ctx, err := 
fakeContext(fmt.Sprintf(`FROM scratch + MAINTAINER dockerio + ADD foo /usr/lib/bla/bar + ADD %s/baz /usr/lib/baz/quux`, server.URL()), + map[string]string{ + "foo": "hello world", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + if id1 != id2 { + c.Fatal("The cache should have been used but wasn't.") + } +} + +func testContextTar(c *check.C, compression archive.Compression) { + ctx, err := fakeContext( + `FROM busybox +ADD foo /foo +CMD ["cat", "/foo"]`, + map[string]string{ + "foo": "bar", + }, + ) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + context, err := archive.Tar(ctx.Dir, compression) + if err != nil { + c.Fatalf("failed to build context tar: %v", err) + } + name := "contexttar" + buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-") + buildCmd.Stdin = context + + if out, _, err := runCommandWithOutput(buildCmd); err != nil { + c.Fatalf("build failed to complete: %v %v", out, err) + } +} + +func (s *DockerSuite) TestBuildContextTarGzip(c *check.C) { + testContextTar(c, archive.Gzip) +} + +func (s *DockerSuite) TestBuildContextTarNoCompression(c *check.C) { + testContextTar(c, archive.Uncompressed) +} + +func (s *DockerSuite) TestBuildNoContext(c *check.C) { + buildCmd := exec.Command(dockerBinary, "build", "-t", "nocontext", "-") + buildCmd.Stdin = strings.NewReader("FROM busybox\nCMD echo ok\n") + + if out, _, err := runCommandWithOutput(buildCmd); err != nil { + c.Fatalf("build failed to complete: %v %v", out, err) + } + + if out, _ := dockerCmd(c, "run", "--rm", "nocontext"); out != "ok\n" { + c.Fatalf("run produced invalid output: %q, expected %q", out, "ok\n") + } +} + +// TODO: TestCaching +func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithoutCache(c *check.C) { + name := "testbuildaddlocalandremotefilewithoutcache" + name2 := "testbuildaddlocalandremotefilewithoutcache2" + server, err := fakeStorage(map[string]string{ + "baz": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + ctx, err := fakeContext(fmt.Sprintf(`FROM scratch + MAINTAINER dockerio + ADD foo /usr/lib/bla/bar + ADD %s/baz /usr/lib/baz/quux`, server.URL()), + map[string]string{ + "foo": "hello world", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name2, ctx, false) + if err != nil { + c.Fatal(err) + } + if id1 == id2 { + c.Fatal("The cache should have been invalidated but wasn't.") + } +} + +func (s *DockerSuite) TestBuildWithVolumeOwnership(c *check.C) { + name := "testbuildimg" + + _, err := buildImage(name, + `FROM busybox:latest + RUN mkdir /test && chown daemon:daemon /test && chmod 0600 /test + VOLUME /test`, + true) + + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--rm", "testbuildimg", "ls", "-la", "/test") + + if expected := "drw-------"; !strings.Contains(out, expected) { + c.Fatalf("expected %s, received %s", expected, out) + } + + if expected := "daemon daemon"; !strings.Contains(out, expected) { + c.Fatalf("expected %s, received %s", expected, out) + } + +} + +// Testing #1405 - config.Cmd does not get cleaned up if +// utilizing cache +func (s *DockerSuite) TestBuildEntrypointRunCleanup(c *check.C) { + name := "testbuildcmdcleanup" + if _, err := buildImage(name, + `FROM busybox + RUN echo 
"hello"`, + true); err != nil { + c.Fatal(err) + } + + ctx, err := fakeContext(`FROM busybox + RUN echo "hello" + ADD foo /foo + ENTRYPOINT ["/bin/echo"]`, + map[string]string{ + "foo": "hello", + }) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + res, err := inspectField(name, "Config.Cmd") + if err != nil { + c.Fatal(err) + } + // Cmd must be cleaned up + if res != "" { + c.Fatalf("Cmd %s, expected nil", res) + } +} + +func (s *DockerSuite) TestBuildForbiddenContextPath(c *check.C) { + name := "testbuildforbidpath" + ctx, err := fakeContext(`FROM scratch + ADD ../../ test/ + `, + map[string]string{ + "test.txt": "test1", + "other.txt": "other", + }) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + + expected := "Forbidden path outside the build context: ../../ " + if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { + c.Fatalf("Wrong error: (should contain \"%s\") got:\n%v", expected, err) + } + +} + +func (s *DockerSuite) TestBuildAddFileNotFound(c *check.C) { + name := "testbuildaddnotfound" + ctx, err := fakeContext(`FROM scratch + ADD foo /usr/local/bar`, + map[string]string{"bar": "hello"}) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + if _, err := buildImageFromContext(name, ctx, true); err != nil { + if !strings.Contains(err.Error(), "foo: no such file or directory") { + c.Fatalf("Wrong error %v, must be about missing foo file or directory", err) + } + } else { + c.Fatal("Error must not be nil") + } +} + +func (s *DockerSuite) TestBuildInheritance(c *check.C) { + name := "testbuildinheritance" + + _, err := buildImage(name, + `FROM scratch + EXPOSE 2375`, + true) + if err != nil { + c.Fatal(err) + } + ports1, err := inspectField(name, "Config.ExposedPorts") + if err != nil { + c.Fatal(err) + } + + _, err = buildImage(name, + fmt.Sprintf(`FROM %s + ENTRYPOINT ["/bin/echo"]`, name), + true) + if err != nil { + c.Fatal(err) + } + + res, err := inspectField(name, "Config.Entrypoint") + if err != nil { + c.Fatal(err) + } + if expected := "{[/bin/echo]}"; res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } + ports2, err := inspectField(name, "Config.ExposedPorts") + if err != nil { + c.Fatal(err) + } + if ports1 != ports2 { + c.Fatalf("Ports must be same: %s != %s", ports1, ports2) + } +} + +func (s *DockerSuite) TestBuildFails(c *check.C) { + name := "testbuildfails" + _, err := buildImage(name, + `FROM busybox + RUN sh -c "exit 23"`, + true) + if err != nil { + if !strings.Contains(err.Error(), "returned a non-zero code: 23") { + c.Fatalf("Wrong error %v, must be about non-zero code 23", err) + } + } else { + c.Fatal("Error must not be nil") + } +} + +func (s *DockerSuite) TestBuildFailsDockerfileEmpty(c *check.C) { + name := "testbuildfails" + _, err := buildImage(name, ``, true) + if err != nil { + if !strings.Contains(err.Error(), "The Dockerfile (Dockerfile) cannot be empty") { + c.Fatalf("Wrong error %v, must be about empty Dockerfile", err) + } + } else { + c.Fatal("Error must not be nil") + } +} + +func (s *DockerSuite) TestBuildOnBuild(c *check.C) { + name := "testbuildonbuild" + _, err := buildImage(name, + `FROM busybox + ONBUILD RUN touch foobar`, + true) + if err != nil { + c.Fatal(err) + } + _, err = buildImage(name, + fmt.Sprintf(`FROM %s + RUN [ -f foobar ]`, name), + true) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) 
TestBuildOnBuildForbiddenChained(c *check.C) { + name := "testbuildonbuildforbiddenchained" + _, err := buildImage(name, + `FROM busybox + ONBUILD ONBUILD RUN touch foobar`, + true) + if err != nil { + if !strings.Contains(err.Error(), "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") { + c.Fatalf("Wrong error %v, must be about chaining ONBUILD", err) + } + } else { + c.Fatal("Error must not be nil") + } +} + +func (s *DockerSuite) TestBuildOnBuildForbiddenFrom(c *check.C) { + name := "testbuildonbuildforbiddenfrom" + _, err := buildImage(name, + `FROM busybox + ONBUILD FROM scratch`, + true) + if err != nil { + if !strings.Contains(err.Error(), "FROM isn't allowed as an ONBUILD trigger") { + c.Fatalf("Wrong error %v, must be about FROM forbidden", err) + } + } else { + c.Fatal("Error must not be nil") + } +} + +func (s *DockerSuite) TestBuildOnBuildForbiddenMaintainer(c *check.C) { + name := "testbuildonbuildforbiddenmaintainer" + _, err := buildImage(name, + `FROM busybox + ONBUILD MAINTAINER docker.io`, + true) + if err != nil { + if !strings.Contains(err.Error(), "MAINTAINER isn't allowed as an ONBUILD trigger") { + c.Fatalf("Wrong error %v, must be about MAINTAINER forbidden", err) + } + } else { + c.Fatal("Error must not be nil") + } +} + +// gh #2446 +func (s *DockerSuite) TestBuildAddToSymlinkDest(c *check.C) { + name := "testbuildaddtosymlinkdest" + ctx, err := fakeContext(`FROM busybox + RUN mkdir /foo + RUN ln -s /foo /bar + ADD foo /bar/ + RUN [ -f /bar/foo ] + RUN [ -f /foo/foo ]`, + map[string]string{ + "foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildEscapeWhitespace(c *check.C) { + name := "testbuildescaping" + + _, err := buildImage(name, ` + FROM busybox + MAINTAINER "Docker \ +IO " + `, true) + if err != nil { + c.Fatal(err) + } + + res, err := inspectField(name, "Author") + + if err != nil { + c.Fatal(err) + } + + if res != "\"Docker IO \"" { + c.Fatalf("Parsed string did not match the escaped string. Got: %q", res) + } + +} + +func (s *DockerSuite) TestBuildVerifyIntString(c *check.C) { + // Verify that strings that look like ints are still passed as strings + name := "testbuildstringing" + + _, err := buildImage(name, ` + FROM busybox + MAINTAINER 123 + `, true) + + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "inspect", name) + + if !strings.Contains(out, "\"123\"") { + c.Fatalf("Output does not contain the int as a string:\n%s", out) + } + +} + +func (s *DockerSuite) TestBuildDockerignore(c *check.C) { + name := "testbuilddockerignore" + dockerfile := ` + FROM busybox + ADD . /bla + RUN [[ -f /bla/src/x.go ]] + RUN [[ -f /bla/Makefile ]] + RUN [[ ! -e /bla/src/_vendor ]] + RUN [[ ! -e /bla/.gitignore ]] + RUN [[ ! -e /bla/README.md ]] + RUN [[ ! -e /bla/dir/foo ]] + RUN [[ ! -e /bla/foo ]] + RUN [[ ! 
-e /bla/.git ]]` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Makefile": "all:", + ".git/HEAD": "ref: foo", + "src/x.go": "package main", + "src/_vendor/v.go": "package main", + "dir/foo": "", + ".gitignore": "", + "README.md": "readme", + ".dockerignore": ` +.git +pkg +.gitignore +src/_vendor +*.md +dir`, + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildDockerignoreCleanPaths(c *check.C) { + name := "testbuilddockerignorecleanpaths" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN (! ls /tmp/foo) && (! ls /tmp/foo2) && (! ls /tmp/dir1/foo)` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "foo", + "foo2": "foo2", + "dir1/foo": "foo in dir1", + ".dockerignore": "./foo\ndir1//foo\n./dir1/../foo2", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildDockerignoreExceptions(c *check.C) { + name := "testbuilddockerignoreexceptions" + dockerfile := ` + FROM busybox + ADD . /bla + RUN [[ -f /bla/src/x.go ]] + RUN [[ -f /bla/Makefile ]] + RUN [[ ! -e /bla/src/_vendor ]] + RUN [[ ! -e /bla/.gitignore ]] + RUN [[ ! -e /bla/README.md ]] + RUN [[ -e /bla/dir/dir/foo ]] + RUN [[ ! -e /bla/dir/foo1 ]] + RUN [[ -f /bla/dir/e ]] + RUN [[ -f /bla/dir/e-dir/foo ]] + RUN [[ ! -e /bla/foo ]] + RUN [[ ! -e /bla/.git ]]` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Makefile": "all:", + ".git/HEAD": "ref: foo", + "src/x.go": "package main", + "src/_vendor/v.go": "package main", + "dir/foo": "", + "dir/foo1": "", + "dir/dir/f1": "", + "dir/dir/foo": "", + "dir/e": "", + "dir/e-dir/foo": "", + ".gitignore": "", + "README.md": "readme", + ".dockerignore": ` +.git +pkg +.gitignore +src/_vendor +*.md +dir +!dir/e* +!dir/dir/foo`, + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildDockerignoringDockerfile(c *check.C) { + name := "testbuilddockerignoredockerfile" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN ! ls /tmp/Dockerfile + RUN ls /tmp/.dockerignore` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": dockerfile, + ".dockerignore": "Dockerfile\n", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't ignore Dockerfile correctly:%s", err) + } + + // now try it with ./Dockerfile + ctx.Add(".dockerignore", "./Dockerfile\n") + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't ignore ./Dockerfile correctly:%s", err) + } + +} + +func (s *DockerSuite) TestBuildDockerignoringRenamedDockerfile(c *check.C) { + name := "testbuilddockerignoredockerfile" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN ls /tmp/Dockerfile + RUN ! 
ls /tmp/MyDockerfile + RUN ls /tmp/.dockerignore` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": "Should not use me", + "MyDockerfile": dockerfile, + ".dockerignore": "MyDockerfile\n", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't ignore MyDockerfile correctly:%s", err) + } + + // now try it with ./MyDockerfile + ctx.Add(".dockerignore", "./MyDockerfile\n") + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't ignore ./MyDockerfile correctly:%s", err) + } + +} + +func (s *DockerSuite) TestBuildDockerignoringDockerignore(c *check.C) { + name := "testbuilddockerignoredockerignore" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN ! ls /tmp/.dockerignore + RUN ls /tmp/Dockerfile` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": dockerfile, + ".dockerignore": ".dockerignore\n", + }) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't ignore .dockerignore correctly:%s", err) + } +} + +func (s *DockerSuite) TestBuildDockerignoreTouchDockerfile(c *check.C) { + var id1 string + var id2 string + + name := "testbuilddockerignoretouchdockerfile" + dockerfile := ` + FROM busybox + ADD . /tmp/` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": dockerfile, + ".dockerignore": "Dockerfile\n", + }) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + + if id1, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't build it correctly:%s", err) + } + + if id2, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't build it correctly:%s", err) + } + if id1 != id2 { + c.Fatalf("Didn't use the cache - 1") + } + + // Now make sure touching Dockerfile doesn't invalidate the cache + if err = ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil { + c.Fatalf("Didn't add Dockerfile: %s", err) + } + if id2, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't build it correctly:%s", err) + } + if id1 != id2 { + c.Fatalf("Didn't use the cache - 2") + } + + // One more time but just 'touch' it instead of changing the content + if err = ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil { + c.Fatalf("Didn't add Dockerfile: %s", err) + } + if id2, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't build it correctly:%s", err) + } + if id1 != id2 { + c.Fatalf("Didn't use the cache - 3") + } + +} + +func (s *DockerSuite) TestBuildDockerignoringWholeDir(c *check.C) { + name := "testbuilddockerignorewholedir" + dockerfile := ` + FROM busybox + COPY . / + RUN [[ ! 
-e /.gitignore ]] + RUN [[ -f /Makefile ]]` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": "FROM scratch", + "Makefile": "all:", + ".gitignore": "", + ".dockerignore": ".*\n", + }) + c.Assert(err, check.IsNil) + defer ctx.Close() + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + + c.Assert(ctx.Add(".dockerfile", "*"), check.IsNil) + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + + c.Assert(ctx.Add(".dockerfile", "."), check.IsNil) + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + + c.Assert(ctx.Add(".dockerfile", "?"), check.IsNil) + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildDockerignoringBadExclusion(c *check.C) { + name := "testbuilddockerignorewholedir" + dockerfile := ` + FROM busybox + COPY . / + RUN [[ ! -e /.gitignore ]] + RUN [[ -f /Makefile ]]` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": "FROM scratch", + "Makefile": "all:", + ".gitignore": "", + ".dockerignore": "!\n", + }) + c.Assert(err, check.IsNil) + defer ctx.Close() + if _, err = buildImageFromContext(name, ctx, true); err == nil { + c.Fatalf("Build was supposed to fail but didn't") + } + + if err.Error() != "failed to build the image: Error checking context: 'Illegal exclusion pattern: !'.\n" { + c.Fatalf("Incorrect output, got:%q", err.Error()) + } +} + +func (s *DockerSuite) TestBuildLineBreak(c *check.C) { + name := "testbuildlinebreak" + _, err := buildImage(name, + `FROM busybox +RUN sh -c 'echo root:testpass \ + > /tmp/passwd' +RUN mkdir -p /var/run/sshd +RUN [ "$(cat /tmp/passwd)" = "root:testpass" ] +RUN [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]`, + true) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildEOLInLine(c *check.C) { + name := "testbuildeolinline" + _, err := buildImage(name, + `FROM busybox +RUN sh -c 'echo root:testpass > /tmp/passwd' +RUN echo "foo \n bar"; echo "baz" +RUN mkdir -p /var/run/sshd +RUN [ "$(cat /tmp/passwd)" = "root:testpass" ] +RUN [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]`, + true) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCommentsShebangs(c *check.C) { + name := "testbuildcomments" + _, err := buildImage(name, + `FROM busybox +# This is an ordinary comment. +RUN { echo '#!/bin/sh'; echo 'echo hello world'; } > /hello.sh +RUN [ ! 
-x /hello.sh ] +# comment with line break \ +RUN chmod +x /hello.sh +RUN [ -x /hello.sh ] +RUN [ "$(cat /hello.sh)" = $'#!/bin/sh\necho hello world' ] +RUN [ "$(/hello.sh)" = "hello world" ]`, + true) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildUsersAndGroups(c *check.C) { + name := "testbuildusers" + _, err := buildImage(name, + `FROM busybox + +# Make sure our defaults work +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)" = '0:0/root:root' ] + +# TODO decide if "args.user = strconv.Itoa(syscall.Getuid())" is acceptable behavior for changeUser in sysvinit instead of "return nil" when "USER" isn't specified (so that we get the proper group list even if that is the empty list, even in the default case of not supplying an explicit USER to run as, which implies USER 0) +USER root +RUN [ "$(id -G):$(id -Gn)" = '0 10:root wheel' ] + +# Setup dockerio user and group +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group + +# Make sure we can switch to our user and all the information is exactly as we expect it to be +USER dockerio +RUN id -G +RUN id -Gn +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] + +# Switch back to root and double check that worked exactly as we might expect it to +USER root +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '0:0/root:root/0 10:root wheel' ] + +# Add a "supplementary" group for our dockerio user +RUN echo 'supplementary:x:1002:dockerio' >> /etc/group + +# ... and then go verify that we get it like we expect +USER dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ] +USER 1001 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ] + +# super test the new "user:group" syntax +USER dockerio:dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER 1001:dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER dockerio:1001 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER 1001:1001 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER dockerio:supplementary +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] +USER dockerio:1002 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] +USER 1001:supplementary +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] +USER 1001:1002 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] + +# make sure unknown uid/gid still works properly +USER 1042:1043 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1042:1043/1042:1043/1043:1043' ]`, + true) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildEnvUsage(c *check.C) { + name := "testbuildenvusage" + dockerfile := `FROM busybox +ENV HOME /root +ENV PATH $HOME/bin:$PATH +ENV PATH /tmp:$PATH +RUN [ "$PATH" = 
"/tmp:$HOME/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ] +ENV FOO /foo/baz +ENV BAR /bar +ENV BAZ $BAR +ENV FOOPATH $PATH:$FOO +RUN [ "$BAR" = "$BAZ" ] +RUN [ "$FOOPATH" = "$PATH:/foo/baz" ] +ENV FROM hello/docker/world +ENV TO /docker/world/hello +ADD $FROM $TO +RUN [ "$(cat $TO)" = "hello" ] +ENV abc=def +ENV ghi=$abc +RUN [ "$ghi" = "def" ] +` + ctx, err := fakeContext(dockerfile, map[string]string{ + "hello/docker/world": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + _, err = buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildEnvUsage2(c *check.C) { + name := "testbuildenvusage2" + dockerfile := `FROM busybox +ENV abc=def +RUN [ "$abc" = "def" ] +ENV def="hello world" +RUN [ "$def" = "hello world" ] +ENV def=hello\ world +RUN [ "$def" = "hello world" ] +ENV v1=abc v2="hi there" +RUN [ "$v1" = "abc" ] +RUN [ "$v2" = "hi there" ] +ENV v3='boogie nights' v4="with'quotes too" +RUN [ "$v3" = "boogie nights" ] +RUN [ "$v4" = "with'quotes too" ] +ENV abc=zzz FROM=hello/docker/world +ENV abc=zzz TO=/docker/world/hello +ADD $FROM $TO +RUN [ "$(cat $TO)" = "hello" ] +ENV abc "zzz" +RUN [ $abc = "zzz" ] +ENV abc 'yyy' +RUN [ $abc = 'yyy' ] +ENV abc= +RUN [ "$abc" = "" ] + +# use grep to make sure if the builder substitutes \$foo by mistake +# we don't get a false positive +ENV abc=\$foo +RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo) +ENV abc \$foo +RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo) + +ENV abc=\'foo\' +RUN [ "$abc" = "'foo'" ] +ENV abc=\"foo\" +RUN [ "$abc" = "\"foo\"" ] +ENV abc "foo" +RUN [ "$abc" = "foo" ] +ENV abc 'foo' +RUN [ "$abc" = 'foo' ] +ENV abc \'foo\' +RUN [ "$abc" = "'foo'" ] +ENV abc \"foo\" +RUN [ "$abc" = '"foo"' ] + +ENV abc=ABC +RUN [ "$abc" = "ABC" ] +ENV def=${abc:-DEF} +RUN [ "$def" = "ABC" ] +ENV def=${ccc:-DEF} +RUN [ "$def" = "DEF" ] +ENV def=${ccc:-${def}xx} +RUN [ "$def" = "DEFxx" ] +ENV def=${def:+ALT} +RUN [ "$def" = "ALT" ] +ENV def=${def:+${abc}:} +RUN [ "$def" = "ABC:" ] +ENV def=${ccc:-\$abc:} +RUN [ "$def" = '$abc:' ] +ENV def=${ccc:-\${abc}:} +RUN [ "$def" = '${abc:}' ] +ENV mypath=${mypath:+$mypath:}/home +RUN [ "$mypath" = '/home' ] +ENV mypath=${mypath:+$mypath:}/away +RUN [ "$mypath" = '/home:/away' ] + +ENV e1=bar +ENV e2=$e1 +ENV e3=$e11 +ENV e4=\$e1 +ENV e5=\$e11 +RUN [ "$e0,$e1,$e2,$e3,$e4,$e5" = ',bar,bar,,$e1,$e11' ] + +ENV ee1 bar +ENV ee2 $ee1 +ENV ee3 $ee11 +ENV ee4 \$ee1 +ENV ee5 \$ee11 +RUN [ "$ee1,$ee2,$ee3,$ee4,$ee5" = 'bar,bar,,$ee1,$ee11' ] + +ENV eee1="foo" +ENV eee2='foo' +ENV eee3 "foo" +ENV eee4 'foo' +RUN [ "$eee1,$eee2,$eee3,$eee4" = 'foo,foo,foo,foo' ] + +` + ctx, err := fakeContext(dockerfile, map[string]string{ + "hello/docker/world": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + _, err = buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildAddScript(c *check.C) { + name := "testbuildaddscript" + dockerfile := ` +FROM busybox +ADD test /test +RUN ["chmod","+x","/test"] +RUN ["/test"] +RUN [ "$(cat /testfile)" = 'test!' ]` + ctx, err := fakeContext(dockerfile, map[string]string{ + "test": "#!/bin/sh\necho 'test!' 
> /testfile", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + _, err = buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildAddTar(c *check.C) { + name := "testbuildaddtar" + + ctx := func() *FakeContext { + dockerfile := ` +FROM busybox +ADD test.tar / +RUN cat /test/foo | grep Hi +ADD test.tar /test.tar +RUN cat /test.tar/test/foo | grep Hi +ADD test.tar /unlikely-to-exist +RUN cat /unlikely-to-exist/test/foo | grep Hi +ADD test.tar /unlikely-to-exist-trailing-slash/ +RUN cat /unlikely-to-exist-trailing-slash/test/foo | grep Hi +RUN mkdir /existing-directory +ADD test.tar /existing-directory +RUN cat /existing-directory/test/foo | grep Hi +ADD test.tar /existing-directory-trailing-slash/ +RUN cat /existing-directory-trailing-slash/test/foo | grep Hi` + tmpDir, err := ioutil.TempDir("", "fake-context") + testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) + if err != nil { + c.Fatalf("failed to create test.tar archive: %v", err) + } + defer testTar.Close() + + tw := tar.NewWriter(testTar) + + if err := tw.WriteHeader(&tar.Header{ + Name: "test/foo", + Size: 2, + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write([]byte("Hi")); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + c.Fatalf("failed to open destination dockerfile: %v", err) + } + return fakeContextFromDir(tmpDir) + }() + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("build failed to complete for TestBuildAddTar: %v", err) + } + +} + +func (s *DockerSuite) TestBuildAddTarXz(c *check.C) { + name := "testbuildaddtarxz" + + ctx := func() *FakeContext { + dockerfile := ` + FROM busybox + ADD test.tar.xz / + RUN cat /test/foo | grep Hi` + tmpDir, err := ioutil.TempDir("", "fake-context") + testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) + if err != nil { + c.Fatalf("failed to create test.tar archive: %v", err) + } + defer testTar.Close() + + tw := tar.NewWriter(testTar) + + if err := tw.WriteHeader(&tar.Header{ + Name: "test/foo", + Size: 2, + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write([]byte("Hi")); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + xzCompressCmd := exec.Command("xz", "-k", "test.tar") + xzCompressCmd.Dir = tmpDir + out, _, err := runCommandWithOutput(xzCompressCmd) + if err != nil { + c.Fatal(err, out) + } + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + c.Fatalf("failed to open destination dockerfile: %v", err) + } + return fakeContextFromDir(tmpDir) + }() + + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err) + } + +} + +func (s *DockerSuite) TestBuildAddTarXzGz(c *check.C) { + name := "testbuildaddtarxzgz" + + ctx := func() *FakeContext { + dockerfile := ` + FROM busybox + ADD test.tar.xz.gz / + RUN ls /test.tar.xz.gz` + tmpDir, err := ioutil.TempDir("", "fake-context") + testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) + if err != nil { + 
c.Fatalf("failed to create test.tar archive: %v", err) + } + defer testTar.Close() + + tw := tar.NewWriter(testTar) + + if err := tw.WriteHeader(&tar.Header{ + Name: "test/foo", + Size: 2, + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write([]byte("Hi")); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + xzCompressCmd := exec.Command("xz", "-k", "test.tar") + xzCompressCmd.Dir = tmpDir + out, _, err := runCommandWithOutput(xzCompressCmd) + if err != nil { + c.Fatal(err, out) + } + + gzipCompressCmd := exec.Command("gzip", "test.tar.xz") + gzipCompressCmd.Dir = tmpDir + out, _, err = runCommandWithOutput(gzipCompressCmd) + if err != nil { + c.Fatal(err, out) + } + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + c.Fatalf("failed to open destination dockerfile: %v", err) + } + return fakeContextFromDir(tmpDir) + }() + + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err) + } + +} + +func (s *DockerSuite) TestBuildFromGIT(c *check.C) { + name := "testbuildfromgit" + git, err := newFakeGit("repo", map[string]string{ + "Dockerfile": `FROM busybox + ADD first /first + RUN [ -f /first ] + MAINTAINER docker`, + "first": "test git data", + }, true) + if err != nil { + c.Fatal(err) + } + defer git.Close() + + _, err = buildImageFromPath(name, git.RepoURL, true) + if err != nil { + c.Fatal(err) + } + res, err := inspectField(name, "Author") + if err != nil { + c.Fatal(err) + } + if res != "docker" { + c.Fatalf("Maintainer should be docker, got %s", res) + } +} + +func (s *DockerSuite) TestBuildFromGITWithContext(c *check.C) { + name := "testbuildfromgit" + git, err := newFakeGit("repo", map[string]string{ + "docker/Dockerfile": `FROM busybox + ADD first /first + RUN [ -f /first ] + MAINTAINER docker`, + "docker/first": "test git data", + }, true) + if err != nil { + c.Fatal(err) + } + defer git.Close() + + u := fmt.Sprintf("%s#master:docker", git.RepoURL) + _, err = buildImageFromPath(name, u, true) + if err != nil { + c.Fatal(err) + } + res, err := inspectField(name, "Author") + if err != nil { + c.Fatal(err) + } + if res != "docker" { + c.Fatalf("Maintainer should be docker, got %s", res) + } +} + +func (s *DockerSuite) TestBuildFromRemoteTarball(c *check.C) { + name := "testbuildfromremotetarball" + + buffer := new(bytes.Buffer) + tw := tar.NewWriter(buffer) + defer tw.Close() + + dockerfile := []byte(`FROM busybox + MAINTAINER docker`) + if err := tw.WriteHeader(&tar.Header{ + Name: "Dockerfile", + Size: int64(len(dockerfile)), + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write(dockerfile); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + server, err := fakeBinaryStorage(map[string]*bytes.Buffer{ + "testT.tar": buffer, + }) + c.Assert(err, check.IsNil) + + defer server.Close() + + _, err = buildImageFromPath(name, server.URL()+"/testT.tar", true) + c.Assert(err, check.IsNil) + + res, err := inspectField(name, "Author") + c.Assert(err, check.IsNil) + + if res != "docker" { + c.Fatalf("Maintainer should be docker, got %s", res) + } +} + +func (s *DockerSuite) TestBuildCleanupCmdOnEntrypoint(c *check.C) 
+ name := "testbuildcmdcleanuponentrypoint"
+ if _, err := buildImage(name,
+ `FROM scratch
+ CMD ["test"]
+ ENTRYPOINT ["echo"]`,
+ true); err != nil {
+ c.Fatal(err)
+ }
+ if _, err := buildImage(name,
+ fmt.Sprintf(`FROM %s
+ ENTRYPOINT ["cat"]`, name),
+ true); err != nil {
+ c.Fatal(err)
+ }
+ res, err := inspectField(name, "Config.Cmd")
+ if err != nil {
+ c.Fatal(err)
+ }
+ if res != "<no value>" {
+ c.Fatalf("Cmd %s, expected nil", res)
+ }
+
+ res, err = inspectField(name, "Config.Entrypoint")
+ if err != nil {
+ c.Fatal(err)
+ }
+ if expected := "{[cat]}"; res != expected {
+ c.Fatalf("Entrypoint %s, expected %s", res, expected)
+ }
+}
+
+func (s *DockerSuite) TestBuildClearCmd(c *check.C) {
+ name := "testbuildclearcmd"
+ _, err := buildImage(name,
+ `From scratch
+ ENTRYPOINT ["/bin/bash"]
+ CMD []`,
+ true)
+ if err != nil {
+ c.Fatal(err)
+ }
+ res, err := inspectFieldJSON(name, "Config.Cmd")
+ if err != nil {
+ c.Fatal(err)
+ }
+ if res != "[]" {
+ c.Fatalf("Cmd %s, expected %s", res, "[]")
+ }
+}
+
+func (s *DockerSuite) TestBuildEmptyCmd(c *check.C) {
+ name := "testbuildemptycmd"
+ if _, err := buildImage(name, "FROM scratch\nMAINTAINER quux\n", true); err != nil {
+ c.Fatal(err)
+ }
+ res, err := inspectFieldJSON(name, "Config.Cmd")
+ if err != nil {
+ c.Fatal(err)
+ }
+ if res != "null" {
+ c.Fatalf("Cmd %s, expected %s", res, "null")
+ }
+}
+
+func (s *DockerSuite) TestBuildOnBuildOutput(c *check.C) {
+ name := "testbuildonbuildparent"
+ if _, err := buildImage(name, "FROM busybox\nONBUILD RUN echo foo\n", true); err != nil {
+ c.Fatal(err)
+ }
+
+ _, out, err := buildImageWithOut(name, "FROM "+name+"\nMAINTAINER quux\n", true)
+ if err != nil {
+ c.Fatal(err)
+ }
+
+ if !strings.Contains(out, "Trigger 0, RUN echo foo") {
+ c.Fatal("failed to find the ONBUILD output", out)
+ }
+
+}
+
+func (s *DockerSuite) TestBuildInvalidTag(c *check.C) {
+ name := "abcd:" + stringutils.GenerateRandomAlphaOnlyString(200)
+ _, out, err := buildImageWithOut(name, "FROM scratch\nMAINTAINER quux\n", true)
+ // if the error doesn't check for illegal tag name, or the image is built
+ // then this should fail
+ if !strings.Contains(out, "Illegal tag name") || strings.Contains(out, "Sending build context to Docker daemon") {
+ c.Fatalf("failed to stop before building. 
Error: %s, Output: %s", err, out) + } +} + +func (s *DockerSuite) TestBuildCmdShDashC(c *check.C) { + name := "testbuildcmdshc" + if _, err := buildImage(name, "FROM busybox\nCMD echo cmd\n", true); err != nil { + c.Fatal(err) + } + + res, err := inspectFieldJSON(name, "Config.Cmd") + if err != nil { + c.Fatal(err, res) + } + + expected := `["/bin/sh","-c","echo cmd"]` + + if res != expected { + c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) + } + +} + +func (s *DockerSuite) TestBuildCmdSpaces(c *check.C) { + // Test to make sure that when we strcat arrays we take into account + // the arg separator to make sure ["echo","hi"] and ["echo hi"] don't + // look the same + name := "testbuildcmdspaces" + var id1 string + var id2 string + var err error + + if id1, err = buildImage(name, "FROM busybox\nCMD [\"echo hi\"]\n", true); err != nil { + c.Fatal(err) + } + + if id2, err = buildImage(name, "FROM busybox\nCMD [\"echo\", \"hi\"]\n", true); err != nil { + c.Fatal(err) + } + + if id1 == id2 { + c.Fatal("Should not have resulted in the same CMD") + } + + // Now do the same with ENTRYPOINT + if id1, err = buildImage(name, "FROM busybox\nENTRYPOINT [\"echo hi\"]\n", true); err != nil { + c.Fatal(err) + } + + if id2, err = buildImage(name, "FROM busybox\nENTRYPOINT [\"echo\", \"hi\"]\n", true); err != nil { + c.Fatal(err) + } + + if id1 == id2 { + c.Fatal("Should not have resulted in the same ENTRYPOINT") + } + +} + +func (s *DockerSuite) TestBuildCmdJSONNoShDashC(c *check.C) { + name := "testbuildcmdjson" + if _, err := buildImage(name, "FROM busybox\nCMD [\"echo\", \"cmd\"]", true); err != nil { + c.Fatal(err) + } + + res, err := inspectFieldJSON(name, "Config.Cmd") + if err != nil { + c.Fatal(err, res) + } + + expected := `["echo","cmd"]` + + if res != expected { + c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) + } + +} + +func (s *DockerSuite) TestBuildErrorInvalidInstruction(c *check.C) { + name := "testbuildignoreinvalidinstruction" + + out, _, err := buildImageWithOut(name, "FROM busybox\nfoo bar", true) + if err == nil { + c.Fatalf("Should have failed: %s", out) + } + +} + +func (s *DockerSuite) TestBuildEntrypointInheritance(c *check.C) { + + if _, err := buildImage("parent", ` + FROM busybox + ENTRYPOINT exit 130 + `, true); err != nil { + c.Fatal(err) + } + + if _, status, _ := dockerCmdWithError(c, "run", "parent"); status != 130 { + c.Fatalf("expected exit code 130 but received %d", status) + } + + if _, err := buildImage("child", ` + FROM parent + ENTRYPOINT exit 5 + `, true); err != nil { + c.Fatal(err) + } + + if _, status, _ := dockerCmdWithError(c, "run", "child"); status != 5 { + c.Fatalf("expected exit code 5 but received %d", status) + } + +} + +func (s *DockerSuite) TestBuildEntrypointInheritanceInspect(c *check.C) { + var ( + name = "testbuildepinherit" + name2 = "testbuildepinherit2" + expected = `["/bin/sh","-c","echo quux"]` + ) + + if _, err := buildImage(name, "FROM busybox\nENTRYPOINT /foo/bar", true); err != nil { + c.Fatal(err) + } + + if _, err := buildImage(name2, fmt.Sprintf("FROM %s\nENTRYPOINT echo quux", name), true); err != nil { + c.Fatal(err) + } + + res, err := inspectFieldJSON(name2, "Config.Entrypoint") + if err != nil { + c.Fatal(err, res) + } + + if res != expected { + c.Fatalf("Expected value %s not in Config.Entrypoint: %s", expected, res) + } + + out, _ := dockerCmd(c, "run", "-t", name2) + + expected = "quux" + + if strings.TrimSpace(out) != expected { + c.Fatalf("Expected output is %s, got %s", expected, out) 
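+ // (The shell form used in this test is why the inspected value is a JSON
+ // array: the builder wraps shell-form ENTRYPOINT/CMD strings in a
+ // "/bin/sh -c" invocation before storing them, while exec-form JSON
+ // arrays are stored verbatim.)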
+ }
+
+}
+
+func (s *DockerSuite) TestBuildRunShEntrypoint(c *check.C) {
+ name := "testbuildentrypoint"
+ _, err := buildImage(name,
+ `FROM busybox
+ ENTRYPOINT /bin/echo`,
+ true)
+ if err != nil {
+ c.Fatal(err)
+ }
+
+ dockerCmd(c, "run", "--rm", name)
+}
+
+func (s *DockerSuite) TestBuildExoticShellInterpolation(c *check.C) {
+ name := "testbuildexoticshellinterpolation"
+
+ _, err := buildImage(name, `
+ FROM busybox
+
+ ENV SOME_VAR a.b.c
+
+ RUN [ "$SOME_VAR" = 'a.b.c' ]
+ RUN [ "${SOME_VAR}" = 'a.b.c' ]
+ RUN [ "${SOME_VAR%.*}" = 'a.b' ]
+ RUN [ "${SOME_VAR%%.*}" = 'a' ]
+ RUN [ "${SOME_VAR#*.}" = 'b.c' ]
+ RUN [ "${SOME_VAR##*.}" = 'c' ]
+ RUN [ "${SOME_VAR/c/d}" = 'a.b.d' ]
+ RUN [ "${#SOME_VAR}" = '5' ]
+
+ RUN [ "${SOME_UNSET_VAR:-$SOME_VAR}" = 'a.b.c' ]
+ RUN [ "${SOME_VAR:+Version: ${SOME_VAR}}" = 'Version: a.b.c' ]
+ RUN [ "${SOME_UNSET_VAR:+${SOME_VAR}}" = '' ]
+ RUN [ "${SOME_UNSET_VAR:-${SOME_VAR:-d.e.f}}" = 'a.b.c' ]
+ `, false)
+ if err != nil {
+ c.Fatal(err)
+ }
+
+}
+
+func (s *DockerSuite) TestBuildVerifySingleQuoteFails(c *check.C) {
+ // This testcase is supposed to generate an error because the
+ // JSON array we're passing in on the CMD uses single quotes instead
+ // of double quotes (per the JSON spec). This means we interpret it
+ // as a "string" instead of "JSON array" and pass it on to "sh -c" and
+ // it should barf on it.
+ name := "testbuildsinglequotefails"
+
+ if _, err := buildImage(name,
+ `FROM busybox
+ CMD [ '/bin/sh', '-c', 'echo hi' ]`,
+ true); err != nil {
+ c.Fatal(err)
+ }
+
+ if _, _, err := dockerCmdWithError(c, "run", "--rm", name); err == nil {
+ c.Fatal("The image was not supposed to be able to run")
+ }
+
+}
+
+func (s *DockerSuite) TestBuildVerboseOut(c *check.C) {
+ name := "testbuildverboseout"
+
+ _, out, err := buildImageWithOut(name,
+ `FROM busybox
+RUN echo 123`,
+ false)
+
+ if err != nil {
+ c.Fatal(err)
+ }
+ if !strings.Contains(out, "\n123\n") {
+ c.Fatalf("Output should contain %q: %q", "123", out)
+ }
+
+}
+
+func (s *DockerSuite) TestBuildWithTabs(c *check.C) {
+ name := "testbuildwithtabs"
+ _, err := buildImage(name,
+ "FROM busybox\nRUN echo\tone\t\ttwo", true)
+ if err != nil {
+ c.Fatal(err)
+ }
+ res, err := inspectFieldJSON(name, "ContainerConfig.Cmd")
+ if err != nil {
+ c.Fatal(err)
+ }
+ expected1 := `["/bin/sh","-c","echo\tone\t\ttwo"]`
+ expected2 := `["/bin/sh","-c","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates
+ if res != expected1 && res != expected2 {
+ c.Fatalf("Missing tabs.\nGot: %s\nExp: %s or %s", res, expected1, expected2)
+ }
+}
+
+func (s *DockerSuite) TestBuildLabels(c *check.C) {
+ name := "testbuildlabel"
+ expected := `{"License":"GPL","Vendor":"Acme"}`
+ _, err := buildImage(name,
+ `FROM busybox
+ LABEL Vendor=Acme
+ LABEL License GPL`,
+ true)
+ if err != nil {
+ c.Fatal(err)
+ }
+ res, err := inspectFieldJSON(name, "Config.Labels")
+ if err != nil {
+ c.Fatal(err)
+ }
+ if res != expected {
+ c.Fatalf("Labels %s, expected %s", res, expected)
+ }
+}
+
+func (s *DockerSuite) TestBuildLabelsCache(c *check.C) {
+ name := "testbuildlabelcache"
+
+ id1, err := buildImage(name,
+ `FROM busybox
+ LABEL Vendor=Acme`, false)
+ if err != nil {
+ c.Fatalf("Build 1 should have worked: %v", err)
+ }
+
+ id2, err := buildImage(name,
+ `FROM busybox
+ LABEL Vendor=Acme`, true)
+ if err != nil || id1 != id2 {
+ c.Fatalf("Build 2 should have worked & used cache(%s,%s): %v", id1, id2, err)
+ }
+
+ id2, err = buildImage(name,
+ `FROM busybox
+ LABEL Vendor=Acme1`,
+ true)
+ if err != nil || id1 == id2 {
+ c.Fatalf("Build 3 should have worked & NOT used cache(%s,%s): %v", id1, id2, err)
+ }
+
+ id2, err = buildImage(name,
+ `FROM busybox
+ LABEL Vendor Acme`, true) // Note: " " and "=" should behave the same
+ if err != nil || id1 != id2 {
+ c.Fatalf("Build 4 should have worked & used cache(%s,%s): %v", id1, id2, err)
+ }
+
+ // Now make sure the cache isn't used by mistake
+ id1, err = buildImage(name,
+ `FROM busybox
+ LABEL f1=b1 f2=b2`, false)
+ if err != nil {
+ c.Fatalf("Build 5 should have worked: %q", err)
+ }
+
+ id2, err = buildImage(name,
+ `FROM busybox
+ LABEL f1="b1 f2=b2"`, true)
+ if err != nil || id1 == id2 {
+ c.Fatalf("Build 6 should have worked & NOT used the cache(%s,%s): %q", id1, id2, err)
+ }
+
+}
+
+func (s *DockerSuite) TestBuildStderr(c *check.C) {
+ // This test just makes sure that no non-error output goes
+ // to stderr
+ name := "testbuildstderr"
+ _, _, stderr, err := buildImageWithStdoutStderr(name,
+ "FROM busybox\nRUN echo one", true)
+ if err != nil {
+ c.Fatal(err)
+ }
+
+ if runtime.GOOS == "windows" {
+ // stderr might contain a security warning on windows
+ lines := strings.Split(stderr, "\n")
+ for _, v := range lines {
+ if v != "" && !strings.Contains(v, "SECURITY WARNING:") {
+ c.Fatalf("Stderr contains unexpected output line: %q", v)
+ }
+ }
+ } else {
+ if stderr != "" {
+ c.Fatalf("Stderr should have been empty, instead it's: %q", stderr)
+ }
+ }
+}
+
+func (s *DockerSuite) TestBuildChownSingleFile(c *check.C) {
+ testRequires(c, UnixCli) // test uses chown: not available on windows
+
+ name := "testbuildchownsinglefile"
+
+ ctx, err := fakeContext(`
+FROM busybox
+COPY test /
+RUN ls -l /test
+RUN [ $(ls -l /test | awk '{print $3":"$4}') = 'root:root' ]
+`, map[string]string{
+ "test": "test",
+ })
+ if err != nil {
+ c.Fatal(err)
+ }
+ defer ctx.Close()
+
+ if err := os.Chown(filepath.Join(ctx.Dir, "test"), 4242, 4242); err != nil {
+ c.Fatal(err)
+ }
+
+ if _, err := buildImageFromContext(name, ctx, true); err != nil {
+ c.Fatal(err)
+ }
+
+}
+
+func (s *DockerSuite) TestBuildSymlinkBreakout(c *check.C) {
+ name := "testbuildsymlinkbreakout"
+ tmpdir, err := ioutil.TempDir("", name)
+ if err != nil {
+ c.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+ ctx := filepath.Join(tmpdir, "context")
+ if err := os.MkdirAll(ctx, 0755); err != nil {
+ c.Fatal(err)
+ }
+ if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte(`
+ from busybox
+ add symlink.tar /
+ add inject /symlink/
+ `), 0644); err != nil {
+ c.Fatal(err)
+ }
+ inject := filepath.Join(ctx, "inject")
+ if err := ioutil.WriteFile(inject, nil, 0644); err != nil {
+ c.Fatal(err)
+ }
+ f, err := os.Create(filepath.Join(ctx, "symlink.tar"))
+ if err != nil {
+ c.Fatal(err)
+ }
+ w := tar.NewWriter(f)
+ w.WriteHeader(&tar.Header{
+ Name: "symlink2",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "/../../../../../../../../../../../../../../",
+ Uid: os.Getuid(),
+ Gid: os.Getgid(),
+ })
+ w.WriteHeader(&tar.Header{
+ Name: "symlink",
+ Typeflag: tar.TypeSymlink,
+ Linkname: filepath.Join("symlink2", tmpdir),
+ Uid: os.Getuid(),
+ Gid: os.Getgid(),
+ })
+ w.Close()
+ f.Close()
+ if _, err := buildImageFromContext(name, fakeContextFromDir(ctx), false); err != nil {
+ c.Fatal(err)
+ }
+ if _, err := os.Lstat(filepath.Join(tmpdir, "inject")); err == nil {
+ c.Fatal("symlink breakout - inject")
+ } else if !os.IsNotExist(err) {
+ c.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func (s *DockerSuite) TestBuildXZHost(c *check.C) {
+ name := "testbuildxzhost"
+
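+ // test.xz below starts with the xz stream magic (FD 37 7A 58 5A 00), so ADD
+ // treats it as a compressed archive. The planted "xz" shell script would
+ // create /injected if anything executed it during extraction; the final RUN
+ // asserts it never ran, i.e. decompression must not invoke an xz binary
+ // taken from the build context or the image.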
+ ctx, err := fakeContext(`
+FROM busybox
+ADD xz /usr/local/sbin/
+RUN chmod 755 /usr/local/sbin/xz
+ADD test.xz /
+RUN [ ! -e /injected ]`,
+ map[string]string{
+ "test.xz": "\xfd\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00" +
+ "\x21\x01\x16\x00\x00\x00\x74\x2f\xe5\xa3\x01\x00\x3f\xfd" +
+ "\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00\x21",
+ "xz": "#!/bin/sh\ntouch /injected",
+ })
+
+ if err != nil {
+ c.Fatal(err)
+ }
+ defer ctx.Close()
+
+ if _, err := buildImageFromContext(name, ctx, true); err != nil {
+ c.Fatal(err)
+ }
+
+}
+
+func (s *DockerSuite) TestBuildVolumesRetainContents(c *check.C) {
+ var (
+ name = "testbuildvolumescontent"
+ expected = "some text"
+ )
+ ctx, err := fakeContext(`
+FROM busybox
+COPY content /foo/file
+VOLUME /foo
+CMD cat /foo/file`,
+ map[string]string{
+ "content": expected,
+ })
+ if err != nil {
+ c.Fatal(err)
+ }
+ defer ctx.Close()
+
+ if _, err := buildImageFromContext(name, ctx, false); err != nil {
+ c.Fatal(err)
+ }
+
+ out, _ := dockerCmd(c, "run", "--rm", name)
+ if out != expected {
+ c.Fatalf("expected file contents for /foo/file to be %q but received %q", expected, out)
+ }
+
+}
+
+func (s *DockerSuite) TestBuildRenamedDockerfile(c *check.C) {
+
+ ctx, err := fakeContext(`FROM busybox
+ RUN echo from Dockerfile`,
+ map[string]string{
+ "Dockerfile": "FROM busybox\nRUN echo from Dockerfile",
+ "files/Dockerfile": "FROM busybox\nRUN echo from files/Dockerfile",
+ "files/dFile": "FROM busybox\nRUN echo from files/dFile",
+ "dFile": "FROM busybox\nRUN echo from dFile",
+ "files/dFile2": "FROM busybox\nRUN echo from files/dFile2",
+ })
+ defer ctx.Close()
+ if err != nil {
+ c.Fatal(err)
+ }
+
+ out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".")
+ if err != nil {
+ c.Fatalf("Failed to build: %s\n%s", out, err)
+ }
+ if !strings.Contains(out, "from Dockerfile") {
+ c.Fatalf("test1 should have used Dockerfile, output:%s", out)
+ }
+
+ out, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-f", filepath.Join("files", "Dockerfile"), "-t", "test2", ".")
+ if err != nil {
+ c.Fatal(err)
+ }
+ if !strings.Contains(out, "from files/Dockerfile") {
+ c.Fatalf("test2 should have used files/Dockerfile, output:%s", out)
+ }
+
+ out, _, err = dockerCmdInDir(c, ctx.Dir, "build", fmt.Sprintf("--file=%s", filepath.Join("files", "dFile")), "-t", "test3", ".")
+ if err != nil {
+ c.Fatal(err)
+ }
+ if !strings.Contains(out, "from files/dFile") {
+ c.Fatalf("test3 should have used files/dFile, output:%s", out)
+ }
+
+ out, _, err = dockerCmdInDir(c, ctx.Dir, "build", "--file=dFile", "-t", "test4", ".")
+ if err != nil {
+ c.Fatal(err)
+ }
+ if !strings.Contains(out, "from dFile") {
+ c.Fatalf("test4 should have used dFile, output:%s", out)
+ }
+
+ dirWithNoDockerfile, _ := ioutil.TempDir(os.TempDir(), "test5")
+ nonDockerfileFile := filepath.Join(dirWithNoDockerfile, "notDockerfile")
+ if _, err = os.Create(nonDockerfileFile); err != nil {
+ c.Fatal(err)
+ }
+ out, _, err = dockerCmdInDir(c, ctx.Dir, "build", fmt.Sprintf("--file=%s", nonDockerfileFile), "-t", "test5", ".")
+
+ if err == nil {
+ c.Fatalf("test5 was supposed to fail to find the Dockerfile")
+ }
+
+ if expected := fmt.Sprintf("The Dockerfile (%s) must be within the build context (.)", nonDockerfileFile); !strings.Contains(out, expected) {
+ c.Fatalf("wrong error message:%v\nexpected to contain=%v", out, expected)
+ }
+
+ out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test6", "..")
+ if err != nil {
+
c.Fatalf("test6 failed: %s", err) + } + if !strings.Contains(out, "from Dockerfile") { + c.Fatalf("test6 should have used root Dockerfile, output:%s", out) + } + + out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join(ctx.Dir, "files", "Dockerfile"), "-t", "test7", "..") + if err != nil { + c.Fatalf("test7 failed: %s", err) + } + if !strings.Contains(out, "from files/Dockerfile") { + c.Fatalf("test7 should have used files Dockerfile, output:%s", out) + } + + out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test8", ".") + if err == nil || !strings.Contains(out, "must be within the build context") { + c.Fatalf("test8 should have failed with Dockerfile out of context: %s", err) + } + + tmpDir := os.TempDir() + out, _, err = dockerCmdInDir(c, tmpDir, "build", "-t", "test9", ctx.Dir) + if err != nil { + c.Fatalf("test9 - failed: %s", err) + } + if !strings.Contains(out, "from Dockerfile") { + c.Fatalf("test9 should have used root Dockerfile, output:%s", out) + } + + out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", "dFile2", "-t", "test10", ".") + if err != nil { + c.Fatalf("test10 should have worked: %s", err) + } + if !strings.Contains(out, "from files/dFile2") { + c.Fatalf("test10 should have used files/dFile2, output:%s", out) + } + +} + +func (s *DockerSuite) TestBuildFromMixedcaseDockerfile(c *check.C) { + testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows + + ctx, err := fakeContext(`FROM busybox + RUN echo from dockerfile`, + map[string]string{ + "dockerfile": "FROM busybox\nRUN echo from dockerfile", + }) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".") + if err != nil { + c.Fatalf("Failed to build: %s\n%s", out, err) + } + + if !strings.Contains(out, "from dockerfile") { + c.Fatalf("Missing proper output: %s", out) + } + +} + +func (s *DockerSuite) TestBuildWithTwoDockerfiles(c *check.C) { + testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows + + ctx, err := fakeContext(`FROM busybox +RUN echo from Dockerfile`, + map[string]string{ + "dockerfile": "FROM busybox\nRUN echo from dockerfile", + }) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".") + if err != nil { + c.Fatalf("Failed to build: %s\n%s", out, err) + } + + if !strings.Contains(out, "from Dockerfile") { + c.Fatalf("Missing proper output: %s", out) + } + +} + +func (s *DockerSuite) TestBuildFromURLWithF(c *check.C) { + + server, err := fakeStorage(map[string]string{"baz": `FROM busybox +RUN echo from baz +COPY * /tmp/ +RUN find /tmp/`}) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + ctx, err := fakeContext(`FROM busybox +RUN echo from Dockerfile`, + map[string]string{}) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + + // Make sure that -f is ignored and that we don't use the Dockerfile + // that's in the current dir + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-f", "baz", "-t", "test1", server.URL()+"/baz") + if err != nil { + c.Fatalf("Failed to build: %s\n%s", out, err) + } + + if !strings.Contains(out, "from baz") || + strings.Contains(out, "/tmp/baz") || + !strings.Contains(out, "/tmp/Dockerfile") { + c.Fatalf("Missing proper output: %s", out) + } + +} + +func (s *DockerSuite) TestBuildFromStdinWithF(c *check.C) { + + ctx, err := 
fakeContext(`FROM busybox +RUN echo from Dockerfile`, + map[string]string{}) + defer ctx.Close() + if err != nil { + c.Fatal(err) + } + + // Make sure that -f is ignored and that we don't use the Dockerfile + // that's in the current dir + dockerCommand := exec.Command(dockerBinary, "build", "-f", "baz", "-t", "test1", "-") + dockerCommand.Dir = ctx.Dir + dockerCommand.Stdin = strings.NewReader(`FROM busybox +RUN echo from baz +COPY * /tmp/ +RUN find /tmp/`) + out, status, err := runCommandWithOutput(dockerCommand) + if err != nil || status != 0 { + c.Fatalf("Error building: %s", err) + } + + if !strings.Contains(out, "from baz") || + strings.Contains(out, "/tmp/baz") || + !strings.Contains(out, "/tmp/Dockerfile") { + c.Fatalf("Missing proper output: %s", out) + } + +} + +func (s *DockerSuite) TestBuildFromOfficialNames(c *check.C) { + name := "testbuildfromofficial" + fromNames := []string{ + "busybox", + "docker.io/busybox", + "index.docker.io/busybox", + "library/busybox", + "docker.io/library/busybox", + "index.docker.io/library/busybox", + } + for idx, fromName := range fromNames { + imgName := fmt.Sprintf("%s%d", name, idx) + _, err := buildImage(imgName, "FROM "+fromName, true) + if err != nil { + c.Errorf("Build failed using FROM %s: %s", fromName, err) + } + deleteImages(imgName) + } +} + +func (s *DockerSuite) TestBuildDockerfileOutsideContext(c *check.C) { + testRequires(c, UnixCli) // uses os.Symlink: not implemented in windows at the time of writing (go-1.4.2) + + name := "testbuilddockerfileoutsidecontext" + tmpdir, err := ioutil.TempDir("", name) + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpdir) + ctx := filepath.Join(tmpdir, "context") + if err := os.MkdirAll(ctx, 0755); err != nil { + c.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte("FROM scratch\nENV X Y"), 0644); err != nil { + c.Fatal(err) + } + wd, err := os.Getwd() + if err != nil { + c.Fatal(err) + } + defer os.Chdir(wd) + if err := os.Chdir(ctx); err != nil { + c.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(tmpdir, "outsideDockerfile"), []byte("FROM scratch\nENV x y"), 0644); err != nil { + c.Fatal(err) + } + if err := os.Symlink(filepath.Join("..", "outsideDockerfile"), filepath.Join(ctx, "dockerfile1")); err != nil { + c.Fatal(err) + } + if err := os.Symlink(filepath.Join(tmpdir, "outsideDockerfile"), filepath.Join(ctx, "dockerfile2")); err != nil { + c.Fatal(err) + } + + for _, dockerfilePath := range []string{ + filepath.Join("..", "outsideDockerfile"), + filepath.Join(ctx, "dockerfile1"), + filepath.Join(ctx, "dockerfile2"), + } { + out, _, err := dockerCmdWithError(c, "build", "-t", name, "--no-cache", "-f", dockerfilePath, ".") + if err == nil { + c.Fatalf("Expected error with %s. Out: %s", dockerfilePath, out) + } + if !strings.Contains(out, "must be within the build context") && !strings.Contains(out, "Cannot locate Dockerfile") { + c.Fatalf("Unexpected error with %s. Out: %s", dockerfilePath, out) + } + deleteImages(name) + } + + os.Chdir(tmpdir) + + // Path to Dockerfile should be resolved relative to working directory, not relative to context. + // There is a Dockerfile in the context, but since there is no Dockerfile in the current directory, the following should fail + out, _, err := dockerCmdWithError(c, "build", "-t", name, "--no-cache", "-f", "Dockerfile", ctx) + if err == nil { + c.Fatalf("Expected error. 
Out: %s", out) + } +} + +func (s *DockerSuite) TestBuildSpaces(c *check.C) { + // Test to make sure that leading/trailing spaces on a command + // doesn't change the error msg we get + var ( + err1 error + err2 error + ) + + name := "testspaces" + ctx, err := fakeContext("FROM busybox\nCOPY\n", + map[string]string{ + "Dockerfile": "FROM busybox\nCOPY\n", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err1 = buildImageFromContext(name, ctx, false); err1 == nil { + c.Fatal("Build 1 was supposed to fail, but didn't") + } + + ctx.Add("Dockerfile", "FROM busybox\nCOPY ") + if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil { + c.Fatal("Build 2 was supposed to fail, but didn't") + } + + removeLogTimestamps := func(s string) string { + return regexp.MustCompile(`time="(.*?)"`).ReplaceAllString(s, `time=[TIMESTAMP]`) + } + + // Skip over the times + e1 := removeLogTimestamps(err1.Error()) + e2 := removeLogTimestamps(err2.Error()) + + // Ignore whitespace since that's what were verifying doesn't change stuff + if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { + c.Fatalf("Build 2's error wasn't the same as build 1's\n1:%s\n2:%s", err1, err2) + } + + ctx.Add("Dockerfile", "FROM busybox\n COPY") + if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil { + c.Fatal("Build 3 was supposed to fail, but didn't") + } + + // Skip over the times + e1 = removeLogTimestamps(err1.Error()) + e2 = removeLogTimestamps(err2.Error()) + + // Ignore whitespace since that's what were verifying doesn't change stuff + if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { + c.Fatalf("Build 3's error wasn't the same as build 1's\n1:%s\n3:%s", err1, err2) + } + + ctx.Add("Dockerfile", "FROM busybox\n COPY ") + if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil { + c.Fatal("Build 4 was supposed to fail, but didn't") + } + + // Skip over the times + e1 = removeLogTimestamps(err1.Error()) + e2 = removeLogTimestamps(err2.Error()) + + // Ignore whitespace since that's what were verifying doesn't change stuff + if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { + c.Fatalf("Build 4's error wasn't the same as build 1's\n1:%s\n4:%s", err1, err2) + } + +} + +func (s *DockerSuite) TestBuildSpacesWithQuotes(c *check.C) { + // Test to make sure that spaces in quotes aren't lost + name := "testspacesquotes" + + dockerfile := `FROM busybox +RUN echo " \ + foo "` + + _, out, err := buildImageWithOut(name, dockerfile, false) + if err != nil { + c.Fatal("Build failed:", err) + } + + expecting := "\n foo \n" + if !strings.Contains(out, expecting) { + c.Fatalf("Bad output: %q expecting to contain %q", out, expecting) + } + +} + +// #4393 +func (s *DockerSuite) TestBuildVolumeFileExistsinContainer(c *check.C) { + buildCmd := exec.Command(dockerBinary, "build", "-t", "docker-test-errcreatevolumewithfile", "-") + buildCmd.Stdin = strings.NewReader(` + FROM busybox + RUN touch /foo + VOLUME /foo + `) + + out, _, err := runCommandWithOutput(buildCmd) + if err == nil || !strings.Contains(out, "file exists") { + c.Fatalf("expected build to fail when file exists in container at requested volume path") + } + +} + +func (s *DockerSuite) TestBuildMissingArgs(c *check.C) { + // Test to make sure that all Dockerfile commands (except the ones listed + // in skipCmds) will generate an error if no args are provided. + // Note: INSERT is deprecated so we exclude it because of that. 
+ skipCmds := map[string]struct{}{
+ "CMD": {},
+ "RUN": {},
+ "ENTRYPOINT": {},
+ "INSERT": {},
+ }
+
+ for cmd := range command.Commands {
+ cmd = strings.ToUpper(cmd)
+ if _, ok := skipCmds[cmd]; ok {
+ continue
+ }
+
+ var dockerfile string
+ if cmd == "FROM" {
+ dockerfile = cmd
+ } else {
+ // Add FROM to make sure we don't complain about it missing
+ dockerfile = "FROM busybox\n" + cmd
+ }
+
+ ctx, err := fakeContext(dockerfile, map[string]string{})
+ if err != nil {
+ c.Fatal(err)
+ }
+ defer ctx.Close()
+ var out string
+ if out, err = buildImageFromContext("args", ctx, true); err == nil {
+ c.Fatalf("%s was supposed to fail. Out:%s", cmd, out)
+ }
+ if !strings.Contains(err.Error(), cmd+" requires") {
+ c.Fatalf("%s returned the wrong type of error:%s", cmd, err)
+ }
+ }
+
+}
+
+func (s *DockerSuite) TestBuildEmptyScratch(c *check.C) {
+ _, out, err := buildImageWithOut("sc", "FROM scratch", true)
+ if err == nil {
+ c.Fatalf("Build was supposed to fail")
+ }
+ if !strings.Contains(out, "No image was generated") {
+ c.Fatalf("Wrong error message: %v", out)
+ }
+}
+
+func (s *DockerSuite) TestBuildDotDotFile(c *check.C) {
+ ctx, err := fakeContext("FROM busybox\n",
+ map[string]string{
+ "..gitme": "",
+ })
+ if err != nil {
+ c.Fatal(err)
+ }
+ defer ctx.Close()
+
+ if _, err = buildImageFromContext("sc", ctx, false); err != nil {
+ c.Fatalf("Build was supposed to work: %s", err)
+ }
+}
+
+func (s *DockerSuite) TestBuildNotVerbose(c *check.C) {
+
+ ctx, err := fakeContext("FROM busybox\nENV abc=hi\nRUN echo $abc there", map[string]string{})
+ if err != nil {
+ c.Fatal(err)
+ }
+ defer ctx.Close()
+
+ // First do it w/verbose - baseline
+ out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--no-cache", "-t", "verbose", ".")
+ if err != nil {
+ c.Fatalf("failed to build the image w/o -q: %s, %v", out, err)
+ }
+ if !strings.Contains(out, "hi there") {
+ c.Fatalf("missing output:%s\n", out)
+ }
+
+ // Now do it w/o verbose
+ out, _, err = dockerCmdInDir(c, ctx.Dir, "build", "--no-cache", "-q", "-t", "verbose", ".")
+ if err != nil {
+ c.Fatalf("failed to build the image w/ -q: %s, %v", out, err)
+ }
+ if strings.Contains(out, "hi there") {
+ c.Fatalf("Bad output, should not contain 'hi there':%s", out)
+ }
+
+}
+
+func (s *DockerSuite) TestBuildRUNoneJSON(c *check.C) {
+ name := "testbuildrunonejson"
+
+ ctx, err := fakeContext(`FROM hello-world:frozen
+RUN [ "/hello" ]`, map[string]string{})
+ if err != nil {
+ c.Fatal(err)
+ }
+ defer ctx.Close()
+
+ out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--no-cache", "-t", name, ".")
+ if err != nil {
+ c.Fatalf("failed to build the image: %s, %v", out, err)
+ }
+
+ if !strings.Contains(out, "Hello from Docker") {
+ c.Fatalf("bad output: %s", out)
+ }
+
+}
+
+func (s *DockerSuite) TestBuildEmptyStringVolume(c *check.C) {
+ name := "testbuildemptystringvolume"
+
+ _, err := buildImage(name, `
+ FROM busybox
+ ENV foo=""
+ VOLUME $foo
+ `, false)
+ if err == nil {
+ c.Fatal("Should have failed to build")
+ }
+
+}
+
+func (s *DockerSuite) TestBuildContainerWithCgroupParent(c *check.C) {
+ testRequires(c, NativeExecDriver)
+ testRequires(c, SameHostDaemon)
+
+ cgroupParent := "test"
+ data, err := ioutil.ReadFile("/proc/self/cgroup")
+ if err != nil {
+ c.Fatalf("failed to read /proc/self/cgroup: %v", err)
+ }
+ selfCgroupPaths := parseCgroupPaths(string(data))
+ _, found := selfCgroupPaths["memory"]
+ if !found {
+ c.Fatalf("unable to find self memory cgroup path. 
CgroupsPath: %v", selfCgroupPaths) + } + cmd := exec.Command(dockerBinary, "build", "--cgroup-parent", cgroupParent, "-") + cmd.Stdin = strings.NewReader(` +FROM busybox +RUN cat /proc/self/cgroup +`) + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) + } +} + +func (s *DockerSuite) TestBuildNoDupOutput(c *check.C) { + // Check to make sure our build output prints the Dockerfile cmd + // property - there was a bug that caused it to be duplicated on the + // Step X line + name := "testbuildnodupoutput" + + _, out, err := buildImageWithOut(name, ` + FROM busybox + RUN env`, false) + if err != nil { + c.Fatalf("Build should have worked: %q", err) + } + + exp := "\nStep 1 : RUN env\n" + if !strings.Contains(out, exp) { + c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", out, exp) + } +} + +func (s *DockerSuite) TestBuildBadCmdFlag(c *check.C) { + name := "testbuildbadcmdflag" + + _, out, err := buildImageWithOut(name, ` + FROM busybox + MAINTAINER --boo joe@example.com`, false) + if err == nil { + c.Fatal("Build should have failed") + } + + exp := "\nUnknown flag: boo\n" + if !strings.Contains(out, exp) { + c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", out, exp) + } +} + +func (s *DockerSuite) TestBuildRUNErrMsg(c *check.C) { + // Test to make sure the bad command is quoted with just "s and + // not as a Go []string + name := "testbuildbadrunerrmsg" + _, out, err := buildImageWithOut(name, ` + FROM busybox + RUN badEXE a1 \& a2 a3`, false) // tab between a2 and a3 + if err == nil { + c.Fatal("Should have failed to build") + } + + exp := `The command '/bin/sh -c badEXE a1 \& a2 a3' returned a non-zero code: 127` + if !strings.Contains(out, exp) { + c.Fatalf("RUN doesn't have the correct output:\nGot:%s\nExpected:%s", out, exp) + } +} + +func (s *DockerTrustSuite) TestTrustedBuild(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-build") + dockerFile := fmt.Sprintf(` + FROM %s + RUN [] + `, repoName) + + name := "testtrustedbuild" + + buildCmd := buildImageCmd(name, dockerFile, true) + s.trustedCmd(buildCmd) + out, _, err := runCommandWithOutput(buildCmd) + if err != nil { + c.Fatalf("Error running trusted build: %s\n%s", err, out) + } + + if !strings.Contains(out, fmt.Sprintf("FROM %s@sha", repoName[:len(repoName)-7])) { + c.Fatalf("Unexpected output on trusted build:\n%s", out) + } + + // We should also have a tag reference for the image. + if out, exitCode := dockerCmd(c, "inspect", repoName); exitCode != 0 { + c.Fatalf("unexpected exit code inspecting image %q: %d: %s", repoName, exitCode, out) + } + + // We should now be able to remove the tag reference. 
+ if out, exitCode := dockerCmd(c, "rmi", repoName); exitCode != 0 {
+ c.Fatalf("unexpected exit code removing image %q: %d: %s", repoName, exitCode, out)
+ }
+}
+
+func (s *DockerTrustSuite) TestTrustedBuildUntrustedTag(c *check.C) {
+ repoName := fmt.Sprintf("%v/dockercli/build-untrusted-tag:latest", privateRegistryURL)
+ dockerFile := fmt.Sprintf(`
+ FROM %s
+ RUN []
+ `, repoName)
+
+ name := "testtrustedbuilduntrustedtag"
+
+ buildCmd := buildImageCmd(name, dockerFile, true)
+ s.trustedCmd(buildCmd)
+ out, _, err := runCommandWithOutput(buildCmd)
+ if err == nil {
+ c.Fatalf("Expected error on trusted build with untrusted tag: %s\n%s", err, out)
+ }
+
+ if !strings.Contains(out, "no trust data available") {
+ c.Fatalf("Unexpected output on trusted build with untrusted tag:\n%s", out)
+ }
+}
+
+func (s *DockerTrustSuite) TestBuildContextDirIsSymlink(c *check.C) {
+ tempDir, err := ioutil.TempDir("", "test-build-dir-is-symlink-")
+ if err != nil {
+ c.Fatal(err)
+ }
+ defer os.RemoveAll(tempDir)
+
+ // Make a real context directory in this temp directory with a simple
+ // Dockerfile.
+ realContextDirname := filepath.Join(tempDir, "context")
+ if err := os.Mkdir(realContextDirname, os.FileMode(0755)); err != nil {
+ c.Fatal(err)
+ }
+
+ if err = ioutil.WriteFile(
+ filepath.Join(realContextDirname, "Dockerfile"),
+ []byte(`
+ FROM busybox
+ RUN echo hello world
+ `),
+ os.FileMode(0644),
+ ); err != nil {
+ c.Fatal(err)
+ }
+
+ // Make a symlink to the real context directory.
+ contextSymlinkName := filepath.Join(tempDir, "context_link")
+ if err := os.Symlink(realContextDirname, contextSymlinkName); err != nil {
+ c.Fatal(err)
+ }
+
+ // Executing the build with the symlink as the specified context should
+ // *not* fail.
+ if out, exitStatus := dockerCmd(c, "build", contextSymlinkName); exitStatus != 0 {
+ c.Fatalf("build failed with exit status %d: %s", exitStatus, out)
+ }
+}
diff --git a/integration-cli/docker_cli_build_unix_test.go b/integration-cli/docker_cli_build_unix_test.go
new file mode 100644
index 00000000..fc7bd925
--- /dev/null
+++ b/integration-cli/docker_cli_build_unix_test.go
@@ -0,0 +1,72 @@
+// +build !windows
+
+package main
+
+import (
+ "encoding/json"
+ "strings"
+
+ "github.com/docker/docker/pkg/ulimit"
+ "github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestBuildResourceConstraintsAreUsed(c *check.C) {
+ testRequires(c, cpuCfsQuota)
+ name := "testbuildresourceconstraints"
+
+ ctx, err := fakeContext(`
+ FROM hello-world:frozen
+ RUN ["/hello"]
+ `, map[string]string{})
+ if err != nil {
+ c.Fatal(err)
+ }
+
+ dockerCmdInDir(c, ctx.Dir, "build", "--no-cache", "--rm=false", "--memory=64m", "--memory-swap=-1", "--cpuset-cpus=0", "--cpuset-mems=0", "--cpu-shares=100", "--cpu-quota=8000", "--ulimit", "nofile=42", "-t", name, ".")
+
+ out, _ := dockerCmd(c, "ps", "-lq")
+
+ cID := strings.TrimSpace(out)
+
+ type hostConfig struct {
+ Memory int64
+ MemorySwap int64
+ CpusetCpus string
+ CpusetMems string
+ CPUShares int64
+ CPUQuota int64
+ Ulimits []*ulimit.Ulimit
+ }
+
+ cfg, err := inspectFieldJSON(cID, "HostConfig")
+ if err != nil {
+ c.Fatal(err)
+ }
+
+ var c1 hostConfig
+ if err := json.Unmarshal([]byte(cfg), &c1); err != nil {
+ c.Fatal(err, cfg)
+ }
+ if c1.Memory != 67108864 || c1.MemorySwap != -1 || c1.CpusetCpus != "0" || c1.CpusetMems != "0" || c1.CPUShares != 100 || c1.CPUQuota != 8000 || c1.Ulimits[0].Name != "nofile" || c1.Ulimits[0].Hard != 42 {
+ c.Fatalf("resource constraints not set properly:\nMemory: %d, MemSwap: %d, CpusetCpus: %s, CpusetMems: %s, CPUShares: %d, CPUQuota: %d, Ulimits: %s",
+ c1.Memory, c1.MemorySwap, c1.CpusetCpus, c1.CpusetMems, c1.CPUShares, c1.CPUQuota, c1.Ulimits[0])
+ }
+
+ // Make sure constraints aren't saved to image
+ dockerCmd(c, "run", "--name=test", name)
+
+ cfg, err = inspectFieldJSON("test", "HostConfig")
+ if err != nil {
+ c.Fatal(err)
+ }
+ var c2 hostConfig
+ if err := json.Unmarshal([]byte(cfg), &c2); err != nil {
+ c.Fatal(err, cfg)
+ }
+ if c2.Memory == 67108864 || c2.MemorySwap == -1 || c2.CpusetCpus == "0" || c2.CpusetMems == "0" || c2.CPUShares == 100 || c2.CPUQuota == 8000 || c2.Ulimits != nil {
+ c.Fatalf("resource constraints leaked from build:\nMemory: %d, MemSwap: %d, CpusetCpus: %s, CpusetMems: %s, CPUShares: %d, CPUQuota: %d, Ulimits: %s",
+ c2.Memory, c2.MemorySwap, c2.CpusetCpus, c2.CpusetMems, c2.CPUShares, c2.CPUQuota, c2.Ulimits)
+
+ }
+
+}
diff --git a/integration-cli/docker_cli_by_digest_test.go b/integration-cli/docker_cli_by_digest_test.go
new file mode 100644
index 00000000..71f8b1a8
--- /dev/null
+++ b/integration-cli/docker_cli_by_digest_test.go
@@ -0,0 +1,499 @@
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/docker/distribution/digest"
+ "github.com/docker/distribution/manifest"
+ "github.com/docker/docker/utils"
+ "github.com/go-check/check"
+)
+
+var (
+ remoteRepoName = "dockercli/busybox-by-dgst"
+ repoName = fmt.Sprintf("%v/%s", privateRegistryURL, remoteRepoName)
+ pushDigestRegex = regexp.MustCompile("[\\S]+: digest: ([\\S]+) size: [0-9]+")
+ digestRegex = regexp.MustCompile("Digest: ([\\S]+)")
+)
+
+func setupImage(c *check.C) (digest.Digest, error) {
+ return setupImageWithTag(c, "latest")
+}
+
+func setupImageWithTag(c *check.C, tag string) (digest.Digest, error) {
+ containerName := "busyboxbydigest"
+
+ dockerCmd(c, "run", "-d", "-e", "digest=1", "--name", containerName, "busybox")
+
+ // tag the image to upload it to the private registry
+ repoAndTag := utils.ImageReference(repoName, tag)
+ if out, _, err := dockerCmdWithError(c, "commit", containerName, repoAndTag); err != nil {
+ return "", fmt.Errorf("image tagging failed: %s, %v", out, err)
+ }
+
+ // delete the container as we don't need it any more
+ if err := deleteContainer(containerName); err != nil {
+ return "", err
+ }
+
+ // push the image
+ out, _, err := dockerCmdWithError(c, "push", repoAndTag)
+ if err != nil {
+ return "", fmt.Errorf("pushing the image to the private registry has failed: %s, %v", out, err)
+ }
+
+ // delete our local repo that we previously tagged
+ if rmiout, _, err := dockerCmdWithError(c, "rmi", repoAndTag); err != nil {
+ return "", fmt.Errorf("error deleting images prior to real test: %s, %v", rmiout, err)
+ }
+
+ matches := pushDigestRegex.FindStringSubmatch(out)
+ if len(matches) != 2 {
+ return "", fmt.Errorf("unable to parse digest from push output: %s", out)
+ }
+ pushDigest := matches[1]
+
+ return digest.Digest(pushDigest), nil
+}
+
+func (s *DockerRegistrySuite) TestPullByTagDisplaysDigest(c *check.C) {
+ pushDigest, err := setupImage(c)
+ if err != nil {
+ c.Fatalf("error setting up image: %v", err)
+ }
+
+ // pull from the registry using the tag
+ out, _ := dockerCmd(c, "pull", repoName)
+
+ // the pull output includes "Digest: <digest>", so find that
+ matches := digestRegex.FindStringSubmatch(out)
+ if len(matches) != 2 {
+ c.Fatalf("unable to parse digest from pull output: %s", out)
+ }
+ pullDigest := matches[1]
+
+ // make sure the pushed and pulled digests match
+ if pushDigest.String() != pullDigest {
+ c.Fatalf("push digest %q didn't match pull digest %q", pushDigest, pullDigest)
+ }
+}
+
+func (s *DockerRegistrySuite) TestPullByDigest(c *check.C) {
+ pushDigest, err := setupImage(c)
+ if err != nil {
+ c.Fatalf("error setting up image: %v", err)
+ }
+
+ // pull from the registry using the <name>@<digest> reference
+ imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest)
+ out, _ := dockerCmd(c, "pull", imageReference)
+
+ // the pull output includes "Digest: <digest>", so find that
+ matches := digestRegex.FindStringSubmatch(out)
+ if len(matches) != 2 {
+ c.Fatalf("unable to parse digest from pull output: %s", out)
+ }
+ pullDigest := matches[1]
+
+ // make sure the pushed and pulled digests match
+ if pushDigest.String() != pullDigest {
+ c.Fatalf("push digest %q didn't match pull digest %q", pushDigest, pullDigest)
+ }
+}
+
+func (s *DockerRegistrySuite) TestPullByDigestNoFallback(c *check.C) {
+ // pull from the registry using the <name>@<digest> reference
+ imageReference := fmt.Sprintf("%s@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", repoName)
+ out, _, err := dockerCmdWithError(c, "pull", imageReference)
+ if err == nil || !strings.Contains(out, "manifest unknown") {
+ c.Fatalf("expected non-zero exit status and correct error message when pulling non-existing image: %s", out)
+ }
+}
+
+func (s *DockerRegistrySuite) TestCreateByDigest(c *check.C) {
+ pushDigest, err := setupImage(c)
+ if err != nil {
+ c.Fatalf("error setting up image: %v", err)
+ }
+
+ imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest)
+
+ containerName := "createByDigest"
+ out, _ := dockerCmd(c, "create", "--name", containerName, imageReference)
+
+ res, err := inspectField(containerName, "Config.Image")
+ if err != nil {
+ c.Fatalf("failed to get Config.Image: %s, %v", out, err)
+ }
+ if res != imageReference {
+ c.Fatalf("unexpected Config.Image: %s (expected %s)", res, imageReference)
+ }
+}
+
+func (s *DockerRegistrySuite) TestRunByDigest(c *check.C) {
+ pushDigest, err := setupImage(c)
+ if err != nil {
+ c.Fatalf("error setting up image: %v", err)
+ }
+
+ imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest)
+
+ containerName := "runByDigest"
+ out, _ := dockerCmd(c, "run", "--name", containerName, imageReference, "sh", "-c", "echo found=$digest")
+
+ foundRegex := regexp.MustCompile("found=([^\n]+)")
+ matches := foundRegex.FindStringSubmatch(out)
+ if len(matches) != 2 {
+ c.Fatalf("error locating expected 'found=1' output: %s", out)
+ }
+ if matches[1] != "1" {
+ c.Fatalf("Expected %q, got %q", "1", matches[1])
+ }
+
+ res, err := inspectField(containerName, "Config.Image")
+ if err != nil {
+ c.Fatalf("failed to get Config.Image: %s, %v", out, err)
+ }
+ if res != imageReference {
+ c.Fatalf("unexpected Config.Image: %s (expected %s)", res, imageReference)
+ }
+}
+
+func (s *DockerRegistrySuite) TestRemoveImageByDigest(c *check.C) {
+ digest, err := setupImage(c)
+ if err != nil {
+ c.Fatalf("error setting up image: %v", err)
+ }
+
+ imageReference := fmt.Sprintf("%s@%s", repoName, digest)
+
+ // pull from the registry using the <name>@<digest> reference
+ dockerCmd(c, "pull", imageReference)
+
+ // make sure inspect runs ok
+ if _, err := inspectField(imageReference, "Id"); err != nil {
+ c.Fatalf("failed to inspect image: %v", err)
+ }
+
+ // do the delete
+ if err := deleteImages(imageReference); err != nil {
+ c.Fatalf("unexpected error deleting image: %v", err)
+ }
+
+ // try to inspect again - it should error this time
+ if _, err := inspectField(imageReference, "Id"); err == nil {
+ c.Fatalf("unexpected nil err trying to inspect what should be a non-existent image")
+ } else if !strings.Contains(err.Error(), "No such image") {
+ c.Fatalf("expected 'No such image' output, got %v", err)
+ }
+}
+
+func (s *DockerRegistrySuite) TestBuildByDigest(c *check.C) {
+ digest, err := setupImage(c)
+ if err != nil {
+ c.Fatalf("error setting up image: %v", err)
+ }
+
+ imageReference := fmt.Sprintf("%s@%s", repoName, digest)
+
+ // pull from the registry using the <name>@<digest> reference
+ dockerCmd(c, "pull", imageReference)
+
+ // get the image id
+ imageID, err := inspectField(imageReference, "Id")
+ if err != nil {
+ c.Fatalf("error getting image id: %v", err)
+ }
+
+ // do the build
+ name := "buildbydigest"
+ _, err = buildImage(name, fmt.Sprintf(
+ `FROM %s
+ CMD ["/bin/echo", "Hello World"]`, imageReference),
+ true)
+ if err != nil {
+ c.Fatal(err)
+ }
+
+ // get the build's image id
+ res, err := inspectField(name, "Config.Image")
+ if err != nil {
+ c.Fatal(err)
+ }
+ // make sure they match
+ if res != imageID {
+ c.Fatalf("Image %s, expected %s", res, imageID)
+ }
+}
+
+func (s *DockerRegistrySuite) TestTagByDigest(c *check.C) {
+ digest, err := setupImage(c)
+ if err != nil {
+ c.Fatalf("error setting up image: %v", err)
+ }
+
+ imageReference := fmt.Sprintf("%s@%s", repoName, digest)
+
+ // pull from the registry using the <name>@<digest> reference
+ dockerCmd(c, "pull", imageReference)
+
+ // tag it
+ tag := "tagbydigest"
+ dockerCmd(c, "tag", imageReference, tag)
+
+ expectedID, err := inspectField(imageReference, "Id")
+ if err != nil {
+ c.Fatalf("error getting original image id: %v", err)
+ }
+
+ tagID, err := inspectField(tag, "Id")
+ if err != nil {
+ c.Fatalf("error getting tagged image id: %v", err)
+ }
+
+ if tagID != expectedID {
+ c.Fatalf("expected image id %q, got %q", expectedID, tagID)
+ }
+}
+
+func (s *DockerRegistrySuite) TestListImagesWithoutDigests(c *check.C) {
+ digest, err := setupImage(c)
+ if err != nil {
+ c.Fatalf("error setting up image: %v", err)
+ }
+
+ imageReference := fmt.Sprintf("%s@%s", repoName, digest)
+
+ // pull from the registry using the <name>@<digest> reference
+ dockerCmd(c, "pull", imageReference)
+
+ out, _ := dockerCmd(c, "images")
+
+ if strings.Contains(out, "DIGEST") {
+ c.Fatalf("list output should not have contained DIGEST header: %s", out)
+ }
+
+}
+
+func (s *DockerRegistrySuite) TestListImagesWithDigests(c *check.C) {
+
+ // setup image1
+ digest1, err := setupImageWithTag(c, "tag1")
+ if err != nil {
+ c.Fatalf("error setting up image: %v", err)
+ }
+ imageReference1 := fmt.Sprintf("%s@%s", repoName, digest1)
+ c.Logf("imageReference1 = %s", imageReference1)
+
+ // pull image1 by digest
+ dockerCmd(c, "pull", imageReference1)
+
+ // list images
+ out, _ := dockerCmd(c, "images", "--digests")
+
+ // make sure repo shown, tag=<none>, digest = $digest1
+ re1 := regexp.MustCompile(`\s*` + repoName + `\s*<none>\s*` + digest1.String() + `\s`)
+ if !re1.MatchString(out) {
+ c.Fatalf("expected %q: %s", re1.String(), out)
+ }
+
+ // setup image2
+ digest2, err := setupImageWithTag(c, "tag2")
+ if err != nil {
+ c.Fatalf("error setting up image: %v", err)
+ }
+ imageReference2 := fmt.Sprintf("%s@%s", repoName, digest2)
+ c.Logf("imageReference2 = %s", imageReference2)
+
+ // pull image1 by digest
+ dockerCmd(c, "pull", imageReference1)
+
+ // pull image2 by digest
+ dockerCmd(c, "pull", imageReference2)
+
+ // list images
+ out, _ = dockerCmd(c, "images", "--digests")
+
+ // make sure repo shown, tag=<none>, digest = $digest1
+ if !re1.MatchString(out) {
c.Fatalf("expected %q: %s", re1.String(), out) + } + + // make sure repo shown, tag=, digest = $digest2 + re2 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest2.String() + `\s`) + if !re2.MatchString(out) { + c.Fatalf("expected %q: %s", re2.String(), out) + } + + // pull tag1 + dockerCmd(c, "pull", repoName+":tag1") + + // list images + out, _ = dockerCmd(c, "images", "--digests") + + // make sure image 1 has repo, tag, AND repo, , digest + reWithTag1 := regexp.MustCompile(`\s*` + repoName + `\s*tag1\s*\s`) + reWithDigest1 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest1.String() + `\s`) + if !reWithTag1.MatchString(out) { + c.Fatalf("expected %q: %s", reWithTag1.String(), out) + } + if !reWithDigest1.MatchString(out) { + c.Fatalf("expected %q: %s", reWithDigest1.String(), out) + } + // make sure image 2 has repo, , digest + if !re2.MatchString(out) { + c.Fatalf("expected %q: %s", re2.String(), out) + } + + // pull tag 2 + dockerCmd(c, "pull", repoName+":tag2") + + // list images + out, _ = dockerCmd(c, "images", "--digests") + + // make sure image 1 has repo, tag, digest + if !reWithTag1.MatchString(out) { + c.Fatalf("expected %q: %s", re1.String(), out) + } + + // make sure image 2 has repo, tag, digest + reWithTag2 := regexp.MustCompile(`\s*` + repoName + `\s*tag2\s*\s`) + reWithDigest2 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest2.String() + `\s`) + if !reWithTag2.MatchString(out) { + c.Fatalf("expected %q: %s", reWithTag2.String(), out) + } + if !reWithDigest2.MatchString(out) { + c.Fatalf("expected %q: %s", reWithDigest2.String(), out) + } + + // list images + out, _ = dockerCmd(c, "images", "--digests") + + // make sure image 1 has repo, tag, digest + if !reWithTag1.MatchString(out) { + c.Fatalf("expected %q: %s", re1.String(), out) + } + // make sure image 2 has repo, tag, digest + if !reWithTag2.MatchString(out) { + c.Fatalf("expected %q: %s", re2.String(), out) + } + // make sure busybox has tag, but not digest + busyboxRe := regexp.MustCompile(`\s*busybox\s*latest\s*\s`) + if !busyboxRe.MatchString(out) { + c.Fatalf("expected %q: %s", busyboxRe.String(), out) + } +} + +func (s *DockerRegistrySuite) TestDeleteImageByIDOnlyPulledByDigest(c *check.C) { + pushDigest, err := setupImage(c) + if err != nil { + c.Fatalf("error setting up image: %v", err) + } + + // pull from the registry using the @ reference + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + dockerCmd(c, "pull", imageReference) + // just in case... + + imageID, err := inspectField(imageReference, "Id") + if err != nil { + c.Fatalf("error inspecting image id: %v", err) + } + + dockerCmd(c, "rmi", imageID) +} + +// TestPullFailsWithAlteredManifest tests that a `docker pull` fails when +// we have modified a manifest blob and its digest cannot be verified. +func (s *DockerRegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) { + manifestDigest, err := setupImage(c) + if err != nil { + c.Fatalf("error setting up image: %v", err) + } + + // Load the target manifest blob. + manifestBlob := s.reg.readBlobContents(c, manifestDigest) + + var imgManifest manifest.Manifest + if err := json.Unmarshal(manifestBlob, &imgManifest); err != nil { + c.Fatalf("unable to decode image manifest from blob: %s", err) + } + + // Add a malicious layer digest to the list of layers in the manifest. 
+ imgManifest.FSLayers = append(imgManifest.FSLayers, manifest.FSLayer{ + BlobSum: digest.Digest("sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"), + }) + + // Move the existing data file aside, so that we can replace it with a + // malicious blob of data. NOTE: we defer the returned undo func. + undo := s.reg.tempMoveBlobData(c, manifestDigest) + defer undo() + + alteredManifestBlob, err := json.Marshal(imgManifest) + if err != nil { + c.Fatalf("unable to encode altered image manifest to JSON: %s", err) + } + + s.reg.writeBlobContents(c, manifestDigest, alteredManifestBlob) + + // Now try pulling that image by digest. We should get an error about + // digest verification for the manifest digest. + + // Pull from the registry using the @ reference. + imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) + out, exitStatus, _ := dockerCmdWithError(c, "pull", imageReference) + if exitStatus == 0 { + c.Fatalf("expected a non-zero exit status but got %d: %s", exitStatus, out) + } + + expectedErrorMsg := fmt.Sprintf("image verification failed for digest %s", manifestDigest) + if !strings.Contains(out, expectedErrorMsg) { + c.Fatalf("expected error message %q in output: %s", expectedErrorMsg, out) + } +} + +// TestPullFailsWithAlteredLayer tests that a `docker pull` fails when +// we have modified a layer blob and its digest cannot be verified. +func (s *DockerRegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) { + manifestDigest, err := setupImage(c) + if err != nil { + c.Fatalf("error setting up image: %v", err) + } + + // Load the target manifest blob. + manifestBlob := s.reg.readBlobContents(c, manifestDigest) + + var imgManifest manifest.Manifest + if err := json.Unmarshal(manifestBlob, &imgManifest); err != nil { + c.Fatalf("unable to decode image manifest from blob: %s", err) + } + + // Next, get the digest of one of the layers from the manifest. + targetLayerDigest := imgManifest.FSLayers[0].BlobSum + + // Move the existing data file aside, so that we can replace it with a + // malicious blob of data. NOTE: we defer the returned undo func. + undo := s.reg.tempMoveBlobData(c, targetLayerDigest) + defer undo() + + // Now make a fake data blob in this directory. + s.reg.writeBlobContents(c, targetLayerDigest, []byte("This is not the data you are looking for.")) + + // Now try pulling that image by digest. We should get an error about + // digest verification for the target layer digest. + + // Pull from the registry using the @ reference. 
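+ // dockerCmdWithError returns the command output, exit status, and error instead of failing the test, which lets us assert on the expected failure below.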
+ imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) + out, exitStatus, _ := dockerCmdWithError(c, "pull", imageReference) + if exitStatus == 0 { + c.Fatalf("expected a zero exit status but got: %d", exitStatus) + } + + expectedErrorMsg := fmt.Sprintf("filesystem layer verification failed for digest %s", targetLayerDigest) + if !strings.Contains(out, expectedErrorMsg) { + c.Fatalf("expected error message %q in output: %s", expectedErrorMsg, out) + } +} diff --git a/integration-cli/docker_cli_commit_test.go b/integration-cli/docker_cli_commit_test.go new file mode 100644 index 00000000..125b2e39 --- /dev/null +++ b/integration-cli/docker_cli_commit_test.go @@ -0,0 +1,215 @@ +package main + +import ( + "strings" + + "github.com/go-check/check" +) + +func (s *DockerSuite) TestCommitAfterContainerIsDone(c *check.C) { + out, _ := dockerCmd(c, "run", "-i", "-a", "stdin", "busybox", "echo", "foo") + + cleanedContainerID := strings.TrimSpace(out) + + dockerCmd(c, "wait", cleanedContainerID) + + out, _ = dockerCmd(c, "commit", cleanedContainerID) + + cleanedImageID := strings.TrimSpace(out) + + dockerCmd(c, "inspect", cleanedImageID) +} + +func (s *DockerSuite) TestCommitWithoutPause(c *check.C) { + out, _ := dockerCmd(c, "run", "-i", "-a", "stdin", "busybox", "echo", "foo") + + cleanedContainerID := strings.TrimSpace(out) + + dockerCmd(c, "wait", cleanedContainerID) + + out, _ = dockerCmd(c, "commit", "-p=false", cleanedContainerID) + + cleanedImageID := strings.TrimSpace(out) + + dockerCmd(c, "inspect", cleanedImageID) +} + +//test commit a paused container should not unpause it after commit +func (s *DockerSuite) TestCommitPausedContainer(c *check.C) { + defer unpauseAllContainers() + out, _ := dockerCmd(c, "run", "-i", "-d", "busybox") + + cleanedContainerID := strings.TrimSpace(out) + + dockerCmd(c, "pause", cleanedContainerID) + + out, _ = dockerCmd(c, "commit", cleanedContainerID) + + out, err := inspectField(cleanedContainerID, "State.Paused") + c.Assert(err, check.IsNil) + if !strings.Contains(out, "true") { + c.Fatalf("commit should not unpause a paused container") + } +} + +func (s *DockerSuite) TestCommitNewFile(c *check.C) { + + dockerCmd(c, "run", "--name", "commiter", "busybox", "/bin/sh", "-c", "echo koye > /foo") + + imageID, _ := dockerCmd(c, "commit", "commiter") + imageID = strings.Trim(imageID, "\r\n") + + out, _ := dockerCmd(c, "run", imageID, "cat", "/foo") + + if actual := strings.Trim(out, "\r\n"); actual != "koye" { + c.Fatalf("expected output koye received %q", actual) + } + +} + +func (s *DockerSuite) TestCommitHardlink(c *check.C) { + + firstOutput, _ := dockerCmd(c, "run", "-t", "--name", "hardlinks", "busybox", "sh", "-c", "touch file1 && ln file1 file2 && ls -di file1 file2") + + chunks := strings.Split(strings.TrimSpace(firstOutput), " ") + inode := chunks[0] + found := false + for _, chunk := range chunks[1:] { + if chunk == inode { + found = true + break + } + } + if !found { + c.Fatalf("Failed to create hardlink in a container. Expected to find %q in %q", inode, chunks[1:]) + } + + imageID, _ := dockerCmd(c, "commit", "hardlinks", "hardlinks") + imageID = strings.Trim(imageID, "\r\n") + + secondOutput, _ := dockerCmd(c, "run", "-t", "hardlinks", "ls", "-di", "file1", "file2") + + chunks = strings.Split(strings.TrimSpace(secondOutput), " ") + inode = chunks[0] + found = false + for _, chunk := range chunks[1:] { + if chunk == inode { + found = true + break + } + } + if !found { + c.Fatalf("Failed to create hardlink in a container. 
Expected to find %q in %q", inode, chunks[1:]) + } + +} + +func (s *DockerSuite) TestCommitTTY(c *check.C) { + + dockerCmd(c, "run", "-t", "--name", "tty", "busybox", "/bin/ls") + + imageID, _ := dockerCmd(c, "commit", "tty", "ttytest") + imageID = strings.Trim(imageID, "\r\n") + + dockerCmd(c, "run", "ttytest", "/bin/ls") + +} + +func (s *DockerSuite) TestCommitWithHostBindMount(c *check.C) { + + dockerCmd(c, "run", "--name", "bind-commit", "-v", "/dev/null:/winning", "busybox", "true") + + imageID, _ := dockerCmd(c, "commit", "bind-commit", "bindtest") + imageID = strings.Trim(imageID, "\r\n") + + dockerCmd(c, "run", "bindtest", "true") + +} + +func (s *DockerSuite) TestCommitChange(c *check.C) { + + dockerCmd(c, "run", "--name", "test", "busybox", "true") + + imageID, _ := dockerCmd(c, "commit", + "--change", "EXPOSE 8080", + "--change", "ENV DEBUG true", + "--change", "ENV test 1", + "--change", "ENV PATH /foo", + "--change", "LABEL foo bar", + "--change", "CMD [\"/bin/sh\"]", + "--change", "WORKDIR /opt", + "--change", "ENTRYPOINT [\"/bin/sh\"]", + "--change", "USER testuser", + "--change", "VOLUME /var/lib/docker", + "--change", "ONBUILD /usr/local/bin/python-build --dir /app/src", + "test", "test-commit") + imageID = strings.Trim(imageID, "\r\n") + + expected := map[string]string{ + "Config.ExposedPorts": "map[8080/tcp:{}]", + "Config.Env": "[DEBUG=true test=1 PATH=/foo]", + "Config.Labels": "map[foo:bar]", + "Config.Cmd": "{[/bin/sh]}", + "Config.WorkingDir": "/opt", + "Config.Entrypoint": "{[/bin/sh]}", + "Config.User": "testuser", + "Config.Volumes": "map[/var/lib/docker:{}]", + "Config.OnBuild": "[/usr/local/bin/python-build --dir /app/src]", + } + + for conf, value := range expected { + res, err := inspectField(imageID, conf) + c.Assert(err, check.IsNil) + if res != value { + c.Errorf("%s('%s'), expected %s", conf, res, value) + } + } + +} + +// TODO: commit --run is deprecated, remove this once --run is removed +func (s *DockerSuite) TestCommitMergeConfigRun(c *check.C) { + name := "commit-test" + out, _ := dockerCmd(c, "run", "-d", "-e=FOO=bar", "busybox", "/bin/sh", "-c", "echo testing > /tmp/foo") + id := strings.TrimSpace(out) + + dockerCmd(c, "commit", `--run={"Cmd": ["cat", "/tmp/foo"]}`, id, "commit-test") + + out, _ = dockerCmd(c, "run", "--name", name, "commit-test") + if strings.TrimSpace(out) != "testing" { + c.Fatal("run config in committed container was not merged") + } + + type cfg struct { + Env []string + Cmd []string + } + config1 := cfg{} + if err := inspectFieldAndMarshall(id, "Config", &config1); err != nil { + c.Fatal(err) + } + config2 := cfg{} + if err := inspectFieldAndMarshall(name, "Config", &config2); err != nil { + c.Fatal(err) + } + + // Env has at least PATH loaded as well here, so let's just grab the FOO one + var env1, env2 string + for _, e := range config1.Env { + if strings.HasPrefix(e, "FOO") { + env1 = e + break + } + } + for _, e := range config2.Env { + if strings.HasPrefix(e, "FOO") { + env2 = e + break + } + } + + if len(config1.Env) != len(config2.Env) || env1 != env2 && env2 != "" { + c.Fatalf("expected envs to match: %v - %v", config1.Env, config2.Env) + } + +} diff --git a/integration-cli/docker_cli_config_test.go b/integration-cli/docker_cli_config_test.go new file mode 100644 index 00000000..2b08f47e --- /dev/null +++ b/integration-cli/docker_cli_config_test.go @@ -0,0 +1,147 @@ +package main + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "path/filepath" + "runtime" + + 
"github.com/docker/docker/autogen/dockerversion" + "github.com/docker/docker/pkg/homedir" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestConfigHttpHeader(c *check.C) { + testRequires(c, UnixCli) // Can't set/unset HOME on windows right now + // We either need a level of Go that supports Unsetenv (for cases + // when HOME/USERPROFILE isn't set), or we need to be able to use + // os/user but user.Current() only works if we aren't statically compiling + + var headers map[string][]string + + server := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + headers = r.Header + })) + defer server.Close() + + homeKey := homedir.Key() + homeVal := homedir.Get() + tmpDir, _ := ioutil.TempDir("", "fake-home") + defer os.RemoveAll(tmpDir) + + dotDocker := filepath.Join(tmpDir, ".docker") + os.Mkdir(dotDocker, 0600) + tmpCfg := filepath.Join(dotDocker, "config.json") + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpDir) + + data := `{ + "HttpHeaders": { "MyHeader": "MyValue" } + }` + + err := ioutil.WriteFile(tmpCfg, []byte(data), 0600) + if err != nil { + c.Fatalf("Err creating file(%s): %v", tmpCfg, err) + } + + cmd := exec.Command(dockerBinary, "-H="+server.URL[7:], "ps") + out, _, _ := runCommandWithOutput(cmd) + + if headers["User-Agent"] == nil { + c.Fatalf("Missing User-Agent: %q\nout:%v", headers, out) + } + + if headers["User-Agent"][0] != "Docker-Client/"+dockerversion.VERSION+" ("+runtime.GOOS+")" { + c.Fatalf("Badly formatted User-Agent: %q\nout:%v", headers, out) + } + + if headers["Myheader"] == nil || headers["Myheader"][0] != "MyValue" { + c.Fatalf("Missing/bad header: %q\nout:%v", headers, out) + } +} + +func (s *DockerSuite) TestConfigDir(c *check.C) { + cDir, _ := ioutil.TempDir("", "fake-home") + + // First make sure pointing to empty dir doesn't generate an error + out, rc := dockerCmd(c, "--config", cDir, "ps") + + if rc != 0 { + c.Fatalf("ps1 didn't work:\nrc:%d\nout%s", rc, out) + } + + // Test with env var too + cmd := exec.Command(dockerBinary, "ps") + cmd.Env = append(os.Environ(), "DOCKER_CONFIG="+cDir) + out, rc, err := runCommandWithOutput(cmd) + + if rc != 0 || err != nil { + c.Fatalf("ps2 didn't work:\nrc:%d\nout%s\nerr:%v", rc, out, err) + } + + // Start a server so we can check to see if the config file was + // loaded properly + var headers map[string][]string + + server := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + headers = r.Header + })) + defer server.Close() + + // Create a dummy config file in our new config dir + data := `{ + "HttpHeaders": { "MyHeader": "MyValue" } + }` + + tmpCfg := filepath.Join(cDir, "config.json") + err = ioutil.WriteFile(tmpCfg, []byte(data), 0600) + if err != nil { + c.Fatalf("Err creating file(%s): %v", tmpCfg, err) + } + + cmd = exec.Command(dockerBinary, "--config", cDir, "-H="+server.URL[7:], "ps") + out, _, _ = runCommandWithOutput(cmd) + + if headers["Myheader"] == nil || headers["Myheader"][0] != "MyValue" { + c.Fatalf("ps3 - Missing header: %q\nout:%v", headers, out) + } + + // Reset headers and try again using env var this time + headers = map[string][]string{} + cmd = exec.Command(dockerBinary, "-H="+server.URL[7:], "ps") + cmd.Env = append(os.Environ(), "DOCKER_CONFIG="+cDir) + out, _, _ = runCommandWithOutput(cmd) + + if headers["Myheader"] == nil || headers["Myheader"][0] != "MyValue" { + c.Fatalf("ps4 - Missing header: %q\nout:%v", headers, out) + } + + // Reset headers and make sure flag overrides the env 
var + headers = map[string][]string{} + cmd = exec.Command(dockerBinary, "--config", cDir, "-H="+server.URL[7:], "ps") + cmd.Env = append(os.Environ(), "DOCKER_CONFIG=MissingDir") + out, _, _ = runCommandWithOutput(cmd) + + if headers["Myheader"] == nil || headers["Myheader"][0] != "MyValue" { + c.Fatalf("ps5 - Missing header: %q\nout:%v", headers, out) + } + + // Reset headers and make sure flag overrides the env var. + // Almost same as previous but make sure the "MissingDir" isn't + // ignored - we don't want to default back to the env var. + headers = map[string][]string{} + cmd = exec.Command(dockerBinary, "--config", "MissingDir", "-H="+server.URL[7:], "ps") + cmd.Env = append(os.Environ(), "DOCKER_CONFIG="+cDir) + out, _, _ = runCommandWithOutput(cmd) + + if headers["Myheader"] != nil { + c.Fatalf("ps6 - Headers are there but shouldn't be: %q\nout:%v", headers, out) + } + +} diff --git a/integration-cli/docker_cli_cp_from_container_test.go b/integration-cli/docker_cli_cp_from_container_test.go new file mode 100644 index 00000000..945a34f4 --- /dev/null +++ b/integration-cli/docker_cli_cp_from_container_test.go @@ -0,0 +1,611 @@ +package main + +import ( + "os" + "path/filepath" + + "github.com/go-check/check" +) + +// docker cp CONTAINER:PATH LOCALPATH + +// Try all of the test cases from the archive package which implements the +// internals of `docker cp` and ensure that the behavior matches when actually +// copying to and from containers. + +// Basic assumptions about SRC and DST: +// 1. SRC must exist. +// 2. If SRC ends with a trailing separator, it must be a directory. +// 3. DST parent directory must exist. +// 4. If DST exists as a file, it must not end with a trailing separator. + +// First get these easy error cases out of the way. + +// Test for error when SRC does not exist. +func (s *DockerSuite) TestCpFromErrSrcNotExists(c *check.C) { + cID := makeTestContainer(c, testContainerOptions{}) + defer deleteContainer(cID) + + tmpDir := getTestDir(c, "test-cp-from-err-src-not-exists") + defer os.RemoveAll(tmpDir) + + err := runDockerCp(c, containerCpPath(cID, "file1"), tmpDir) + if err == nil { + c.Fatal("expected IsNotExist error, but got nil instead") + } + + if !isCpNotExist(err) { + c.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } +} + +// Test for error when SRC ends in a trailing +// path separator but it exists as a file. +func (s *DockerSuite) TestCpFromErrSrcNotDir(c *check.C) { + cID := makeTestContainer(c, testContainerOptions{addContent: true}) + defer deleteContainer(cID) + + tmpDir := getTestDir(c, "test-cp-from-err-src-not-dir") + defer os.RemoveAll(tmpDir) + + err := runDockerCp(c, containerCpPathTrailingSep(cID, "file1"), tmpDir) + if err == nil { + c.Fatal("expected IsNotDir error, but got nil instead") + } + + if !isCpNotDir(err) { + c.Fatalf("expected IsNotDir error, but got %T: %s", err, err) + } +} + +// Test for error when SRC is a valid file or directory, +// but the DST parent directory does not exist. +func (s *DockerSuite) TestCpFromErrDstParentNotExists(c *check.C) { + cID := makeTestContainer(c, testContainerOptions{addContent: true}) + defer deleteContainer(cID) + + tmpDir := getTestDir(c, "test-cp-from-err-dst-parent-not-exists") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + // Try with a file source. 
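+ // (cpPath and containerCpPath are suite helpers defined elsewhere: cpPath joins local path elements, and containerCpPath presumably renders the CONTAINER:PATH form that `docker cp` expects.)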
+ srcPath := containerCpPath(cID, "/file1") + dstPath := cpPath(tmpDir, "notExists", "file1") + + err := runDockerCp(c, srcPath, dstPath) + if err == nil { + c.Fatal("expected IsNotExist error, but got nil instead") + } + + if !isCpNotExist(err) { + c.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } + + // Try with a directory source. + srcPath = containerCpPath(cID, "/dir1") + + if err := runDockerCp(c, srcPath, dstPath); err == nil { + c.Fatal("expected IsNotExist error, but got nil instead") + } + + if !isCpNotExist(err) { + c.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } +} + +// Test for error when DST ends in a trailing +// path separator but exists as a file. +func (s *DockerSuite) TestCpFromErrDstNotDir(c *check.C) { + cID := makeTestContainer(c, testContainerOptions{addContent: true}) + defer deleteContainer(cID) + + tmpDir := getTestDir(c, "test-cp-from-err-dst-not-dir") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + // Try with a file source. + srcPath := containerCpPath(cID, "/file1") + dstPath := cpPathTrailingSep(tmpDir, "file1") + + err := runDockerCp(c, srcPath, dstPath) + if err == nil { + c.Fatal("expected IsNotDir error, but got nil instead") + } + + if !isCpNotDir(err) { + c.Fatalf("expected IsNotDir error, but got %T: %s", err, err) + } + + // Try with a directory source. + srcPath = containerCpPath(cID, "/dir1") + + if err := runDockerCp(c, srcPath, dstPath); err == nil { + c.Fatal("expected IsNotDir error, but got nil instead") + } + + if !isCpNotDir(err) { + c.Fatalf("expected IsNotDir error, but got %T: %s", err, err) + } +} + +// Check that copying from a container to a local symlink copies to the symlink +// target and does not overwrite the local symlink itself. +func (s *DockerSuite) TestCpFromSymlinkDestination(c *check.C) { + cID := makeTestContainer(c, testContainerOptions{addContent: true}) + defer deleteContainer(cID) + + tmpDir := getTestDir(c, "test-cp-from-err-dst-not-dir") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + // First, copy a file from the container to a symlink to a file. This + // should overwrite the symlink target contents with the source contents. + srcPath := containerCpPath(cID, "/file2") + dstPath := cpPath(tmpDir, "symlinkToFile1") + + if err := runDockerCp(c, srcPath, dstPath); err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + // The symlink should not have been modified. + if err := symlinkTargetEquals(c, dstPath, "file1"); err != nil { + c.Fatal(err) + } + + // The file should have the contents of "file2" now. + if err := fileContentEquals(c, cpPath(tmpDir, "file1"), "file2\n"); err != nil { + c.Fatal(err) + } + + // Next, copy a file from the container to a symlink to a directory. This + // should copy the file into the symlink target directory. + dstPath = cpPath(tmpDir, "symlinkToDir1") + + if err := runDockerCp(c, srcPath, dstPath); err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + // The symlink should not have been modified. + if err := symlinkTargetEquals(c, dstPath, "dir1"); err != nil { + c.Fatal(err) + } + + // The file should have the contents of "file2" now. + if err := fileContentEquals(c, cpPath(tmpDir, "file2"), "file2\n"); err != nil { + c.Fatal(err) + } + + // Next, copy a file from the container to a symlink to a file that does + // not exist (a broken symlink). This should create the target file with + // the contents of the source file. 
+ dstPath = cpPath(tmpDir, "brokenSymlinkToFileX") + + if err := runDockerCp(c, srcPath, dstPath); err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + // The symlink should not have been modified. + if err := symlinkTargetEquals(c, dstPath, "fileX"); err != nil { + c.Fatal(err) + } + + // The file should have the contents of "file2" now. + if err := fileContentEquals(c, cpPath(tmpDir, "fileX"), "file2\n"); err != nil { + c.Fatal(err) + } + + // Next, copy a directory from the container to a symlink to a local + // directory. This should copy the directory into the symlink target + // directory and not modify the symlink. + srcPath = containerCpPath(cID, "/dir2") + dstPath = cpPath(tmpDir, "symlinkToDir1") + + if err := runDockerCp(c, srcPath, dstPath); err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + // The symlink should not have been modified. + if err := symlinkTargetEquals(c, dstPath, "dir1"); err != nil { + c.Fatal(err) + } + + // The directory should now contain a copy of "dir2". + if err := fileContentEquals(c, cpPath(tmpDir, "dir1/dir2/file2-1"), "file2-1\n"); err != nil { + c.Fatal(err) + } + + // Next, copy a directory from the container to a symlink to a local + // directory that does not exist (a broken symlink). This should create + // the target as a directory with the contents of the source directory. It + // should not modify the symlink. + dstPath = cpPath(tmpDir, "brokenSymlinkToDirX") + + if err := runDockerCp(c, srcPath, dstPath); err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + // The symlink should not have been modified. + if err := symlinkTargetEquals(c, dstPath, "dirX"); err != nil { + c.Fatal(err) + } + + // The "dirX" directory should now be a copy of "dir2". + if err := fileContentEquals(c, cpPath(tmpDir, "dirX/file2-1"), "file2-1\n"); err != nil { + c.Fatal(err) + } +} + +// Possibilities are reduced to the remaining 10 cases: +// +// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action +// =================================================================================================== +// A | no | - | no | - | no | create file +// B | no | - | no | - | yes | error +// C | no | - | yes | no | - | overwrite file +// D | no | - | yes | yes | - | create file in dst dir +// E | yes | no | no | - | - | create dir, copy contents +// F | yes | no | yes | no | - | error +// G | yes | no | yes | yes | - | copy dir and contents +// H | yes | yes | no | - | - | create dir, copy contents +// I | yes | yes | yes | no | - | error +// J | yes | yes | yes | yes | - | copy dir contents +// + +// A. SRC specifies a file and DST (no trailing path separator) doesn't +// exist. This should create a file with the name DST and copy the +// contents of the source file into it. +func (s *DockerSuite) TestCpFromCaseA(c *check.C) { + cID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + defer deleteContainer(cID) + + tmpDir := getTestDir(c, "test-cp-from-case-a") + defer os.RemoveAll(tmpDir) + + srcPath := containerCpPath(cID, "/root/file1") + dstPath := cpPath(tmpDir, "itWorks.txt") + + if err := runDockerCp(c, srcPath, dstPath); err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + if err := fileContentEquals(c, dstPath, "file1\n"); err != nil { + c.Fatal(err) + } +} + +// B. SRC specifies a file and DST (with trailing path separator) doesn't +// exist. 
This should cause an error because the copy operation cannot +// create a directory when copying a single file. +func (s *DockerSuite) TestCpFromCaseB(c *check.C) { + cID := makeTestContainer(c, testContainerOptions{addContent: true}) + defer deleteContainer(cID) + + tmpDir := getTestDir(c, "test-cp-from-case-b") + defer os.RemoveAll(tmpDir) + + srcPath := containerCpPath(cID, "/file1") + dstDir := cpPathTrailingSep(tmpDir, "testDir") + + err := runDockerCp(c, srcPath, dstDir) + if err == nil { + c.Fatal("expected DirNotExists error, but got nil instead") + } + + if !isCpDirNotExist(err) { + c.Fatalf("expected DirNotExists error, but got %T: %s", err, err) + } +} + +// C. SRC specifies a file and DST exists as a file. This should overwrite +// the file at DST with the contents of the source file. +func (s *DockerSuite) TestCpFromCaseC(c *check.C) { + cID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + defer deleteContainer(cID) + + tmpDir := getTestDir(c, "test-cp-from-case-c") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := containerCpPath(cID, "/root/file1") + dstPath := cpPath(tmpDir, "file2") + + // Ensure the local file starts with different content. + if err := fileContentEquals(c, dstPath, "file2\n"); err != nil { + c.Fatal(err) + } + + if err := runDockerCp(c, srcPath, dstPath); err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + if err := fileContentEquals(c, dstPath, "file1\n"); err != nil { + c.Fatal(err) + } +} + +// D. SRC specifies a file and DST exists as a directory. This should place +// a copy of the source file inside it using the basename from SRC. Ensure +// this works whether DST has a trailing path separator or not. +func (s *DockerSuite) TestCpFromCaseD(c *check.C) { + cID := makeTestContainer(c, testContainerOptions{addContent: true}) + defer deleteContainer(cID) + + tmpDir := getTestDir(c, "test-cp-from-case-d") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := containerCpPath(cID, "/file1") + dstDir := cpPath(tmpDir, "dir1") + dstPath := filepath.Join(dstDir, "file1") + + // Ensure that dstPath doesn't exist. + if _, err := os.Stat(dstPath); !os.IsNotExist(err) { + c.Fatalf("did not expect dstPath %q to exist", dstPath) + } + + if err := runDockerCp(c, srcPath, dstDir); err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + if err := fileContentEquals(c, dstPath, "file1\n"); err != nil { + c.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err := os.RemoveAll(dstDir); err != nil { + c.Fatalf("unable to remove dstDir: %s", err) + } + + if err := os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + c.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = cpPathTrailingSep(tmpDir, "dir1") + + if err := runDockerCp(c, srcPath, dstDir); err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + if err := fileContentEquals(c, dstPath, "file1\n"); err != nil { + c.Fatal(err) + } +} + +// E. SRC specifies a directory and DST does not exist. This should create a +// directory at DST and copy the contents of the SRC directory into the DST +// directory. Ensure this works whether DST has a trailing path separator or +// not. 
+func (s *DockerSuite) TestCpFromCaseE(c *check.C) { + cID := makeTestContainer(c, testContainerOptions{addContent: true}) + defer deleteContainer(cID) + + tmpDir := getTestDir(c, "test-cp-from-case-e") + defer os.RemoveAll(tmpDir) + + srcDir := containerCpPath(cID, "dir1") + dstDir := cpPath(tmpDir, "testDir") + dstPath := filepath.Join(dstDir, "file1-1") + + if err := runDockerCp(c, srcDir, dstDir); err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + if err := fileContentEquals(c, dstPath, "file1-1\n"); err != nil { + c.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err := os.RemoveAll(dstDir); err != nil { + c.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = cpPathTrailingSep(tmpDir, "testDir") + + if err := runDockerCp(c, srcDir, dstDir); err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + if err := fileContentEquals(c, dstPath, "file1-1\n"); err != nil { + c.Fatal(err) + } +} + +// F. SRC specifies a directory and DST exists as a file. This should cause an +// error as it is not possible to overwrite a file with a directory. +func (s *DockerSuite) TestCpFromCaseF(c *check.C) { + cID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + defer deleteContainer(cID) + + tmpDir := getTestDir(c, "test-cp-from-case-f") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := containerCpPath(cID, "/root/dir1") + dstFile := cpPath(tmpDir, "file1") + + err := runDockerCp(c, srcDir, dstFile) + if err == nil { + c.Fatal("expected ErrCannotCopyDir error, but got nil instead") + } + + if !isCpCannotCopyDir(err) { + c.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) + } +} + +// G. SRC specifies a directory and DST exists as a directory. This should copy +// the SRC directory and all its contents to the DST directory. Ensure this +// works whether DST has a trailing path separator or not. +func (s *DockerSuite) TestCpFromCaseG(c *check.C) { + cID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + defer deleteContainer(cID) + + tmpDir := getTestDir(c, "test-cp-from-case-g") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := containerCpPath(cID, "/root/dir1") + dstDir := cpPath(tmpDir, "dir2") + resultDir := filepath.Join(dstDir, "dir1") + dstPath := filepath.Join(resultDir, "file1-1") + + if err := runDockerCp(c, srcDir, dstDir); err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + if err := fileContentEquals(c, dstPath, "file1-1\n"); err != nil { + c.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err := os.RemoveAll(dstDir); err != nil { + c.Fatalf("unable to remove dstDir: %s", err) + } + + if err := os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + c.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = cpPathTrailingSep(tmpDir, "dir2") + + if err := runDockerCp(c, srcDir, dstDir); err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + if err := fileContentEquals(c, dstPath, "file1-1\n"); err != nil { + c.Fatal(err) + } +} + +// H. SRC specifies a directory's contents only and DST does not exist. This +// should create a directory at DST and copy the contents of the SRC +// directory (but not the directory itself) into the DST directory. Ensure +// this works whether DST has a trailing path separator or not. 
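+// (The trailing "/." appended to SRC below is what selects contents-only copying, mirroring the familiar `cp -r src/. dst` convention.)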
+func (s *DockerSuite) TestCpFromCaseH(c *check.C) { + cID := makeTestContainer(c, testContainerOptions{addContent: true}) + defer deleteContainer(cID) + + tmpDir := getTestDir(c, "test-cp-from-case-h") + defer os.RemoveAll(tmpDir) + + srcDir := containerCpPathTrailingSep(cID, "dir1") + "." + dstDir := cpPath(tmpDir, "testDir") + dstPath := filepath.Join(dstDir, "file1-1") + + if err := runDockerCp(c, srcDir, dstDir); err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + if err := fileContentEquals(c, dstPath, "file1-1\n"); err != nil { + c.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err := os.RemoveAll(dstDir); err != nil { + c.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = cpPathTrailingSep(tmpDir, "testDir") + + if err := runDockerCp(c, srcDir, dstDir); err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + if err := fileContentEquals(c, dstPath, "file1-1\n"); err != nil { + c.Fatal(err) + } +} + +// I. SRC specifies a directory's contents only and DST exists as a file. This +// should cause an error as it is not possible to overwrite a file with a +// directory. +func (s *DockerSuite) TestCpFromCaseI(c *check.C) { + cID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + defer deleteContainer(cID) + + tmpDir := getTestDir(c, "test-cp-from-case-i") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := containerCpPathTrailingSep(cID, "/root/dir1") + "." + dstFile := cpPath(tmpDir, "file1") + + err := runDockerCp(c, srcDir, dstFile) + if err == nil { + c.Fatal("expected ErrCannotCopyDir error, but got nil instead") + } + + if !isCpCannotCopyDir(err) { + c.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) + } +} + +// J. SRC specifies a directory's contents only and DST exists as a directory. +// This should copy the contents of the SRC directory (but not the directory +// itself) into the DST directory. Ensure this works whether DST has a +// trailing path separator or not. +func (s *DockerSuite) TestCpFromCaseJ(c *check.C) { + cID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + defer deleteContainer(cID) + + tmpDir := getTestDir(c, "test-cp-from-case-j") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := containerCpPathTrailingSep(cID, "/root/dir1") + "." + dstDir := cpPath(tmpDir, "dir2") + dstPath := filepath.Join(dstDir, "file1-1") + + if err := runDockerCp(c, srcDir, dstDir); err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + if err := fileContentEquals(c, dstPath, "file1-1\n"); err != nil { + c.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. 
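+ // Remove and recreate the destination first so the second copy starts from a clean, existing directory.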
+ + if err := os.RemoveAll(dstDir); err != nil { + c.Fatalf("unable to remove dstDir: %s", err) + } + + if err := os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + c.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = cpPathTrailingSep(tmpDir, "dir2") + + if err := runDockerCp(c, srcDir, dstDir); err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + if err := fileContentEquals(c, dstPath, "file1-1\n"); err != nil { + c.Fatal(err) + } +} diff --git a/integration-cli/docker_cli_cp_test.go b/integration-cli/docker_cli_cp_test.go new file mode 100644 index 00000000..64ae0b5d --- /dev/null +++ b/integration-cli/docker_cli_cp_test.go @@ -0,0 +1,804 @@ +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + + "github.com/go-check/check" +) + +const ( + cpTestPathParent = "/some" + cpTestPath = "/some/path" + cpTestName = "test" + cpFullPath = "/some/path/test" + + cpContainerContents = "holla, i am the container" + cpHostContents = "hello, i am the host" +) + +// Ensure that an all-local path case returns an error. +func (s *DockerSuite) TestCpLocalOnly(c *check.C) { + err := runDockerCp(c, "foo", "bar") + if err == nil { + c.Fatal("expected failure, got success") + } + + if !strings.Contains(err.Error(), "must specify at least one container source") { + c.Fatalf("unexpected output: %s", err.Error()) + } +} + +// Test for #5656 +// Check that garbage paths don't escape the container's rootfs +func (s *DockerSuite) TestCpGarbagePath(c *check.C) { + out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + if exitCode != 0 { + c.Fatal("failed to create a container", out) + } + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", cleanedContainerID) + if strings.TrimSpace(out) != "0" { + c.Fatal("failed to set up container", out) + } + + if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { + c.Fatal(err) + } + + hostFile, err := os.Create(cpFullPath) + if err != nil { + c.Fatal(err) + } + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + if err != nil { + c.Fatal(err) + } + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := path.Join("../../../../../../../../../../../../", cpFullPath) + + dockerCmd(c, "cp", cleanedContainerID+":"+path, tmpdir) + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + if err != nil { + c.Fatal(err) + } + + if string(test) == cpHostContents { + c.Errorf("output matched host file -- garbage path can escape container rootfs") + } + + if string(test) != cpContainerContents { + c.Errorf("output doesn't match the input for garbage path") + } + +} + +// Check that relative paths are relative to the container's rootfs +func (s *DockerSuite) TestCpRelativePath(c *check.C) { + out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + if exitCode != 0 { + c.Fatal("failed to create a container", out) + } + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", cleanedContainerID) + if strings.TrimSpace(out) != "0" { + c.Fatal("failed to set up container", out) + } + + if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { + c.Fatal(err) + } + + 
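+ // Write known content at the same path on the host; if the copy escaped the container's rootfs we would read this host file instead of the container's.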
hostFile, err := os.Create(cpFullPath) + if err != nil { + c.Fatal(err) + } + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + + if err != nil { + c.Fatal(err) + } + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + var relPath string + if path.IsAbs(cpFullPath) { + // normally this is `filepath.Rel("/", cpFullPath)` but we cannot + // get this unix-path manipulation on windows with filepath. + relPath = cpFullPath[1:] + } else { + c.Fatalf("path %s was assumed to be an absolute path", cpFullPath) + } + + dockerCmd(c, "cp", cleanedContainerID+":"+relPath, tmpdir) + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + if err != nil { + c.Fatal(err) + } + + if string(test) == cpHostContents { + c.Errorf("output matched host file -- relative path can escape container rootfs") + } + + if string(test) != cpContainerContents { + c.Errorf("output doesn't match the input for relative path") + } + +} + +// Check that absolute paths are relative to the container's rootfs +func (s *DockerSuite) TestCpAbsolutePath(c *check.C) { + out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + if exitCode != 0 { + c.Fatal("failed to create a container", out) + } + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", cleanedContainerID) + if strings.TrimSpace(out) != "0" { + c.Fatal("failed to set up container", out) + } + + if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { + c.Fatal(err) + } + + hostFile, err := os.Create(cpFullPath) + if err != nil { + c.Fatal(err) + } + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + + if err != nil { + c.Fatal(err) + } + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := cpFullPath + + dockerCmd(c, "cp", cleanedContainerID+":"+path, tmpdir) + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + if err != nil { + c.Fatal(err) + } + + if string(test) == cpHostContents { + c.Errorf("output matched host file -- absolute path can escape container rootfs") + } + + if string(test) != cpContainerContents { + c.Errorf("output doesn't match the input for absolute path") + } + +} + +// Test for #5619 +// Check that absolute symlinks are still relative to the container's rootfs +func (s *DockerSuite) TestCpAbsoluteSymlink(c *check.C) { + out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" container_path") + if exitCode != 0 { + c.Fatal("failed to create a container", out) + } + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", cleanedContainerID) + if strings.TrimSpace(out) != "0" { + c.Fatal("failed to set up container", out) + } + + if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { + c.Fatal(err) + } + + hostFile, err := os.Create(cpFullPath) + if err != nil { + c.Fatal(err) + } + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + + if err != nil { + c.Fatal(err) + } + + tmpname := 
filepath.Join(tmpdir, "container_path") + defer os.RemoveAll(tmpdir) + + path := path.Join("/", "container_path") + + dockerCmd(c, "cp", cleanedContainerID+":"+path, tmpdir) + + // We should have copied a symlink *NOT* the file itself! + linkTarget, err := os.Readlink(tmpname) + if err != nil { + c.Fatal(err) + } + + if linkTarget != filepath.FromSlash(cpFullPath) { + c.Errorf("symlink target was %q, but expected: %q", linkTarget, cpFullPath) + } +} + +// Check that symlinks to a directory behave as expected when copying one from +// a container. +func (s *DockerSuite) TestCpFromSymlinkToDirectory(c *check.C) { + out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPathParent+" /dir_link") + if exitCode != 0 { + c.Fatal("failed to create a container", out) + } + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", cleanedContainerID) + if strings.TrimSpace(out) != "0" { + c.Fatal("failed to set up container", out) + } + + testDir, err := ioutil.TempDir("", "test-cp-from-symlink-to-dir-") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(testDir) + + // This copy command should copy the symlink, not the target, into the + // temporary directory. + dockerCmd(c, "cp", cleanedContainerID+":"+"/dir_link", testDir) + + expectedPath := filepath.Join(testDir, "dir_link") + linkTarget, err := os.Readlink(expectedPath) + if err != nil { + c.Fatalf("unable to read symlink at %q: %v", expectedPath, err) + } + + if linkTarget != filepath.FromSlash(cpTestPathParent) { + c.Errorf("symlink target was %q, but expected: %q", linkTarget, cpTestPathParent) + } + + os.Remove(expectedPath) + + // This copy command should resolve the symlink (note the trailing + // seperator), copying the target into the temporary directory. + dockerCmd(c, "cp", cleanedContainerID+":"+"/dir_link/", testDir) + + // It *should not* have copied the directory using the target's name, but + // used the given name instead. + unexpectedPath := filepath.Join(testDir, cpTestPathParent) + if stat, err := os.Lstat(unexpectedPath); err == nil { + c.Fatalf("target name was copied: %q - %q", stat.Mode(), stat.Name()) + } + + // It *should* have copied the directory using the asked name "dir_link". + stat, err := os.Lstat(expectedPath) + if err != nil { + c.Fatalf("unable to stat resource at %q: %v", expectedPath, err) + } + + if !stat.IsDir() { + c.Errorf("should have copied a directory but got %q instead", stat.Mode()) + } +} + +// Check that symlinks to a directory behave as expected when copying one to a +// container. +func (s *DockerSuite) TestCpToSymlinkToDirectory(c *check.C) { + testRequires(c, SameHostDaemon) // Requires local volume mount bind. + + testVol, err := ioutil.TempDir("", "test-cp-to-symlink-to-dir-") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(testVol) + + // Create a test container with a local volume. We will test by copying + // to the volume path in the container which we can then verify locally. + out, exitCode := dockerCmd(c, "create", "-v", testVol+":/testVol", "busybox") + if exitCode != 0 { + c.Fatal("failed to create a container", out) + } + + cleanedContainerID := strings.TrimSpace(out) + + // Create a temp directory to hold a test file nested in a direcotry. 
+ testDir, err := ioutil.TempDir("", "test-cp-to-symlink-to-dir-") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(testDir) + + // This file will be at "/testDir/some/path/test" and will be copied into + // the test volume later. + hostTestFilename := filepath.Join(testDir, cpFullPath) + if err := os.MkdirAll(filepath.Dir(hostTestFilename), os.FileMode(0700)); err != nil { + c.Fatal(err) + } + if err := ioutil.WriteFile(hostTestFilename, []byte(cpHostContents), os.FileMode(0600)); err != nil { + c.Fatal(err) + } + + // Now create another temp directory to hold a symlink to the + // "/testDir/some" directory. + linkDir, err := ioutil.TempDir("", "test-cp-to-symlink-to-dir-") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(linkDir) + + // Then symlink "/linkDir/dir_link" to "/testdir/some". + linkTarget := filepath.Join(testDir, cpTestPathParent) + localLink := filepath.Join(linkDir, "dir_link") + if err := os.Symlink(linkTarget, localLink); err != nil { + c.Fatal(err) + } + + // Now copy that symlink into the test volume in the container. + dockerCmd(c, "cp", localLink, cleanedContainerID+":/testVol") + + // This copy command should have copied the symlink *not* the target. + expectedPath := filepath.Join(testVol, "dir_link") + actualLinkTarget, err := os.Readlink(expectedPath) + if err != nil { + c.Fatalf("unable to read symlink at %q: %v", expectedPath, err) + } + + if actualLinkTarget != linkTarget { + c.Errorf("symlink target was %q, but expected: %q", actualLinkTarget, linkTarget) + } + + // Good, now remove that copied link for the next test. + os.Remove(expectedPath) + + // This copy command should resolve the symlink (note the trailing + // separator), copying the target into the test volume directory in the + // container. + dockerCmd(c, "cp", localLink+"/", cleanedContainerID+":/testVol") + + // It *should not* have copied the directory using the target's name, but + // used the given name instead. + unexpectedPath := filepath.Join(testVol, cpTestPathParent) + if stat, err := os.Lstat(unexpectedPath); err == nil { + c.Fatalf("target name was copied: %q - %q", stat.Mode(), stat.Name()) + } + + // It *should* have copied the directory using the asked name "dir_link". 
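+ // i.e. the copy keeps the symlink's own name even though the trailing separator made it resolve the link to find the source directory.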
+ stat, err := os.Lstat(expectedPath) + if err != nil { + c.Fatalf("unable to stat resource at %q: %v", expectedPath, err) + } + + if !stat.IsDir() { + c.Errorf("should have copied a directory but got %q instead", stat.Mode()) + } + + // And this directory should contain the file copied from the host at the + // expected location: "/testVol/dir_link/path/test" + expectedFilepath := filepath.Join(testVol, "dir_link/path/test") + fileContents, err := ioutil.ReadFile(expectedFilepath) + if err != nil { + c.Fatal(err) + } + + if string(fileContents) != cpHostContents { + c.Fatalf("file contains %q but expected %q", string(fileContents), cpHostContents) + } +} + +// Test for #5619 +// Check that symlinks which are part of the resource path are still relative to the container's rootfs +func (s *DockerSuite) TestCpSymlinkComponent(c *check.C) { + out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPath+" container_path") + if exitCode != 0 { + c.Fatal("failed to create a container", out) + } + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", cleanedContainerID) + if strings.TrimSpace(out) != "0" { + c.Fatal("failed to set up container", out) + } + + if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { + c.Fatal(err) + } + + hostFile, err := os.Create(cpFullPath) + if err != nil { + c.Fatal(err) + } + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + + if err != nil { + c.Fatal(err) + } + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := path.Join("/", "container_path", cpTestName) + + dockerCmd(c, "cp", cleanedContainerID+":"+path, tmpdir) + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + if err != nil { + c.Fatal(err) + } + + if string(test) == cpHostContents { + c.Errorf("output matched host file -- symlink path component can escape container rootfs") + } + + if string(test) != cpContainerContents { + c.Errorf("output doesn't match the input for symlink path component") + } + +} + +// Check that cp with unprivileged user doesn't return any error +func (s *DockerSuite) TestCpUnprivilegedUser(c *check.C) { + testRequires(c, UnixCli) // uses chmod/su: not available on windows + + out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "touch "+cpTestName) + if exitCode != 0 { + c.Fatal("failed to create a container", out) + } + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", cleanedContainerID) + if strings.TrimSpace(out) != "0" { + c.Fatal("failed to set up container", out) + } + + tmpdir, err := ioutil.TempDir("", "docker-integration") + if err != nil { + c.Fatal(err) + } + + defer os.RemoveAll(tmpdir) + + if err = os.Chmod(tmpdir, 0777); err != nil { + c.Fatal(err) + } + + path := cpTestName + + _, _, err = runCommandWithOutput(exec.Command("su", "unprivilegeduser", "-c", dockerBinary+" cp "+cleanedContainerID+":"+path+" "+tmpdir)) + if err != nil { + c.Fatalf("couldn't copy with unprivileged user: %s:%s %s", cleanedContainerID, path, err) + } + +} + +func (s *DockerSuite) TestCpSpecialFiles(c *check.C) { + testRequires(c, SameHostDaemon) + + outDir, err := ioutil.TempDir("", "cp-test-special-files") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(outDir) + + out, exitCode := 
dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "touch /foo") + if exitCode != 0 { + c.Fatal("failed to create a container", out) + } + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", cleanedContainerID) + if strings.TrimSpace(out) != "0" { + c.Fatal("failed to set up container", out) + } + + // Copy actual /etc/resolv.conf + dockerCmd(c, "cp", cleanedContainerID+":/etc/resolv.conf", outDir) + + expected, err := ioutil.ReadFile("/var/lib/docker/containers/" + cleanedContainerID + "/resolv.conf") + actual, err := ioutil.ReadFile(outDir + "/resolv.conf") + + if !bytes.Equal(actual, expected) { + c.Fatalf("Expected copied file to be duplicate of the container resolvconf") + } + + // Copy actual /etc/hosts + dockerCmd(c, "cp", cleanedContainerID+":/etc/hosts", outDir) + + expected, err = ioutil.ReadFile("/var/lib/docker/containers/" + cleanedContainerID + "/hosts") + actual, err = ioutil.ReadFile(outDir + "/hosts") + + if !bytes.Equal(actual, expected) { + c.Fatalf("Expected copied file to be duplicate of the container hosts") + } + + // Copy actual /etc/resolv.conf + dockerCmd(c, "cp", cleanedContainerID+":/etc/hostname", outDir) + + expected, err = ioutil.ReadFile("/var/lib/docker/containers/" + cleanedContainerID + "/hostname") + actual, err = ioutil.ReadFile(outDir + "/hostname") + + if !bytes.Equal(actual, expected) { + c.Fatalf("Expected copied file to be duplicate of the container resolvconf") + } + +} + +func (s *DockerSuite) TestCpVolumePath(c *check.C) { + testRequires(c, SameHostDaemon) + + tmpDir, err := ioutil.TempDir("", "cp-test-volumepath") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + outDir, err := ioutil.TempDir("", "cp-test-volumepath-out") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(outDir) + _, err = os.Create(tmpDir + "/test") + if err != nil { + c.Fatal(err) + } + + out, exitCode := dockerCmd(c, "run", "-d", "-v", "/foo", "-v", tmpDir+"/test:/test", "-v", tmpDir+":/baz", "busybox", "/bin/sh", "-c", "touch /foo/bar") + if exitCode != 0 { + c.Fatal("failed to create a container", out) + } + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", cleanedContainerID) + if strings.TrimSpace(out) != "0" { + c.Fatal("failed to set up container", out) + } + + // Copy actual volume path + dockerCmd(c, "cp", cleanedContainerID+":/foo", outDir) + + stat, err := os.Stat(outDir + "/foo") + if err != nil { + c.Fatal(err) + } + if !stat.IsDir() { + c.Fatal("expected copied content to be dir") + } + stat, err = os.Stat(outDir + "/foo/bar") + if err != nil { + c.Fatal(err) + } + if stat.IsDir() { + c.Fatal("Expected file `bar` to be a file") + } + + // Copy file nested in volume + dockerCmd(c, "cp", cleanedContainerID+":/foo/bar", outDir) + + stat, err = os.Stat(outDir + "/bar") + if err != nil { + c.Fatal(err) + } + if stat.IsDir() { + c.Fatal("Expected file `bar` to be a file") + } + + // Copy Bind-mounted dir + dockerCmd(c, "cp", cleanedContainerID+":/baz", outDir) + stat, err = os.Stat(outDir + "/baz") + if err != nil { + c.Fatal(err) + } + if !stat.IsDir() { + c.Fatal("Expected `baz` to be a dir") + } + + // Copy file nested in bind-mounted dir + dockerCmd(c, "cp", cleanedContainerID+":/baz/test", outDir) + fb, err := ioutil.ReadFile(outDir + "/baz/test") + if err != nil { + c.Fatal(err) + } + fb2, err := ioutil.ReadFile(tmpDir + "/test") + if err != nil { + c.Fatal(err) + } + if !bytes.Equal(fb, fb2) { + c.Fatalf("Expected copied file to be duplicate of bind-mounted file") + } + + 
+	// Copy bind-mounted file
+	dockerCmd(c, "cp", cleanedContainerID+":/test", outDir)
+	fb, err = ioutil.ReadFile(outDir + "/test")
+	if err != nil {
+		c.Fatal(err)
+	}
+	fb2, err = ioutil.ReadFile(tmpDir + "/test")
+	if err != nil {
+		c.Fatal(err)
+	}
+	if !bytes.Equal(fb, fb2) {
+		c.Fatalf("Expected copied file to be duplicate of bind-mounted file")
+	}
+}
+
+func (s *DockerSuite) TestCpToDot(c *check.C) {
+	out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test")
+	if exitCode != 0 {
+		c.Fatal("failed to create a container", out)
+	}
+
+	cleanedContainerID := strings.TrimSpace(out)
+
+	out, _ = dockerCmd(c, "wait", cleanedContainerID)
+	if strings.TrimSpace(out) != "0" {
+		c.Fatal("failed to set up container", out)
+	}
+
+	tmpdir, err := ioutil.TempDir("", "docker-integration")
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+	cwd, err := os.Getwd()
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer os.Chdir(cwd)
+	if err := os.Chdir(tmpdir); err != nil {
+		c.Fatal(err)
+	}
+	dockerCmd(c, "cp", cleanedContainerID+":/test", ".")
+	content, err := ioutil.ReadFile("./test")
+	if err != nil {
+		c.Fatal(err)
+	}
+	if string(content) != "lololol\n" {
+		c.Fatalf("Wrong content in copied file %q, should be %q", content, "lololol\n")
+	}
+}
+
+func (s *DockerSuite) TestCpToStdout(c *check.C) {
+	out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test")
+	if exitCode != 0 {
+		c.Fatalf("failed to create a container: %s\n", out)
+	}
+
+	cID := strings.TrimSpace(out)
+
+	out, _ = dockerCmd(c, "wait", cID)
+	if strings.TrimSpace(out) != "0" {
+		c.Fatalf("failed to set up container: %s\n", out)
+	}
+
+	out, _, err := runCommandPipelineWithOutput(
+		exec.Command(dockerBinary, "cp", cID+":/test", "-"),
+		exec.Command("tar", "-vtf", "-"))
+
+	if err != nil {
+		c.Fatalf("Failed to run commands: %s", err)
+	}
+
+	if !strings.Contains(out, "test") || !strings.Contains(out, "-rw") {
+		c.Fatalf("Missing file from tar TOC:\n%s", out)
+	}
+}
+
+func (s *DockerSuite) TestCpNameHasColon(c *check.C) {
+	testRequires(c, SameHostDaemon)
+
+	out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /te:s:t")
+	if exitCode != 0 {
+		c.Fatal("failed to create a container", out)
+	}
+
+	cleanedContainerID := strings.TrimSpace(out)
+
+	out, _ = dockerCmd(c, "wait", cleanedContainerID)
+	if strings.TrimSpace(out) != "0" {
+		c.Fatal("failed to set up container", out)
+	}
+
+	tmpdir, err := ioutil.TempDir("", "docker-integration")
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+	dockerCmd(c, "cp", cleanedContainerID+":/te:s:t", tmpdir)
+	content, err := ioutil.ReadFile(tmpdir + "/te:s:t")
+	if err != nil {
+		c.Fatal(err)
+	}
+	if string(content) != "lololol\n" {
+		c.Fatalf("Wrong content in copied file %q, should be %q", content, "lololol\n")
+	}
+}
+
+func (s *DockerSuite) TestCopyAndRestart(c *check.C) {
+	expectedMsg := "hello"
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "echo", expectedMsg)
+	id := strings.TrimSpace(string(out))
+
+	out, _ = dockerCmd(c, "wait", id)
+
+	status := strings.TrimSpace(out)
+	if status != "0" {
+		c.Fatalf("container exited with status %s", status)
+	}
+
+	tmpDir, err := ioutil.TempDir("", "test-docker-restart-after-copy-")
+	if err != nil {
+		c.Fatalf("unable to make temporary directory: %s", err)
+	}
+	defer os.RemoveAll(tmpDir)
+
+	dockerCmd(c, "cp", fmt.Sprintf("%s:/etc/issue", id), tmpDir)
+
+	out, _ = dockerCmd(c, "start", "-a", id)
+
+	msg := strings.TrimSpace(out)
+	if msg != expectedMsg {
+		c.Fatalf("expected %q but got %q", expectedMsg, msg)
+	}
+}
+
+func (s *DockerSuite) TestCopyCreatedContainer(c *check.C) {
+	dockerCmd(c, "create", "--name", "test_cp", "-v", "/test", "busybox")
+
+	tmpDir, err := ioutil.TempDir("", "test")
+	if err != nil {
+		c.Fatalf("unable to make temporary directory: %s", err)
+	}
+	defer os.RemoveAll(tmpDir)
+	dockerCmd(c, "cp", "test_cp:/bin/sh", tmpDir)
+}
diff --git a/integration-cli/docker_cli_cp_to_container_test.go b/integration-cli/docker_cli_cp_to_container_test.go
new file mode 100644
index 00000000..341121d2
--- /dev/null
+++ b/integration-cli/docker_cli_cp_to_container_test.go
@@ -0,0 +1,746 @@
+package main
+
+import (
+	"os"
+
+	"github.com/go-check/check"
+)
+
+// docker cp LOCALPATH CONTAINER:PATH
+
+// Try all of the test cases from the archive package which implements the
+// internals of `docker cp` and ensure that the behavior matches when actually
+// copying to and from containers.
+
+// Basic assumptions about SRC and DST:
+// 1. SRC must exist.
+// 2. If SRC ends with a trailing separator, it must be a directory.
+// 3. DST parent directory must exist.
+// 4. If DST exists as a file, it must not end with a trailing separator.
+
+// First get these easy error cases out of the way.
+
+// Test for error when SRC does not exist.
+func (s *DockerSuite) TestCpToErrSrcNotExists(c *check.C) {
+	cID := makeTestContainer(c, testContainerOptions{})
+	defer deleteContainer(cID)
+
+	tmpDir := getTestDir(c, "test-cp-to-err-src-not-exists")
+	defer os.RemoveAll(tmpDir)
+
+	srcPath := cpPath(tmpDir, "file1")
+	dstPath := containerCpPath(cID, "file1")
+
+	err := runDockerCp(c, srcPath, dstPath)
+	if err == nil {
+		c.Fatal("expected IsNotExist error, but got nil instead")
+	}
+
+	if !isCpNotExist(err) {
+		c.Fatalf("expected IsNotExist error, but got %T: %s", err, err)
+	}
+}
+
+// Test for error when SRC ends in a trailing
+// path separator but exists as a file.
+func (s *DockerSuite) TestCpToErrSrcNotDir(c *check.C) {
+	cID := makeTestContainer(c, testContainerOptions{})
+	defer deleteContainer(cID)
+
+	tmpDir := getTestDir(c, "test-cp-to-err-src-not-dir")
+	defer os.RemoveAll(tmpDir)
+
+	makeTestContentInDir(c, tmpDir)
+
+	srcPath := cpPathTrailingSep(tmpDir, "file1")
+	dstPath := containerCpPath(cID, "testDir")
+
+	err := runDockerCp(c, srcPath, dstPath)
+	if err == nil {
+		c.Fatal("expected IsNotDir error, but got nil instead")
+	}
+
+	if !isCpNotDir(err) {
+		c.Fatalf("expected IsNotDir error, but got %T: %s", err, err)
+	}
+}
+
+// Test for error when SRC is a valid file or directory,
+// but the DST parent directory does not exist.
+func (s *DockerSuite) TestCpToErrDstParentNotExists(c *check.C) {
+	cID := makeTestContainer(c, testContainerOptions{addContent: true})
+	defer deleteContainer(cID)
+
+	tmpDir := getTestDir(c, "test-cp-to-err-dst-parent-not-exists")
+	defer os.RemoveAll(tmpDir)
+
+	makeTestContentInDir(c, tmpDir)
+
+	// Try with a file source.
+	srcPath := cpPath(tmpDir, "file1")
+	dstPath := containerCpPath(cID, "/notExists", "file1")
+
+	err := runDockerCp(c, srcPath, dstPath)
+	if err == nil {
+		c.Fatal("expected IsNotExist error, but got nil instead")
+	}
+
+	if !isCpNotExist(err) {
+		c.Fatalf("expected IsNotExist error, but got %T: %s", err, err)
+	}
+
+	// Try with a directory source.
+ srcPath = cpPath(tmpDir, "dir1") + + if err := runDockerCp(c, srcPath, dstPath); err == nil { + c.Fatal("expected IsNotExist error, but got nil instead") + } + + if !isCpNotExist(err) { + c.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } +} + +// Test for error when DST ends in a trailing path separator but exists as a +// file. Also test that we cannot overwirite an existing directory with a +// non-directory and cannot overwrite an existing +func (s *DockerSuite) TestCpToErrDstNotDir(c *check.C) { + cID := makeTestContainer(c, testContainerOptions{addContent: true}) + defer deleteContainer(cID) + + tmpDir := getTestDir(c, "test-cp-to-err-dst-not-dir") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + // Try with a file source. + srcPath := cpPath(tmpDir, "dir1/file1-1") + dstPath := containerCpPathTrailingSep(cID, "file1") + + // The client should encounter an error trying to stat the destination + // and then be unable to copy since the destination is asserted to be a + // directory but does not exist. + err := runDockerCp(c, srcPath, dstPath) + if err == nil { + c.Fatal("expected DirNotExist error, but got nil instead") + } + + if !isCpDirNotExist(err) { + c.Fatalf("expected DirNotExist error, but got %T: %s", err, err) + } + + // Try with a directory source. + srcPath = cpPath(tmpDir, "dir1") + + // The client should encounter an error trying to stat the destination and + // then decide to extract to the parent directory instead with a rebased + // name in the source archive, but this directory would overwrite the + // existing file with the same name. + err = runDockerCp(c, srcPath, dstPath) + if err == nil { + c.Fatal("expected CannotOverwriteNonDirWithDir error, but got nil instead") + } + + if !isCannotOverwriteNonDirWithDir(err) { + c.Fatalf("expected CannotOverwriteNonDirWithDir error, but got %T: %s", err, err) + } +} + +// Check that copying from a local path to a symlink in a container copies to +// the symlink target and does not overwrite the container symlink itself. +func (s *DockerSuite) TestCpToSymlinkDestination(c *check.C) { + testRequires(c, SameHostDaemon) // Requires local volume mount bind. + + testVol := getTestDir(c, "test-cp-to-symlink-destination-") + defer os.RemoveAll(testVol) + + makeTestContentInDir(c, testVol) + + cID := makeTestContainer(c, testContainerOptions{ + volumes: defaultVolumes(testVol), // Our bind mount is at /vol2 + }) + defer deleteContainer(cID) + + // First, copy a local file to a symlink to a file in the container. This + // should overwrite the symlink target contents with the source contents. + srcPath := cpPath(testVol, "file2") + dstPath := containerCpPath(cID, "/vol2/symlinkToFile1") + + if err := runDockerCp(c, srcPath, dstPath); err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + // The symlink should not have been modified. + if err := symlinkTargetEquals(c, cpPath(testVol, "symlinkToFile1"), "file1"); err != nil { + c.Fatal(err) + } + + // The file should have the contents of "file2" now. + if err := fileContentEquals(c, cpPath(testVol, "file1"), "file2\n"); err != nil { + c.Fatal(err) + } + + // Next, copy a local file to a symlink to a directory in the container. + // This should copy the file into the symlink target directory. + dstPath = containerCpPath(cID, "/vol2/symlinkToDir1") + + if err := runDockerCp(c, srcPath, dstPath); err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + // The symlink should not have been modified. 
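+	// Editor's note: symlinkTargetEquals (defined in docker_cli_cp_utils.go
+	// later in this patch) inspects the link itself rather than following it,
+	// essentially:
+	//
+	//	actualTarget, err := os.Readlink(symlink)
+	//	// compare actualTarget against the expected target string
+	//
+	// so these checks prove `docker cp` wrote through the symlink without
+	// replacing it.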
+ if err := symlinkTargetEquals(c, cpPath(testVol, "symlinkToDir1"), "dir1"); err != nil { + c.Fatal(err) + } + + // The file should have the contents of "file2" now. + if err := fileContentEquals(c, cpPath(testVol, "file2"), "file2\n"); err != nil { + c.Fatal(err) + } + + // Next, copy a file to a symlink to a file that does not exist (a broken + // symlink) in the container. This should create the target file with the + // contents of the source file. + dstPath = containerCpPath(cID, "/vol2/brokenSymlinkToFileX") + + if err := runDockerCp(c, srcPath, dstPath); err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + // The symlink should not have been modified. + if err := symlinkTargetEquals(c, cpPath(testVol, "brokenSymlinkToFileX"), "fileX"); err != nil { + c.Fatal(err) + } + + // The file should have the contents of "file2" now. + if err := fileContentEquals(c, cpPath(testVol, "fileX"), "file2\n"); err != nil { + c.Fatal(err) + } + + // Next, copy a local directory to a symlink to a directory in the + // container. This should copy the directory into the symlink target + // directory and not modify the symlink. + srcPath = cpPath(testVol, "/dir2") + dstPath = containerCpPath(cID, "/vol2/symlinkToDir1") + + if err := runDockerCp(c, srcPath, dstPath); err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + // The symlink should not have been modified. + if err := symlinkTargetEquals(c, cpPath(testVol, "symlinkToDir1"), "dir1"); err != nil { + c.Fatal(err) + } + + // The directory should now contain a copy of "dir2". + if err := fileContentEquals(c, cpPath(testVol, "dir1/dir2/file2-1"), "file2-1\n"); err != nil { + c.Fatal(err) + } + + // Next, copy a local directory to a symlink to a local directory that does + // not exist (a broken symlink) in the container. This should create the + // target as a directory with the contents of the source directory. It + // should not modify the symlink. + dstPath = containerCpPath(cID, "/vol2/brokenSymlinkToDirX") + + if err := runDockerCp(c, srcPath, dstPath); err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + // The symlink should not have been modified. + if err := symlinkTargetEquals(c, cpPath(testVol, "brokenSymlinkToDirX"), "dirX"); err != nil { + c.Fatal(err) + } + + // The "dirX" directory should now be a copy of "dir2". + if err := fileContentEquals(c, cpPath(testVol, "dirX/file2-1"), "file2-1\n"); err != nil { + c.Fatal(err) + } +} + +// Possibilities are reduced to the remaining 10 cases: +// +// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action +// =================================================================================================== +// A | no | - | no | - | no | create file +// B | no | - | no | - | yes | error +// C | no | - | yes | no | - | overwrite file +// D | no | - | yes | yes | - | create file in dst dir +// E | yes | no | no | - | - | create dir, copy contents +// F | yes | no | yes | no | - | error +// G | yes | no | yes | yes | - | copy dir and contents +// H | yes | yes | no | - | - | create dir, copy contents +// I | yes | yes | yes | no | - | error +// J | yes | yes | yes | yes | - | copy dir contents +// + +// A. SRC specifies a file and DST (no trailing path separator) doesn't +// exist. This should create a file with the name DST and copy the +// contents of the source file into it. 
+func (s *DockerSuite) TestCpToCaseA(c *check.C) { + cID := makeTestContainer(c, testContainerOptions{ + workDir: "/root", command: makeCatFileCommand("itWorks.txt"), + }) + defer deleteContainer(cID) + + tmpDir := getTestDir(c, "test-cp-to-case-a") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(cID, "/root/itWorks.txt") + + if err := runDockerCp(c, srcPath, dstPath); err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + if err := containerStartOutputEquals(c, cID, "file1\n"); err != nil { + c.Fatal(err) + } +} + +// B. SRC specifies a file and DST (with trailing path separator) doesn't +// exist. This should cause an error because the copy operation cannot +// create a directory when copying a single file. +func (s *DockerSuite) TestCpToCaseB(c *check.C) { + cID := makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("testDir/file1"), + }) + defer deleteContainer(cID) + + tmpDir := getTestDir(c, "test-cp-to-case-b") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstDir := containerCpPathTrailingSep(cID, "testDir") + + err := runDockerCp(c, srcPath, dstDir) + if err == nil { + c.Fatal("expected DirNotExists error, but got nil instead") + } + + if !isCpDirNotExist(err) { + c.Fatalf("expected DirNotExists error, but got %T: %s", err, err) + } +} + +// C. SRC specifies a file and DST exists as a file. This should overwrite +// the file at DST with the contents of the source file. +func (s *DockerSuite) TestCpToCaseC(c *check.C) { + cID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + command: makeCatFileCommand("file2"), + }) + defer deleteContainer(cID) + + tmpDir := getTestDir(c, "test-cp-to-case-c") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(cID, "/root/file2") + + // Ensure the container's file starts with the original content. + if err := containerStartOutputEquals(c, cID, "file2\n"); err != nil { + c.Fatal(err) + } + + if err := runDockerCp(c, srcPath, dstPath); err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + // Should now contain file1's contents. + if err := containerStartOutputEquals(c, cID, "file1\n"); err != nil { + c.Fatal(err) + } +} + +// D. SRC specifies a file and DST exists as a directory. This should place +// a copy of the source file inside it using the basename from SRC. Ensure +// this works whether DST has a trailing path separator or not. +func (s *DockerSuite) TestCpToCaseD(c *check.C) { + cID := makeTestContainer(c, testContainerOptions{ + addContent: true, + command: makeCatFileCommand("/dir1/file1"), + }) + defer deleteContainer(cID) + + tmpDir := getTestDir(c, "test-cp-to-case-d") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstDir := containerCpPath(cID, "dir1") + + // Ensure that dstPath doesn't exist. + if err := containerStartOutputEquals(c, cID, ""); err != nil { + c.Fatal(err) + } + + if err := runDockerCp(c, srcPath, dstDir); err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + // Should now contain file1's contents. + if err := containerStartOutputEquals(c, cID, "file1\n"); err != nil { + c.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + // Make new destination container. 
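+	// Editor's note: a fresh container is required for the retry because the
+	// first copy already placed file1 at /dir1/file1 in the old container;
+	// reusing it would make the trailing-separator variant pass trivially.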
+ cID = makeTestContainer(c, testContainerOptions{ + addContent: true, + command: makeCatFileCommand("/dir1/file1"), + }) + defer deleteContainer(cID) + + dstDir = containerCpPathTrailingSep(cID, "dir1") + + // Ensure that dstPath doesn't exist. + if err := containerStartOutputEquals(c, cID, ""); err != nil { + c.Fatal(err) + } + + if err := runDockerCp(c, srcPath, dstDir); err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + // Should now contain file1's contents. + if err := containerStartOutputEquals(c, cID, "file1\n"); err != nil { + c.Fatal(err) + } +} + +// E. SRC specifies a directory and DST does not exist. This should create a +// directory at DST and copy the contents of the SRC directory into the DST +// directory. Ensure this works whether DST has a trailing path separator or +// not. +func (s *DockerSuite) TestCpToCaseE(c *check.C) { + cID := makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("/testDir/file1-1"), + }) + defer deleteContainer(cID) + + tmpDir := getTestDir(c, "test-cp-to-case-e") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPath(tmpDir, "dir1") + dstDir := containerCpPath(cID, "testDir") + + if err := runDockerCp(c, srcDir, dstDir); err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + // Should now contain file1-1's contents. + if err := containerStartOutputEquals(c, cID, "file1-1\n"); err != nil { + c.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + // Make new destination container. + cID = makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("/testDir/file1-1"), + }) + defer deleteContainer(cID) + + dstDir = containerCpPathTrailingSep(cID, "testDir") + + err := runDockerCp(c, srcDir, dstDir) + if err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + // Should now contain file1-1's contents. + if err := containerStartOutputEquals(c, cID, "file1-1\n"); err != nil { + c.Fatal(err) + } +} + +// F. SRC specifies a directory and DST exists as a file. This should cause an +// error as it is not possible to overwrite a file with a directory. +func (s *DockerSuite) TestCpToCaseF(c *check.C) { + cID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + defer deleteContainer(cID) + + tmpDir := getTestDir(c, "test-cp-to-case-f") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPath(tmpDir, "dir1") + dstFile := containerCpPath(cID, "/root/file1") + + err := runDockerCp(c, srcDir, dstFile) + if err == nil { + c.Fatal("expected ErrCannotCopyDir error, but got nil instead") + } + + if !isCpCannotCopyDir(err) { + c.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) + } +} + +// G. SRC specifies a directory and DST exists as a directory. This should copy +// the SRC directory and all its contents to the DST directory. Ensure this +// works whether DST has a trailing path separator or not. +func (s *DockerSuite) TestCpToCaseG(c *check.C) { + cID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + command: makeCatFileCommand("dir2/dir1/file1-1"), + }) + defer deleteContainer(cID) + + tmpDir := getTestDir(c, "test-cp-to-case-g") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPath(tmpDir, "dir1") + dstDir := containerCpPath(cID, "/root/dir2") + + // Ensure that dstPath doesn't exist. 
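+	// Editor's note: "doesn't exist" is verified indirectly. The container's
+	// command is built by makeCatFileCommand (see docker_cli_cp_utils.go later
+	// in this patch), so it runs, roughly:
+	//
+	//	/bin/sh -c 'if [ -f dir2/dir1/file1-1 ]; then cat dir2/dir1/file1-1; fi'
+	//
+	// and an empty `docker start -a` output therefore proves the file is absent.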
+	if err := containerStartOutputEquals(c, cID, ""); err != nil {
+		c.Fatal(err)
+	}
+
+	if err := runDockerCp(c, srcDir, dstDir); err != nil {
+		c.Fatalf("unexpected error %T: %s", err, err)
+	}
+
+	// Should now contain file1-1's contents.
+	if err := containerStartOutputEquals(c, cID, "file1-1\n"); err != nil {
+		c.Fatal(err)
+	}
+
+	// Now try again but using a trailing path separator for dstDir.
+
+	// Make new destination container.
+	cID = makeTestContainer(c, testContainerOptions{
+		addContent: true,
+		command:    makeCatFileCommand("/dir2/dir1/file1-1"),
+	})
+	defer deleteContainer(cID)
+
+	dstDir = containerCpPathTrailingSep(cID, "/dir2")
+
+	// Ensure that dstPath doesn't exist.
+	if err := containerStartOutputEquals(c, cID, ""); err != nil {
+		c.Fatal(err)
+	}
+
+	if err := runDockerCp(c, srcDir, dstDir); err != nil {
+		c.Fatalf("unexpected error %T: %s", err, err)
+	}
+
+	// Should now contain file1-1's contents.
+	if err := containerStartOutputEquals(c, cID, "file1-1\n"); err != nil {
+		c.Fatal(err)
+	}
+}
+
+// H. SRC specifies a directory's contents only and DST does not exist. This
+// should create a directory at DST and copy the contents of the SRC
+// directory (but not the directory itself) into the DST directory. Ensure
+// this works whether DST has a trailing path separator or not.
+func (s *DockerSuite) TestCpToCaseH(c *check.C) {
+	cID := makeTestContainer(c, testContainerOptions{
+		command: makeCatFileCommand("/testDir/file1-1"),
+	})
+	defer deleteContainer(cID)
+
+	tmpDir := getTestDir(c, "test-cp-to-case-h")
+	defer os.RemoveAll(tmpDir)
+
+	makeTestContentInDir(c, tmpDir)
+
+	srcDir := cpPathTrailingSep(tmpDir, "dir1") + "."
+	dstDir := containerCpPath(cID, "testDir")
+
+	if err := runDockerCp(c, srcDir, dstDir); err != nil {
+		c.Fatalf("unexpected error %T: %s", err, err)
+	}
+
+	// Should now contain file1-1's contents.
+	if err := containerStartOutputEquals(c, cID, "file1-1\n"); err != nil {
+		c.Fatal(err)
+	}
+
+	// Now try again but using a trailing path separator for dstDir.
+
+	// Make new destination container.
+	cID = makeTestContainer(c, testContainerOptions{
+		command: makeCatFileCommand("/testDir/file1-1"),
+	})
+	defer deleteContainer(cID)
+
+	dstDir = containerCpPathTrailingSep(cID, "testDir")
+
+	if err := runDockerCp(c, srcDir, dstDir); err != nil {
+		c.Fatalf("unexpected error %T: %s", err, err)
+	}
+
+	// Should now contain file1-1's contents.
+	if err := containerStartOutputEquals(c, cID, "file1-1\n"); err != nil {
+		c.Fatal(err)
+	}
+}
+
+// I. SRC specifies a directory's contents only and DST exists as a file. This
+// should cause an error as it is not possible to overwrite a file with a
+// directory.
+func (s *DockerSuite) TestCpToCaseI(c *check.C) {
+	cID := makeTestContainer(c, testContainerOptions{
+		addContent: true, workDir: "/root",
+	})
+	defer deleteContainer(cID)
+
+	tmpDir := getTestDir(c, "test-cp-to-case-i")
+	defer os.RemoveAll(tmpDir)
+
+	makeTestContentInDir(c, tmpDir)
+
+	srcDir := cpPathTrailingSep(tmpDir, "dir1") + "."
+	dstFile := containerCpPath(cID, "/root/file1")
+
+	err := runDockerCp(c, srcDir, dstFile)
+	if err == nil {
+		c.Fatal("expected ErrCannotCopyDir error, but got nil instead")
+	}
+
+	if !isCpCannotCopyDir(err) {
+		c.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err)
+	}
+}
+
+// J. SRC specifies a directory's contents only and DST exists as a directory.
+// This should copy the contents of the SRC directory (but not the directory
+// itself) into the DST directory. 
Ensure this works whether DST has a +// trailing path separator or not. +func (s *DockerSuite) TestCpToCaseJ(c *check.C) { + cID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + command: makeCatFileCommand("/dir2/file1-1"), + }) + defer deleteContainer(cID) + + tmpDir := getTestDir(c, "test-cp-to-case-j") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPathTrailingSep(tmpDir, "dir1") + "." + dstDir := containerCpPath(cID, "/dir2") + + // Ensure that dstPath doesn't exist. + if err := containerStartOutputEquals(c, cID, ""); err != nil { + c.Fatal(err) + } + + if err := runDockerCp(c, srcDir, dstDir); err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + // Should now contain file1-1's contents. + if err := containerStartOutputEquals(c, cID, "file1-1\n"); err != nil { + c.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + // Make new destination container. + cID = makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("/dir2/file1-1"), + }) + defer deleteContainer(cID) + + dstDir = containerCpPathTrailingSep(cID, "/dir2") + + // Ensure that dstPath doesn't exist. + if err := containerStartOutputEquals(c, cID, ""); err != nil { + c.Fatal(err) + } + + if err := runDockerCp(c, srcDir, dstDir); err != nil { + c.Fatalf("unexpected error %T: %s", err, err) + } + + // Should now contain file1-1's contents. + if err := containerStartOutputEquals(c, cID, "file1-1\n"); err != nil { + c.Fatal(err) + } +} + +// The `docker cp` command should also ensure that you cannot +// write to a container rootfs that is marked as read-only. +func (s *DockerSuite) TestCpToErrReadOnlyRootfs(c *check.C) { + tmpDir := getTestDir(c, "test-cp-to-err-read-only-rootfs") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + cID := makeTestContainer(c, testContainerOptions{ + readOnly: true, workDir: "/root", + command: makeCatFileCommand("shouldNotExist"), + }) + defer deleteContainer(cID) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(cID, "/root/shouldNotExist") + + err := runDockerCp(c, srcPath, dstPath) + if err == nil { + c.Fatal("expected ErrContainerRootfsReadonly error, but got nil instead") + } + + if !isCpCannotCopyReadOnly(err) { + c.Fatalf("expected ErrContainerRootfsReadonly error, but got %T: %s", err, err) + } + + // Ensure that dstPath doesn't exist. + if err := containerStartOutputEquals(c, cID, ""); err != nil { + c.Fatal(err) + } +} + +// The `docker cp` command should also ensure that you +// cannot write to a volume that is mounted as read-only. +func (s *DockerSuite) TestCpToErrReadOnlyVolume(c *check.C) { + tmpDir := getTestDir(c, "test-cp-to-err-read-only-volume") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + cID := makeTestContainer(c, testContainerOptions{ + volumes: defaultVolumes(tmpDir), workDir: "/root", + command: makeCatFileCommand("/vol_ro/shouldNotExist"), + }) + defer deleteContainer(cID) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(cID, "/vol_ro/shouldNotExist") + + err := runDockerCp(c, srcPath, dstPath) + if err == nil { + c.Fatal("expected ErrVolumeReadonly error, but got nil instead") + } + + if !isCpCannotCopyReadOnly(err) { + c.Fatalf("expected ErrVolumeReadonly error, but got %T: %s", err, err) + } + + // Ensure that dstPath doesn't exist. 
+ if err := containerStartOutputEquals(c, cID, ""); err != nil { + c.Fatal(err) + } +} diff --git a/integration-cli/docker_cli_cp_utils.go b/integration-cli/docker_cli_cp_utils.go new file mode 100644 index 00000000..c26ebfd7 --- /dev/null +++ b/integration-cli/docker_cli_cp_utils.go @@ -0,0 +1,316 @@ +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/archive" + "github.com/go-check/check" +) + +type fileType uint32 + +const ( + ftRegular fileType = iota + ftDir + ftSymlink +) + +type fileData struct { + filetype fileType + path string + contents string +} + +func (fd fileData) creationCommand() string { + var command string + + switch fd.filetype { + case ftRegular: + // Don't overwrite the file if it already exists! + command = fmt.Sprintf("if [ ! -f %s ]; then echo %q > %s; fi", fd.path, fd.contents, fd.path) + case ftDir: + command = fmt.Sprintf("mkdir -p %s", fd.path) + case ftSymlink: + command = fmt.Sprintf("ln -fs %s %s", fd.contents, fd.path) + } + + return command +} + +func mkFilesCommand(fds []fileData) string { + commands := make([]string, len(fds)) + + for i, fd := range fds { + commands[i] = fd.creationCommand() + } + + return strings.Join(commands, " && ") +} + +var defaultFileData = []fileData{ + {ftRegular, "file1", "file1"}, + {ftRegular, "file2", "file2"}, + {ftRegular, "file3", "file3"}, + {ftRegular, "file4", "file4"}, + {ftRegular, "file5", "file5"}, + {ftRegular, "file6", "file6"}, + {ftRegular, "file7", "file7"}, + {ftDir, "dir1", ""}, + {ftRegular, "dir1/file1-1", "file1-1"}, + {ftRegular, "dir1/file1-2", "file1-2"}, + {ftDir, "dir2", ""}, + {ftRegular, "dir2/file2-1", "file2-1"}, + {ftRegular, "dir2/file2-2", "file2-2"}, + {ftDir, "dir3", ""}, + {ftRegular, "dir3/file3-1", "file3-1"}, + {ftRegular, "dir3/file3-2", "file3-2"}, + {ftDir, "dir4", ""}, + {ftRegular, "dir4/file3-1", "file4-1"}, + {ftRegular, "dir4/file3-2", "file4-2"}, + {ftDir, "dir5", ""}, + {ftSymlink, "symlinkToFile1", "file1"}, + {ftSymlink, "symlinkToDir1", "dir1"}, + {ftSymlink, "brokenSymlinkToFileX", "fileX"}, + {ftSymlink, "brokenSymlinkToDirX", "dirX"}, + {ftSymlink, "symlinkToAbsDir", "/root"}, +} + +func defaultMkContentCommand() string { + return mkFilesCommand(defaultFileData) +} + +func makeTestContentInDir(c *check.C, dir string) { + for _, fd := range defaultFileData { + path := filepath.Join(dir, filepath.FromSlash(fd.path)) + switch fd.filetype { + case ftRegular: + if err := ioutil.WriteFile(path, []byte(fd.contents+"\n"), os.FileMode(0666)); err != nil { + c.Fatal(err) + } + case ftDir: + if err := os.Mkdir(path, os.FileMode(0777)); err != nil { + c.Fatal(err) + } + case ftSymlink: + if err := os.Symlink(fd.contents, path); err != nil { + c.Fatal(err) + } + } + } +} + +type testContainerOptions struct { + addContent bool + readOnly bool + volumes []string + workDir string + command string +} + +func makeTestContainer(c *check.C, options testContainerOptions) (containerID string) { + if options.addContent { + mkContentCmd := defaultMkContentCommand() + if options.command == "" { + options.command = mkContentCmd + } else { + options.command = fmt.Sprintf("%s && %s", defaultMkContentCommand(), options.command) + } + } + + if options.command == "" { + options.command = "#(nop)" + } + + args := []string{"run", "-d"} + + for _, volume := range options.volumes { + args = append(args, "-v", volume) + } + + if options.workDir != "" { + args = append(args, "-w", options.workDir) + } + + if 
options.readOnly {
+		args = append(args, "--read-only")
+	}
+
+	args = append(args, "busybox", "/bin/sh", "-c", options.command)
+
+	out, status := dockerCmd(c, args...)
+	if status != 0 {
+		c.Fatalf("failed to run container, status %d: %s", status, out)
+	}
+
+	containerID = strings.TrimSpace(out)
+
+	out, status = dockerCmd(c, "wait", containerID)
+	if status != 0 {
+		c.Fatalf("failed to wait for test container, status %d: %s", status, out)
+	}
+
+	if exitCode := strings.TrimSpace(out); exitCode != "0" {
+		logs, status := dockerCmd(c, "logs", containerID)
+		if status != 0 {
+			logs = "UNABLE TO GET LOGS"
+		}
+		c.Fatalf("failed to make test container, exit code %s: %s", exitCode, logs)
+	}
+
+	return
+}
+
+func makeCatFileCommand(path string) string {
+	return fmt.Sprintf("if [ -f %s ]; then cat %s; fi", path, path)
+}
+
+func cpPath(pathElements ...string) string {
+	localizedPathElements := make([]string, len(pathElements))
+	for i, path := range pathElements {
+		localizedPathElements[i] = filepath.FromSlash(path)
+	}
+	return strings.Join(localizedPathElements, string(filepath.Separator))
+}
+
+func cpPathTrailingSep(pathElements ...string) string {
+	return fmt.Sprintf("%s%c", cpPath(pathElements...), filepath.Separator)
+}
+
+func containerCpPath(containerID string, pathElements ...string) string {
+	joined := strings.Join(pathElements, "/")
+	return fmt.Sprintf("%s:%s", containerID, joined)
+}
+
+func containerCpPathTrailingSep(containerID string, pathElements ...string) string {
+	return fmt.Sprintf("%s/", containerCpPath(containerID, pathElements...))
+}
+
+func runDockerCp(c *check.C, src, dst string) (err error) {
+	c.Logf("running `docker cp %s %s`", src, dst)
+
+	args := []string{"cp", src, dst}
+
+	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, args...))
+	if err != nil {
+		err = fmt.Errorf("error executing `docker cp` command: %s: %s", err, out)
+	}
+
+	return
+}
+
+func startContainerGetOutput(c *check.C, cID string) (out string, err error) {
+	c.Logf("running `docker start -a %s`", cID)
+
+	args := []string{"start", "-a", cID}
+
+	out, _, err = runCommandWithOutput(exec.Command(dockerBinary, args...))
+	if err != nil {
+		err = fmt.Errorf("error executing `docker start` command: %s: %s", err, out)
+	}
+
+	return
+}
+
+func getTestDir(c *check.C, label string) (tmpDir string) {
+	var err error
+
+	if tmpDir, err = ioutil.TempDir("", label); err != nil {
+		c.Fatalf("unable to make temporary directory: %s", err)
+	}
+
+	return
+}
+
+func isCpNotExist(err error) bool {
+	return strings.Contains(err.Error(), "no such file or directory") || strings.Contains(err.Error(), "cannot find the file specified")
+}
+
+func isCpDirNotExist(err error) bool {
+	return strings.Contains(err.Error(), archive.ErrDirNotExists.Error())
+}
+
+func isCpNotDir(err error) bool {
+	return strings.Contains(err.Error(), archive.ErrNotDirectory.Error()) || strings.Contains(err.Error(), "filename, directory name, or volume label syntax is incorrect")
+}
+
+func isCpCannotCopyDir(err error) bool {
+	return strings.Contains(err.Error(), archive.ErrCannotCopyDir.Error())
+}
+
+func isCpCannotCopyReadOnly(err error) bool {
+	return strings.Contains(err.Error(), "marked read-only")
+}
+
+func isCannotOverwriteNonDirWithDir(err error) bool {
+	return strings.Contains(err.Error(), "cannot overwrite non-directory")
+}
+
+func fileContentEquals(c *check.C, filename, contents string) (err error) {
+	c.Logf("checking that file %q contains %q\n", filename, contents)
+
+	fileBytes, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return
+	}
+
+	expectedBytes, err := ioutil.ReadAll(strings.NewReader(contents))
+	if err != nil {
+		return
+	}
+
+	if !bytes.Equal(fileBytes, expectedBytes) {
+		err = fmt.Errorf("file content not equal - expected %q, got %q", string(expectedBytes), string(fileBytes))
+	}
+
+	return
+}
+
+func symlinkTargetEquals(c *check.C, symlink, expectedTarget string) (err error) {
+	c.Logf("checking that the symlink %q points to %q\n", symlink, expectedTarget)
+
+	actualTarget, err := os.Readlink(symlink)
+	if err != nil {
+		return err
+	}
+
+	if actualTarget != expectedTarget {
+		return fmt.Errorf("symlink target points to %q not %q", actualTarget, expectedTarget)
+	}
+
+	return nil
+}
+
+func containerStartOutputEquals(c *check.C, cID, contents string) (err error) {
+	c.Logf("checking that container %q start output contains %q\n", cID, contents)
+
+	out, err := startContainerGetOutput(c, cID)
+	if err != nil {
+		return err
+	}
+
+	if out != contents {
+		err = fmt.Errorf("output contents not equal - expected %q, got %q", contents, out)
+	}
+
+	return
+}
+
+func defaultVolumes(tmpDir string) []string {
+	if SameHostDaemon.Condition() {
+		return []string{
+			"/vol1",
+			fmt.Sprintf("%s:/vol2", tmpDir),
+			fmt.Sprintf("%s:/vol3", filepath.Join(tmpDir, "vol3")),
+			fmt.Sprintf("%s:/vol_ro:ro", filepath.Join(tmpDir, "vol_ro")),
+		}
+	}
+
+	// Can't bind-mount volumes with separate host daemon.
+	return []string{"/vol1", "/vol2", "/vol3", "/vol_ro:/vol_ro:ro"}
+}
diff --git a/integration-cli/docker_cli_create_test.go b/integration-cli/docker_cli_create_test.go
new file mode 100644
index 00000000..482e96f9
--- /dev/null
+++ b/integration-cli/docker_cli_create_test.go
@@ -0,0 +1,452 @@
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"reflect"
+	"strings"
+	"time"
+
+	"github.com/docker/docker/pkg/nat"
+	"github.com/go-check/check"
+)
+
+// Make sure we can create a simple container with some args
+func (s *DockerSuite) TestCreateArgs(c *check.C) {
+	out, _ := dockerCmd(c, "create", "busybox", "command", "arg1", "arg2", "arg with space")
+
+	cleanedContainerID := strings.TrimSpace(out)
+
+	out, _ = dockerCmd(c, "inspect", cleanedContainerID)
+
+	containers := []struct {
+		ID      string
+		Created time.Time
+		Path    string
+		Args    []string
+		Image   string
+	}{}
+	if err := json.Unmarshal([]byte(out), &containers); err != nil {
+		c.Fatalf("Error inspecting the container: %s", err)
+	}
+	if len(containers) != 1 {
+		c.Fatalf("Unexpected container count. Expected 1, received: %d", len(containers))
+	}
+
+	cont := containers[0]
+	if cont.Path != "command" {
+		c.Fatalf("Unexpected container path. Expected command, received: %s", cont.Path)
+	}
+
+	expected := []string{"arg1", "arg2", "arg with space"}
+	if len(cont.Args) != len(expected) {
+		c.Fatalf("Unexpected args. Expected %v, received: %v", expected, cont.Args)
+	}
+	for i, arg := range expected {
+		if arg != cont.Args[i] {
+			c.Fatalf("Unexpected args. Expected %v, received: %v", expected, cont.Args)
+		}
+	}
+}
+
+// Make sure we can set hostconfig options too
+func (s *DockerSuite) TestCreateHostConfig(c *check.C) {
+	out, _ := dockerCmd(c, "create", "-P", "busybox", "echo")
+
+	cleanedContainerID := strings.TrimSpace(out)
+
+	out, _ = dockerCmd(c, "inspect", cleanedContainerID)
+
+	containers := []struct {
+		HostConfig *struct {
+			PublishAllPorts bool
+		}
+	}{}
+	if err := json.Unmarshal([]byte(out), &containers); err != nil {
+		c.Fatalf("Error inspecting the container: %s", err)
+	}
+	if len(containers) != 1 {
+		c.Fatalf("Unexpected container count. Expected 1, received: %d", len(containers))
+	}
+
+	cont := containers[0]
+	if cont.HostConfig == nil {
+		c.Fatalf("Expected HostConfig, got none")
+	}
+
+	if !cont.HostConfig.PublishAllPorts {
+		c.Fatalf("Expected PublishAllPorts, got false")
+	}
+}
+
+func (s *DockerSuite) TestCreateWithPortRange(c *check.C) {
+	out, _ := dockerCmd(c, "create", "-p", "3300-3303:3300-3303/tcp", "busybox", "echo")
+
+	cleanedContainerID := strings.TrimSpace(out)
+
+	out, _ = dockerCmd(c, "inspect", cleanedContainerID)
+
+	containers := []struct {
+		HostConfig *struct {
+			PortBindings map[nat.Port][]nat.PortBinding
+		}
+	}{}
+	if err := json.Unmarshal([]byte(out), &containers); err != nil {
+		c.Fatalf("Error inspecting the container: %s", err)
+	}
+	if len(containers) != 1 {
+		c.Fatalf("Unexpected container count. Expected 1, received: %d", len(containers))
+	}
+
+	cont := containers[0]
+	if cont.HostConfig == nil {
+		c.Fatalf("Expected HostConfig, got none")
+	}
+
+	if len(cont.HostConfig.PortBindings) != 4 {
+		c.Fatalf("Expected 4 port bindings, got %d", len(cont.HostConfig.PortBindings))
+	}
+	for k, v := range cont.HostConfig.PortBindings {
+		if len(v) != 1 {
+			c.Fatalf("Expected 1 port binding for port %s but found %v", k, v)
+		}
+		if k.Port() != v[0].HostPort {
+			c.Fatalf("Expected host port %s to match published port %s", v[0].HostPort, k.Port())
+		}
+	}
+}
+
+func (s *DockerSuite) TestCreateWithLargePortRange(c *check.C) {
+	out, _ := dockerCmd(c, "create", "-p", "1-65535:1-65535/tcp", "busybox", "echo")
+
+	cleanedContainerID := strings.TrimSpace(out)
+
+	out, _ = dockerCmd(c, "inspect", cleanedContainerID)
+
+	containers := []struct {
+		HostConfig *struct {
+			PortBindings map[nat.Port][]nat.PortBinding
+		}
+	}{}
+	if err := json.Unmarshal([]byte(out), &containers); err != nil {
+		c.Fatalf("Error inspecting the container: %s", err)
+	}
+	if len(containers) != 1 {
+		c.Fatalf("Unexpected container count. Expected 1, received: %d", len(containers))
+	}
+
+	cont := containers[0]
+	if cont.HostConfig == nil {
+		c.Fatalf("Expected HostConfig, got none")
+	}
+
+	if len(cont.HostConfig.PortBindings) != 65535 {
+		c.Fatalf("Expected 65535 port bindings, got %d", len(cont.HostConfig.PortBindings))
+	}
+	for k, v := range cont.HostConfig.PortBindings {
+		if len(v) != 1 {
+			c.Fatalf("Expected 1 port binding for port %s but found %v", k, v)
+		}
+		if k.Port() != v[0].HostPort {
+			c.Fatalf("Expected host port %s to match published port %s", v[0].HostPort, k.Port())
+		}
+	}
+}
+
+// "test123" should be printed by docker create + start
+func (s *DockerSuite) TestCreateEchoStdout(c *check.C) {
+	out, _ := dockerCmd(c, "create", "busybox", "echo", "test123")
+
+	cleanedContainerID := strings.TrimSpace(out)
+
+	out, _ = dockerCmd(c, "start", "-ai", cleanedContainerID)
+
+	if out != "test123\n" {
+		c.Errorf("container should've printed 'test123', got %q", out)
+	}
+}
+
+func (s *DockerSuite) TestCreateVolumesCreated(c *check.C) {
+	testRequires(c, SameHostDaemon)
+
+	name := "test_create_volume"
+	dockerCmd(c, "create", "--name", name, "-v", "/foo", "busybox")
+
+	dir, err := inspectMountSourceField(name, "/foo")
+	if err != nil {
+		c.Fatalf("Error getting volume host path: %q", err)
+	}
+
+	if _, err := os.Stat(dir); err != nil {
+		if os.IsNotExist(err) {
+			c.Fatalf("Volume was not created")
+		}
+		c.Fatalf("Error statting volume host path: %q", err)
+	}
+}
+
+func (s *DockerSuite) TestCreateLabels(c *check.C) {
+	name := "test_create_labels"
+	expected := map[string]string{"k1": "v1", "k2": "v2"}
+	dockerCmd(c, "create", "--name", name, "-l", "k1=v1", "--label", "k2=v2", "busybox")
+
+	actual := make(map[string]string)
+	err := inspectFieldAndMarshall(name, "Config.Labels", &actual)
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	if !reflect.DeepEqual(expected, actual) {
+		c.Fatalf("Expected %s got %s", expected, actual)
+	}
+}
+
+func (s *DockerSuite) TestCreateLabelFromImage(c *check.C) {
+	imageName := "testcreatebuildlabel"
+	_, err := buildImage(imageName,
+		`FROM busybox
+		LABEL k1=v1 k2=v2`,
+		true)
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	name := "test_create_labels_from_image"
+	expected := map[string]string{"k2": "x", "k3": "v3"}
+	dockerCmd(c, "create", "--name", name, "-l", "k2=x", "--label", "k3=v3", imageName)
+
+	actual := make(map[string]string)
+	err = inspectFieldAndMarshall(name, "Config.Labels", &actual)
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	if !reflect.DeepEqual(expected, actual) {
+		c.Fatalf("Expected %s got %s", expected, actual)
+	}
+}
+
+func (s *DockerSuite) TestCreateHostnameWithNumber(c *check.C) {
+	out, _ := dockerCmd(c, "run", "-h", "web.0", "busybox", "hostname")
+	if strings.TrimSpace(out) != "web.0" {
+		c.Fatalf("hostname not set, expected `web.0`, got: %s", out)
+	}
+}
+
+func (s *DockerSuite) TestCreateRM(c *check.C) {
+	// Test to make sure we can 'rm' a new container that is in
+	// "Created" state, and has never been run. Test "rm -f" too.
+ + // create a container + out, _ := dockerCmd(c, "create", "busybox") + cID := strings.TrimSpace(out) + + dockerCmd(c, "rm", cID) + + // Now do it again so we can "rm -f" this time + out, _ = dockerCmd(c, "create", "busybox") + + cID = strings.TrimSpace(out) + dockerCmd(c, "rm", "-f", cID) +} + +func (s *DockerSuite) TestCreateModeIpcContainer(c *check.C) { + testRequires(c, SameHostDaemon) + + out, _ := dockerCmd(c, "create", "busybox") + id := strings.TrimSpace(out) + + dockerCmd(c, "create", fmt.Sprintf("--ipc=container:%s", id), "busybox") +} + +func (s *DockerTrustSuite) TestTrustedCreate(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-create") + + // Try create + createCmd := exec.Command(dockerBinary, "create", repoName) + s.trustedCmd(createCmd) + out, _, err := runCommandWithOutput(createCmd) + if err != nil { + c.Fatalf("Error running trusted create: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Tagging") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + dockerCmd(c, "rmi", repoName) + + // Try untrusted create to ensure we pushed the tag to the registry + createCmd = exec.Command(dockerBinary, "create", "--disable-content-trust=true", repoName) + s.trustedCmd(createCmd) + out, _, err = runCommandWithOutput(createCmd) + if err != nil { + c.Fatalf("Error running trusted create: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Status: Downloaded") { + c.Fatalf("Missing expected output on trusted create with --disable-content-trust:\n%s", out) + } +} + +func (s *DockerTrustSuite) TestUntrustedCreate(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + dockerCmd(c, "push", repoName) + dockerCmd(c, "rmi", repoName) + + // Try trusted create on untrusted tag + createCmd := exec.Command(dockerBinary, "create", repoName) + s.trustedCmd(createCmd) + out, _, err := runCommandWithOutput(createCmd) + if err == nil { + c.Fatalf("Error expected when running trusted create with:\n%s", out) + } + + if !strings.Contains(string(out), "no trust data available") { + c.Fatalf("Missing expected output on trusted create:\n%s", out) + } +} + +func (s *DockerTrustSuite) TestTrustedIsolatedCreate(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-isolated-create") + + // Try create + createCmd := exec.Command(dockerBinary, "--config", "/tmp/docker-isolated-create", "create", repoName) + s.trustedCmd(createCmd) + out, _, err := runCommandWithOutput(createCmd) + if err != nil { + c.Fatalf("Error running trusted create: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Tagging") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + dockerCmd(c, "rmi", repoName) +} + +func (s *DockerTrustSuite) TestCreateWhenCertExpired(c *check.C) { + c.Skip("Currently changes system time, causing instability") + repoName := s.setupTrustedImage(c, "trusted-create-expired") + + // Certificates have 10 years of expiration + elevenYearsFromNow := time.Now().Add(time.Hour * 24 * 365 * 11) + + runAtDifferentDate(elevenYearsFromNow, func() { + // Try create + createCmd := exec.Command(dockerBinary, "create", repoName) + s.trustedCmd(createCmd) + out, _, err := runCommandWithOutput(createCmd) + if err == nil { + c.Fatalf("Error running trusted create in the distant future: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "could not validate the path to a trusted root") { + 
c.Fatalf("Missing expected output on trusted create in the distant future:\n%s", out) + } + }) + + runAtDifferentDate(elevenYearsFromNow, func() { + // Try create + createCmd := exec.Command(dockerBinary, "create", "--disable-content-trust", repoName) + s.trustedCmd(createCmd) + out, _, err := runCommandWithOutput(createCmd) + if err != nil { + c.Fatalf("Error running untrusted create in the distant future: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Status: Downloaded") { + c.Fatalf("Missing expected output on untrusted create in the distant future:\n%s", out) + } + }) +} + +func (s *DockerTrustSuite) TestTrustedCreateFromBadTrustServer(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclievilcreate/trusted:latest", privateRegistryURL) + evilLocalConfigDir, err := ioutil.TempDir("", "evil-local-config-dir") + if err != nil { + c.Fatalf("Failed to create local temp dir") + } + + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + if err != nil { + c.Fatalf("Error creating trusted push: %s\n%s", err, out) + } + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + dockerCmd(c, "rmi", repoName) + + // Try create + createCmd := exec.Command(dockerBinary, "create", repoName) + s.trustedCmd(createCmd) + out, _, err = runCommandWithOutput(createCmd) + if err != nil { + c.Fatalf("Error creating trusted create: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Tagging") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + dockerCmd(c, "rmi", repoName) + + // Kill the notary server, start a new "evil" one. + s.not.Close() + s.not, err = newTestNotary(c) + if err != nil { + c.Fatalf("Restarting notary server failed.") + } + + // In order to make an evil server, lets re-init a client (with a different trust dir) and push new data. + // tag an image and upload it to the private registry + dockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) + + // Push up to the new server + pushCmd = exec.Command(dockerBinary, "--config", evilLocalConfigDir, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err = runCommandWithOutput(pushCmd) + if err != nil { + c.Fatalf("Error creating trusted push: %s\n%s", err, out) + } + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + // Now, try creating with the original client from this new trust server. This should fail. 
+ createCmd = exec.Command(dockerBinary, "create", repoName) + s.trustedCmd(createCmd) + out, _, err = runCommandWithOutput(createCmd) + if err == nil { + c.Fatalf("Expected to fail on this create due to different remote data: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "failed to validate data with current trusted certificates") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } +} diff --git a/integration-cli/docker_cli_daemon_experimental_test.go b/integration-cli/docker_cli_daemon_experimental_test.go new file mode 100644 index 00000000..dc4f7920 --- /dev/null +++ b/integration-cli/docker_cli_daemon_experimental_test.go @@ -0,0 +1,39 @@ +// +build daemon,experimental + +package main + +import ( + "os/exec" + "strings" + + "github.com/go-check/check" +) + +func assertNetwork(c *check.C, d *Daemon, name string) { + out, err := d.Cmd("network", "ls") + c.Assert(err, check.IsNil) + lines := strings.Split(out, "\n") + for i := 1; i < len(lines)-1; i++ { + if strings.Contains(lines[i], name) { + return + } + } + c.Fatalf("Network %s not found in network ls o/p", name) +} + +func (s *DockerDaemonSuite) TestDaemonDefaultNetwork(c *check.C) { + d := s.d + + networkName := "testdefault" + err := d.StartWithBusybox("--default-network", "bridge:"+networkName) + c.Assert(err, check.IsNil) + + _, err = d.Cmd("run", "busybox", "true") + c.Assert(err, check.IsNil) + + assertNetwork(c, d, networkName) + + ifconfigCmd := exec.Command("ifconfig", networkName) + _, _, _, err = runCommandWithStdoutStderr(ifconfigCmd) + c.Assert(err, check.IsNil) +} diff --git a/integration-cli/docker_cli_daemon_test.go b/integration-cli/docker_cli_daemon_test.go new file mode 100644 index 00000000..992cd83e --- /dev/null +++ b/integration-cli/docker_cli_daemon_test.go @@ -0,0 +1,1521 @@ +// +build daemon + +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "os" + "os/exec" + "path/filepath" + "regexp" + "strconv" + "strings" + "time" + + "github.com/docker/libnetwork/iptables" + "github.com/docker/libtrust" + "github.com/go-check/check" +) + +func (s *DockerDaemonSuite) TestDaemonRestartWithRunningContainersPorts(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + if out, err := s.d.Cmd("run", "-d", "--name", "top1", "-p", "1234:80", "--restart", "always", "busybox:latest", "top"); err != nil { + c.Fatalf("Could not run top1: err=%v\n%s", err, out) + } + // --restart=no by default + if out, err := s.d.Cmd("run", "-d", "--name", "top2", "-p", "80", "busybox:latest", "top"); err != nil { + c.Fatalf("Could not run top2: err=%v\n%s", err, out) + } + + testRun := func(m map[string]bool, prefix string) { + var format string + for cont, shouldRun := range m { + out, err := s.d.Cmd("ps") + if err != nil { + c.Fatalf("Could not run ps: err=%v\n%q", err, out) + } + if shouldRun { + format = "%scontainer %q is not running" + } else { + format = "%scontainer %q is running" + } + if shouldRun != strings.Contains(out, cont) { + c.Fatalf(format, prefix, cont) + } + } + } + + testRun(map[string]bool{"top1": true, "top2": true}, "") + + if err := s.d.Restart(); err != nil { + c.Fatalf("Could not restart daemon: %v", err) + } + testRun(map[string]bool{"top1": true, "top2": false}, "After daemon restart: ") +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithVolumesRefs(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatal(err) + } + + if out, err := s.d.Cmd("run", "-d", "--name", 
"volrestarttest1", "-v", "/foo", "busybox"); err != nil { + c.Fatal(err, out) + } + + if err := s.d.Restart(); err != nil { + c.Fatal(err) + } + if _, err := s.d.Cmd("run", "-d", "--volumes-from", "volrestarttest1", "--name", "volrestarttest2", "busybox", "top"); err != nil { + c.Fatal(err) + } + + if out, err := s.d.Cmd("rm", "-fv", "volrestarttest2"); err != nil { + c.Fatal(err, out) + } + + out, err := s.d.Cmd("inspect", "-f", "{{json .Mounts}}", "volrestarttest1") + c.Assert(err, check.IsNil) + + if _, err := inspectMountPointJSON(out, "/foo"); err != nil { + c.Fatalf("Expected volume to exist: /foo, error: %v\n", err) + } +} + +func (s *DockerDaemonSuite) TestDaemonStartIptablesFalse(c *check.C) { + if err := s.d.Start("--iptables=false"); err != nil { + c.Fatalf("we should have been able to start the daemon with passing iptables=false: %v", err) + } +} + +// Issue #8444: If docker0 bridge is modified (intentionally or unintentionally) and +// no longer has an IP associated, we should gracefully handle that case and associate +// an IP with it rather than fail daemon start +func (s *DockerDaemonSuite) TestDaemonStartBridgeWithoutIPAssociation(c *check.C) { + // rather than depending on brctl commands to verify docker0 is created and up + // let's start the daemon and stop it, and then make a modification to run the + // actual test + if err := s.d.Start(); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + if err := s.d.Stop(); err != nil { + c.Fatalf("Could not stop daemon: %v", err) + } + + // now we will remove the ip from docker0 and then try starting the daemon + ipCmd := exec.Command("ip", "addr", "flush", "dev", "docker0") + stdout, stderr, _, err := runCommandWithStdoutStderr(ipCmd) + if err != nil { + c.Fatalf("failed to remove docker0 IP association: %v, stdout: %q, stderr: %q", err, stdout, stderr) + } + + if err := s.d.Start(); err != nil { + warning := "**WARNING: Docker bridge network in bad state--delete docker0 bridge interface to fix" + c.Fatalf("Could not start daemon when docker0 has no IP address: %v\n%s", err, warning) + } +} + +func (s *DockerDaemonSuite) TestDaemonIptablesClean(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + if out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top"); err != nil { + c.Fatalf("Could not run top: %s, %v", out, err) + } + + // get output from iptables with container running + ipTablesSearchString := "tcp dpt:80" + ipTablesCmd := exec.Command("iptables", "-nvL") + out, _, err := runCommandWithOutput(ipTablesCmd) + if err != nil { + c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + } + + if !strings.Contains(out, ipTablesSearchString) { + c.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out) + } + + if err := s.d.Stop(); err != nil { + c.Fatalf("Could not stop daemon: %v", err) + } + + // get output from iptables after restart + ipTablesCmd = exec.Command("iptables", "-nvL") + out, _, err = runCommandWithOutput(ipTablesCmd) + if err != nil { + c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + } + + if strings.Contains(out, ipTablesSearchString) { + c.Fatalf("iptables output should not have contained %q, but was %q", ipTablesSearchString, out) + } +} + +func (s *DockerDaemonSuite) TestDaemonIptablesCreate(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + if out, err := 
s.d.Cmd("run", "-d", "--name", "top", "--restart=always", "-p", "80", "busybox:latest", "top"); err != nil { + c.Fatalf("Could not run top: %s, %v", out, err) + } + + // get output from iptables with container running + ipTablesSearchString := "tcp dpt:80" + ipTablesCmd := exec.Command("iptables", "-nvL") + out, _, err := runCommandWithOutput(ipTablesCmd) + if err != nil { + c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + } + + if !strings.Contains(out, ipTablesSearchString) { + c.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out) + } + + if err := s.d.Restart(); err != nil { + c.Fatalf("Could not restart daemon: %v", err) + } + + // make sure the container is not running + runningOut, err := s.d.Cmd("inspect", "--format='{{.State.Running}}'", "top") + if err != nil { + c.Fatalf("Could not inspect on container: %s, %v", out, err) + } + if strings.TrimSpace(runningOut) != "true" { + c.Fatalf("Container should have been restarted after daemon restart. Status running should have been true but was: %q", strings.TrimSpace(runningOut)) + } + + // get output from iptables after restart + ipTablesCmd = exec.Command("iptables", "-nvL") + out, _, err = runCommandWithOutput(ipTablesCmd) + if err != nil { + c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + } + + if !strings.Contains(out, ipTablesSearchString) { + c.Fatalf("iptables output after restart should have contained %q, but was %q", ipTablesSearchString, out) + } +} + +// TestDaemonIPv6Enabled checks that when the daemon is started with --ipv6=true that the docker0 bridge +// has the fe80::1 address and that a container is assigned a link-local address +func (s *DockerSuite) TestDaemonIPv6Enabled(c *check.C) { + testRequires(c, IPv6) + + if err := setupV6(); err != nil { + c.Fatal("Could not set up host for IPv6 tests") + } + + d := NewDaemon(c) + + if err := d.StartWithBusybox("--ipv6"); err != nil { + c.Fatal(err) + } + defer d.Stop() + + iface, err := net.InterfaceByName("docker0") + if err != nil { + c.Fatalf("Error getting docker0 interface: %v", err) + } + + addrs, err := iface.Addrs() + if err != nil { + c.Fatalf("Error getting addresses for docker0 interface: %v", err) + } + + var found bool + expected := "fe80::1/64" + + for i := range addrs { + if addrs[i].String() == expected { + found = true + } + } + + if !found { + c.Fatalf("Bridge does not have an IPv6 Address") + } + + if out, err := d.Cmd("run", "-itd", "--name=ipv6test", "busybox:latest"); err != nil { + c.Fatalf("Could not run container: %s, %v", out, err) + } + + out, err := d.Cmd("inspect", "--format", "'{{.NetworkSettings.LinkLocalIPv6Address}}'", "ipv6test") + out = strings.Trim(out, " \r\n'") + + if err != nil { + c.Fatalf("Error inspecting container: %s, %v", out, err) + } + + if ip := net.ParseIP(out); ip == nil { + c.Fatalf("Container should have a link-local IPv6 address") + } + + out, err = d.Cmd("inspect", "--format", "'{{.NetworkSettings.GlobalIPv6Address}}'", "ipv6test") + out = strings.Trim(out, " \r\n'") + + if err != nil { + c.Fatalf("Error inspecting container: %s, %v", out, err) + } + + if ip := net.ParseIP(out); ip != nil { + c.Fatalf("Container should not have a global IPv6 address: %v", out) + } + + if err := teardownV6(); err != nil { + c.Fatal("Could not perform teardown for IPv6 tests") + } + +} + +// TestDaemonIPv6FixedCIDR checks that when the daemon is started with --ipv6=true and a fixed CIDR +// that running containers are given a link-local and global IPv6 address +func (s 
*DockerSuite) TestDaemonIPv6FixedCIDR(c *check.C) { + testRequires(c, IPv6) + + if err := setupV6(); err != nil { + c.Fatal("Could not set up host for IPv6 tests") + } + + d := NewDaemon(c) + + if err := d.StartWithBusybox("--ipv6", "--fixed-cidr-v6='2001:db8:1::/64'"); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + defer d.Stop() + + if out, err := d.Cmd("run", "-itd", "--name=ipv6test", "busybox:latest"); err != nil { + c.Fatalf("Could not run container: %s, %v", out, err) + } + + out, err := d.Cmd("inspect", "--format", "'{{.NetworkSettings.LinkLocalIPv6Address}}'", "ipv6test") + out = strings.Trim(out, " \r\n'") + + if err != nil { + c.Fatalf("Error inspecting container: %s, %v", out, err) + } + + if ip := net.ParseIP(out); ip == nil { + c.Fatalf("Container should have a link-local IPv6 address") + } + + out, err = d.Cmd("inspect", "--format", "'{{.NetworkSettings.GlobalIPv6Address}}'", "ipv6test") + out = strings.Trim(out, " \r\n'") + + if err != nil { + c.Fatalf("Error inspecting container: %s, %v", out, err) + } + + if ip := net.ParseIP(out); ip == nil { + c.Fatalf("Container should have a global IPv6 address") + } + if err := teardownV6(); err != nil { + c.Fatal("Could not perform teardown for IPv6 tests") + } +} + +func (s *DockerDaemonSuite) TestDaemonLogLevelWrong(c *check.C) { + c.Assert(s.d.Start("--log-level=bogus"), check.NotNil, check.Commentf("Daemon shouldn't start with wrong log level")) +} + +func (s *DockerSuite) TestDaemonStartWithBackwardCompatibility(c *check.C) { + + var validCommandArgs = [][]string{ + {"--selinux-enabled", "-l", "info"}, + {"--insecure-registry", "daemon"}, + } + + var invalidCommandArgs = [][]string{ + {"--selinux-enabled", "--storage-opt"}, + {"-D", "-b"}, + {"--config", "/tmp"}, + } + + for _, args := range validCommandArgs { + d := NewDaemon(c) + d.Command = "--daemon" + if err := d.Start(args...); err != nil { + c.Fatalf("Daemon should have started successfully with --daemon %v: %v", args, err) + } + d.Stop() + } + + for _, args := range invalidCommandArgs { + d := NewDaemon(c) + if err := d.Start(args...); err == nil { + d.Stop() + c.Fatalf("Daemon should have failed to start with %v", args) + } + } +} + +func (s *DockerSuite) TestDaemonStartWithDaemonCommand(c *check.C) { + + type kind int + + const ( + common kind = iota + daemon + ) + + var flags = []map[kind][]string{ + {common: {"-l", "info"}, daemon: {"--selinux-enabled"}}, + {common: {"-D"}, daemon: {"--selinux-enabled", "-r"}}, + {common: {"-D"}, daemon: {"--restart"}}, + {common: {"--debug"}, daemon: {"--log-driver=json-file", "--log-opt=max-size=1k"}}, + } + + var invalidGlobalFlags = [][]string{ + //Invalid because you cannot pass daemon flags as global flags. + {"--selinux-enabled", "-l", "info"}, + {"-D", "-r"}, + {"--config", "/tmp"}, + } + + // `docker daemon -l info --selinux-enabled` + // should NOT error out + for _, f := range flags { + d := NewDaemon(c) + args := append(f[common], f[daemon]...) + if err := d.Start(args...); err != nil { + c.Fatalf("Daemon should have started successfully with %v: %v", args, err) + } + d.Stop() + } + + // `docker -l info daemon --selinux-enabled` + // should error out + for _, f := range flags { + d := NewDaemon(c) + d.GlobalFlags = f[common] + if err := d.Start(f[daemon]...); err == nil { + d.Stop() + c.Fatalf("Daemon should have failed to start with docker %v daemon %v", d.GlobalFlags, f[daemon]) + } + } + + for _, f := range invalidGlobalFlags { + cmd := exec.Command(dockerBinary, append(f, "daemon")...) 
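+		// Editor's note: the select below gives the daemon one second to exit.
+		// If it is still running after the timeout it is killed and err stays
+		// nil (the goroutine's result is never read), so the err == nil check
+		// after the select treats a surviving daemon as the failure case for
+		// these invalid flag combinations.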
+ errch := make(chan error)
+ var err error
+ go func() {
+ errch <- cmd.Run()
+ }()
+ select {
+ case <-time.After(time.Second):
+ cmd.Process.Kill()
+ case err = <-errch:
+ }
+ if err == nil {
+ c.Fatalf("Daemon should have failed to start with docker %v daemon", f)
+ }
+ }
+}
+
+func (s *DockerDaemonSuite) TestDaemonLogLevelDebug(c *check.C) {
+ if err := s.d.Start("--log-level=debug"); err != nil {
+ c.Fatal(err)
+ }
+ content, _ := ioutil.ReadFile(s.d.logFile.Name())
+ if !strings.Contains(string(content), `level=debug`) {
+ c.Fatalf("Missing level=\"debug\" in log file:\n%s", string(content))
+ }
+}
+
+func (s *DockerDaemonSuite) TestDaemonLogLevelFatal(c *check.C) {
+ // start a fresh daemon so that a new logFile is created
+ if err := s.d.Start("--log-level=fatal"); err != nil {
+ c.Fatal(err)
+ }
+ content, _ := ioutil.ReadFile(s.d.logFile.Name())
+ if strings.Contains(string(content), `level=debug`) {
+ c.Fatalf("Should not have level=\"debug\" in log file:\n%s", string(content))
+ }
+}
+
+func (s *DockerDaemonSuite) TestDaemonFlagD(c *check.C) {
+ if err := s.d.Start("-D"); err != nil {
+ c.Fatal(err)
+ }
+ content, _ := ioutil.ReadFile(s.d.logFile.Name())
+ if !strings.Contains(string(content), `level=debug`) {
+ c.Fatalf("Should have level=\"debug\" in log file using -D:\n%s", string(content))
+ }
+}
+
+func (s *DockerDaemonSuite) TestDaemonFlagDebug(c *check.C) {
+ if err := s.d.Start("--debug"); err != nil {
+ c.Fatal(err)
+ }
+ content, _ := ioutil.ReadFile(s.d.logFile.Name())
+ if !strings.Contains(string(content), `level=debug`) {
+ c.Fatalf("Should have level=\"debug\" in log file using --debug:\n%s", string(content))
+ }
+}
+
+func (s *DockerDaemonSuite) TestDaemonFlagDebugLogLevelFatal(c *check.C) {
+ if err := s.d.Start("--debug", "--log-level=fatal"); err != nil {
+ c.Fatal(err)
+ }
+ content, _ := ioutil.ReadFile(s.d.logFile.Name())
+ if !strings.Contains(string(content), `level=debug`) {
+ c.Fatalf("Should have level=\"debug\" in log file when using both --debug and --log-level=fatal:\n%s", string(content))
+ }
+}
+
+func (s *DockerDaemonSuite) TestDaemonAllocatesListeningPort(c *check.C) {
+ listeningPorts := [][]string{
+ {"0.0.0.0", "0.0.0.0", "5678"},
+ {"127.0.0.1", "127.0.0.1", "1234"},
+ {"localhost", "127.0.0.1", "1235"},
+ }
+
+ cmdArgs := make([]string, 0, len(listeningPorts)*2)
+ for _, hostDirective := range listeningPorts {
+ cmdArgs = append(cmdArgs, "--host", fmt.Sprintf("tcp://%s:%s", hostDirective[0], hostDirective[2]))
+ }
+
+ if err := s.d.StartWithBusybox(cmdArgs...); err != nil {
+ c.Fatalf("Could not start daemon with busybox: %v", err)
+ }
+
+ for _, hostDirective := range listeningPorts {
+ output, err := s.d.Cmd("run", "-p", fmt.Sprintf("%s:%s:80", hostDirective[1], hostDirective[2]), "busybox", "true")
+ if err == nil {
+ c.Fatalf("Container should not start, expected port already allocated error: %q", output)
+ } else if !strings.Contains(output, "port is already allocated") {
+ c.Fatalf("Expected port is already allocated error: %q", output)
+ }
+ }
+}
+
+func (s *DockerDaemonSuite) TestDaemonKeyGeneration(c *check.C) {
+ // TODO: skip or update for Windows daemon
+ os.Remove("/etc/docker/key.json")
+ if err := s.d.Start(); err != nil {
+ c.Fatalf("Could not start daemon: %v", err)
+ }
+ s.d.Stop()
+
+ k, err := libtrust.LoadKeyFile("/etc/docker/key.json")
+ if err != nil {
+ c.Fatalf("Error opening key file")
+ }
+ kid := k.KeyID()
+ // Test Key ID is a valid fingerprint (e.g.
QQXN:JY5W:TBXI:MK3X:GX6P:PD5D:F56N:NHCS:LVRZ:JA46:R24J:XEFF)
+ if len(kid) != 59 {
+ c.Fatalf("Bad key ID: %s", kid)
+ }
+}
+
+func (s *DockerDaemonSuite) TestDaemonKeyMigration(c *check.C) {
+ // TODO: skip or update for Windows daemon
+ os.Remove("/etc/docker/key.json")
+ k1, err := libtrust.GenerateECP256PrivateKey()
+ if err != nil {
+ c.Fatalf("Error generating private key: %s", err)
+ }
+ if err := os.MkdirAll(filepath.Join(os.Getenv("HOME"), ".docker"), 0755); err != nil {
+ c.Fatalf("Error creating .docker directory: %s", err)
+ }
+ if err := libtrust.SaveKey(filepath.Join(os.Getenv("HOME"), ".docker", "key.json"), k1); err != nil {
+ c.Fatalf("Error saving private key: %s", err)
+ }
+
+ if err := s.d.Start(); err != nil {
+ c.Fatalf("Could not start daemon: %v", err)
+ }
+ s.d.Stop()
+
+ k2, err := libtrust.LoadKeyFile("/etc/docker/key.json")
+ if err != nil {
+ c.Fatalf("Error opening key file")
+ }
+ if k1.KeyID() != k2.KeyID() {
+ c.Fatalf("Key not migrated")
+ }
+}
+
+// GH#11320 - verify that the daemon exits on failure properly
+// Note that this explicitly tests the conflict of {-b,--bridge} and {--bip} options as the means
+// to get a daemon init failure; no other tests for -b/--bip conflict are therefore required
+func (s *DockerDaemonSuite) TestDaemonExitOnFailure(c *check.C) {
+ // attempt to start daemon with incorrect flags (we know -b and --bip conflict)
+ if err := s.d.Start("--bridge", "nosuchbridge", "--bip", "1.1.1.1"); err != nil {
+ // verify we got the right error
+ if !strings.Contains(err.Error(), "Daemon exited and never started") {
+ c.Fatalf("Expected daemon not to start, got %v", err)
+ }
+ // look in the log and make sure we got the message that daemon is shutting down
+ runCmd := exec.Command("grep", "Error starting daemon", s.d.LogfileName())
+ if out, _, err := runCommandWithOutput(runCmd); err != nil {
+ c.Fatalf("Expected 'Error starting daemon' message, but it is missing from the log: %q, err: %v", out, err)
+ }
+ } else {
+ // if we didn't get an error and the daemon is running, this is a failure
+ c.Fatal("Conflicting options should cause the daemon to error out with a failure")
+ }
+}
+
+func (s *DockerDaemonSuite) TestDaemonBridgeExternal(c *check.C) {
+ d := s.d
+ err := d.Start("--bridge", "nosuchbridge")
+ c.Assert(err, check.NotNil, check.Commentf("--bridge option with an invalid bridge should cause the daemon to fail"))
+ defer d.Restart()
+
+ bridgeName := "external-bridge"
+ bridgeIP := "192.169.1.1/24"
+ _, bridgeIPNet, _ := net.ParseCIDR(bridgeIP)
+
+ out, err := createInterface(c, "bridge", bridgeName, bridgeIP)
+ c.Assert(err, check.IsNil, check.Commentf(out))
+ defer deleteInterface(c, bridgeName)
+
+ err = d.StartWithBusybox("--bridge", bridgeName)
+ c.Assert(err, check.IsNil)
+
+ ipTablesSearchString := bridgeIPNet.String()
+ ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL")
+ out, _, err = runCommandWithOutput(ipTablesCmd)
+ c.Assert(err, check.IsNil)
+
+ c.Assert(strings.Contains(out, ipTablesSearchString), check.Equals, true,
+ check.Commentf("iptables output should have contained %q, but was %q",
+ ipTablesSearchString, out))
+
+ _, err = d.Cmd("run", "-d", "--name", "ExtContainer", "busybox", "top")
+ c.Assert(err, check.IsNil)
+
+ containerIP := d.findContainerIP("ExtContainer")
+ ip := net.ParseIP(containerIP)
+ c.Assert(bridgeIPNet.Contains(ip), check.Equals, true,
+ check.Commentf("Container IP-Address must be in the same subnet range : %s",
+ containerIP))
+}
+
+func createInterface(c *check.C, ifType string, ifName
string, ipNet string) (string, error) {
+ args := []string{"link", "add", "name", ifName, "type", ifType}
+ ipLinkCmd := exec.Command("ip", args...)
+ out, _, err := runCommandWithOutput(ipLinkCmd)
+ if err != nil {
+ return out, err
+ }
+
+ ifCfgCmd := exec.Command("ifconfig", ifName, ipNet, "up")
+ out, _, err = runCommandWithOutput(ifCfgCmd)
+ return out, err
+}
+
+func deleteInterface(c *check.C, ifName string) {
+ ifCmd := exec.Command("ip", "link", "delete", ifName)
+ out, _, err := runCommandWithOutput(ifCmd)
+ c.Assert(err, check.IsNil, check.Commentf(out))
+
+ flushCmd := exec.Command("iptables", "-t", "nat", "--flush")
+ out, _, err = runCommandWithOutput(flushCmd)
+ c.Assert(err, check.IsNil, check.Commentf(out))
+
+ flushCmd = exec.Command("iptables", "--flush")
+ out, _, err = runCommandWithOutput(flushCmd)
+ c.Assert(err, check.IsNil, check.Commentf(out))
+}
+
+func (s *DockerDaemonSuite) TestDaemonBridgeIP(c *check.C) {
+ // TestDaemonBridgeIP Steps
+ // 1. Delete the existing docker0 Bridge
+ // 2. Set --bip daemon configuration and start the new Docker Daemon
+ // 3. Check if the bip config has taken effect using ifconfig and iptables commands
+ // 4. Launch a Container and make sure the IP-Address is in the expected subnet
+ // 5. Delete the docker0 Bridge
+ // 6. Restart the Docker Daemon (via deferred action)
+ // This Restart takes care of bringing docker0 interface back to auto-assigned IP
+
+ defaultNetworkBridge := "docker0"
+ deleteInterface(c, defaultNetworkBridge)
+
+ d := s.d
+
+ bridgeIP := "192.169.1.1/24"
+ ip, bridgeIPNet, _ := net.ParseCIDR(bridgeIP)
+
+ err := d.StartWithBusybox("--bip", bridgeIP)
+ c.Assert(err, check.IsNil)
+ defer d.Restart()
+
+ ifconfigSearchString := ip.String()
+ ifconfigCmd := exec.Command("ifconfig", defaultNetworkBridge)
+ out, _, _, err := runCommandWithStdoutStderr(ifconfigCmd)
+ c.Assert(err, check.IsNil)
+
+ c.Assert(strings.Contains(out, ifconfigSearchString), check.Equals, true,
+ check.Commentf("ifconfig output should have contained %q, but was %q",
+ ifconfigSearchString, out))
+
+ ipTablesSearchString := bridgeIPNet.String()
+ ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL")
+ out, _, err = runCommandWithOutput(ipTablesCmd)
+ c.Assert(err, check.IsNil)
+
+ c.Assert(strings.Contains(out, ipTablesSearchString), check.Equals, true,
+ check.Commentf("iptables output should have contained %q, but was %q",
+ ipTablesSearchString, out))
+
+ out, err = d.Cmd("run", "-d", "--name", "test", "busybox", "top")
+ c.Assert(err, check.IsNil)
+
+ containerIP := d.findContainerIP("test")
+ ip = net.ParseIP(containerIP)
+ c.Assert(bridgeIPNet.Contains(ip), check.Equals, true,
+ check.Commentf("Container IP-Address must be in the same subnet range : %s",
+ containerIP))
+ deleteInterface(c, defaultNetworkBridge)
+}
+
+func (s *DockerDaemonSuite) TestDaemonRestartWithBridgeIPChange(c *check.C) {
+ if err := s.d.Start(); err != nil {
+ c.Fatalf("Could not start daemon: %v", err)
+ }
+ defer s.d.Restart()
+ if err := s.d.Stop(); err != nil {
+ c.Fatalf("Could not stop daemon: %v", err)
+ }
+
+ // now we will change the docker0's IP and then try starting the daemon
+ bridgeIP := "192.169.100.1/24"
+ _, bridgeIPNet, _ := net.ParseCIDR(bridgeIP)
+
+ ipCmd := exec.Command("ifconfig", "docker0", bridgeIP)
+ stdout, stderr, _, err := runCommandWithStdoutStderr(ipCmd)
+ if err != nil {
+ c.Fatalf("failed to change docker0's IP association: %v, stdout: %q, stderr: %q", err, stdout, stderr)
+ }
+
+ if err := s.d.Start("--bip", bridgeIP);
err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + + //check if the iptables contains new bridgeIP MASQUERADE rule + ipTablesSearchString := bridgeIPNet.String() + ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL") + out, _, err := runCommandWithOutput(ipTablesCmd) + if err != nil { + c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + } + if !strings.Contains(out, ipTablesSearchString) { + c.Fatalf("iptables output should have contained new MASQUERADE rule with IP %q, but was %q", ipTablesSearchString, out) + } +} + +func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidr(c *check.C) { + d := s.d + + bridgeName := "external-bridge" + bridgeIP := "192.169.1.1/24" + + out, err := createInterface(c, "bridge", bridgeName, bridgeIP) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, bridgeName) + + args := []string{"--bridge", bridgeName, "--fixed-cidr", "192.169.1.0/30"} + err = d.StartWithBusybox(args...) + c.Assert(err, check.IsNil) + defer d.Restart() + + for i := 0; i < 4; i++ { + cName := "Container" + strconv.Itoa(i) + out, err := d.Cmd("run", "-d", "--name", cName, "busybox", "top") + if err != nil { + c.Assert(strings.Contains(out, "no available ip addresses"), check.Equals, true, + check.Commentf("Could not run a Container : %s %s", err.Error(), out)) + } + } +} + +func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4Implicit(c *check.C) { + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + d := s.d + + bridgeIP := "192.169.1.1" + bridgeIPNet := fmt.Sprintf("%s/24", bridgeIP) + + err := d.StartWithBusybox("--bip", bridgeIPNet) + c.Assert(err, check.IsNil) + defer d.Restart() + + expectedMessage := fmt.Sprintf("default via %s dev", bridgeIP) + out, err := d.Cmd("run", "busybox", "ip", "-4", "route", "list", "0/0") + c.Assert(strings.Contains(out, expectedMessage), check.Equals, true, + check.Commentf("Implicit default gateway should be bridge IP %s, but default route was '%s'", + bridgeIP, strings.TrimSpace(out))) + deleteInterface(c, defaultNetworkBridge) +} + +func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4Explicit(c *check.C) { + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + d := s.d + + bridgeIP := "192.169.1.1" + bridgeIPNet := fmt.Sprintf("%s/24", bridgeIP) + gatewayIP := "192.169.1.254" + + err := d.StartWithBusybox("--bip", bridgeIPNet, "--default-gateway", gatewayIP) + c.Assert(err, check.IsNil) + defer d.Restart() + + expectedMessage := fmt.Sprintf("default via %s dev", gatewayIP) + out, err := d.Cmd("run", "busybox", "ip", "-4", "route", "list", "0/0") + c.Assert(strings.Contains(out, expectedMessage), check.Equals, true, + check.Commentf("Explicit default gateway should be %s, but default route was '%s'", + gatewayIP, strings.TrimSpace(out))) + deleteInterface(c, defaultNetworkBridge) +} + +func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4ExplicitOutsideContainerSubnet(c *check.C) { + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + // Program a custom default gateway outside of the container subnet, daemon should accept it and start + err := s.d.StartWithBusybox("--bip", "172.16.0.10/16", "--fixed-cidr", "172.16.1.0/24", "--default-gateway", "172.16.0.254") + c.Assert(err, check.IsNil) + + deleteInterface(c, defaultNetworkBridge) + s.d.Restart() +} + +func (s *DockerDaemonSuite) TestDaemonIP(c *check.C) { + d := s.d + + ipStr := "192.170.1.1/24" + ip, _, _ := net.ParseCIDR(ipStr) + args := 
[]string{"--ip", ip.String()} + err := d.StartWithBusybox(args...) + c.Assert(err, check.IsNil) + defer d.Restart() + + out, err := d.Cmd("run", "-d", "-p", "8000:8000", "busybox", "top") + c.Assert(err, check.NotNil, + check.Commentf("Running a container must fail with an invalid --ip option")) + c.Assert(strings.Contains(out, "Error starting userland proxy"), check.Equals, true) + + ifName := "dummy" + out, err = createInterface(c, "dummy", ifName, ipStr) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, ifName) + + _, err = d.Cmd("run", "-d", "-p", "8000:8000", "busybox", "top") + c.Assert(err, check.IsNil) + + ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL") + out, _, err = runCommandWithOutput(ipTablesCmd) + c.Assert(err, check.IsNil) + + regex := fmt.Sprintf("DNAT.*%s.*dpt:8000", ip.String()) + matched, _ := regexp.MatchString(regex, out) + c.Assert(matched, check.Equals, true, + check.Commentf("iptables output should have contained %q, but was %q", regex, out)) +} + +func (s *DockerDaemonSuite) TestDaemonICCPing(c *check.C) { + d := s.d + + bridgeName := "external-bridge" + bridgeIP := "192.169.1.1/24" + + out, err := createInterface(c, "bridge", bridgeName, bridgeIP) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, bridgeName) + + args := []string{"--bridge", bridgeName, "--icc=false"} + err = d.StartWithBusybox(args...) + c.Assert(err, check.IsNil) + defer d.Restart() + + ipTablesCmd := exec.Command("iptables", "-nvL", "FORWARD") + out, _, err = runCommandWithOutput(ipTablesCmd) + c.Assert(err, check.IsNil) + + regex := fmt.Sprintf("DROP.*all.*%s.*%s", bridgeName, bridgeName) + matched, _ := regexp.MatchString(regex, out) + c.Assert(matched, check.Equals, true, + check.Commentf("iptables output should have contained %q, but was %q", regex, out)) + + // Pinging another container must fail with --icc=false + pingContainers(c, d, true) + + ipStr := "192.171.1.1/24" + ip, _, _ := net.ParseCIDR(ipStr) + ifName := "icc-dummy" + + createInterface(c, "dummy", ifName, ipStr) + + // But, Pinging external or a Host interface must succeed + pingCmd := fmt.Sprintf("ping -c 1 %s -W 1", ip.String()) + runArgs := []string{"--rm", "busybox", "sh", "-c", pingCmd} + _, err = d.Cmd("run", runArgs...) + c.Assert(err, check.IsNil) +} + +func (s *DockerDaemonSuite) TestDaemonICCLinkExpose(c *check.C) { + d := s.d + + bridgeName := "external-bridge" + bridgeIP := "192.169.1.1/24" + + out, err := createInterface(c, "bridge", bridgeName, bridgeIP) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, bridgeName) + + args := []string{"--bridge", bridgeName, "--icc=false"} + err = d.StartWithBusybox(args...) 
+ c.Assert(err, check.IsNil) + defer d.Restart() + + ipTablesCmd := exec.Command("iptables", "-nvL", "FORWARD") + out, _, err = runCommandWithOutput(ipTablesCmd) + c.Assert(err, check.IsNil) + + regex := fmt.Sprintf("DROP.*all.*%s.*%s", bridgeName, bridgeName) + matched, _ := regexp.MatchString(regex, out) + c.Assert(matched, check.Equals, true, + check.Commentf("iptables output should have contained %q, but was %q", regex, out)) + + out, err = d.Cmd("run", "-d", "--expose", "4567", "--name", "icc1", "busybox", "nc", "-l", "-p", "4567") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = d.Cmd("run", "--link", "icc1:icc1", "busybox", "nc", "icc1", "4567") + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerDaemonSuite) TestDaemonLinksIpTablesRulesWhenLinkAndUnlink(c *check.C) { + bridgeName := "external-bridge" + bridgeIP := "192.169.1.1/24" + + out, err := createInterface(c, "bridge", bridgeName, bridgeIP) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, bridgeName) + + err = s.d.StartWithBusybox("--bridge", bridgeName, "--icc=false") + c.Assert(err, check.IsNil) + defer s.d.Restart() + + _, err = s.d.Cmd("run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "top") + c.Assert(err, check.IsNil) + _, err = s.d.Cmd("run", "-d", "--name", "parent", "--link", "child:http", "busybox", "top") + c.Assert(err, check.IsNil) + + childIP := s.d.findContainerIP("child") + parentIP := s.d.findContainerIP("parent") + + sourceRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", childIP, "--sport", "80", "-d", parentIP, "-j", "ACCEPT"} + destinationRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", parentIP, "--dport", "80", "-d", childIP, "-j", "ACCEPT"} + if !iptables.Exists("filter", "DOCKER", sourceRule...) || !iptables.Exists("filter", "DOCKER", destinationRule...) { + c.Fatal("Iptables rules not found") + } + + s.d.Cmd("rm", "--link", "parent/http") + if iptables.Exists("filter", "DOCKER", sourceRule...) || iptables.Exists("filter", "DOCKER", destinationRule...) 
{
+ c.Fatal("Iptables rules should be removed when unlinked")
+ }
+
+ s.d.Cmd("kill", "child")
+ s.d.Cmd("kill", "parent")
+}
+
+func (s *DockerDaemonSuite) TestDaemonUlimitDefaults(c *check.C) {
+ testRequires(c, NativeExecDriver)
+
+ if err := s.d.StartWithBusybox("--default-ulimit", "nofile=42:42", "--default-ulimit", "nproc=1024:1024"); err != nil {
+ c.Fatal(err)
+ }
+
+ out, err := s.d.Cmd("run", "--ulimit", "nproc=2048", "--name=test", "busybox", "/bin/sh", "-c", "echo $(ulimit -n); echo $(ulimit -p)")
+ if err != nil {
+ c.Fatal(out, err)
+ }
+
+ outArr := strings.Split(out, "\n")
+ if len(outArr) < 2 {
+ c.Fatalf("got unexpected output: %s", out)
+ }
+ nofile := strings.TrimSpace(outArr[0])
+ nproc := strings.TrimSpace(outArr[1])
+
+ if nofile != "42" {
+ c.Fatalf("expected `ulimit -n` to be `42`, got: %s", nofile)
+ }
+ if nproc != "2048" {
+ c.Fatalf("expected `ulimit -p` to be `2048`, got: %s", nproc)
+ }
+
+ // Now restart daemon with a new default
+ if err := s.d.Restart("--default-ulimit", "nofile=43"); err != nil {
+ c.Fatal(err)
+ }
+
+ out, err = s.d.Cmd("start", "-a", "test")
+ if err != nil {
+ c.Fatal(err)
+ }
+
+ outArr = strings.Split(out, "\n")
+ if len(outArr) < 2 {
+ c.Fatalf("got unexpected output: %s", out)
+ }
+ nofile = strings.TrimSpace(outArr[0])
+ nproc = strings.TrimSpace(outArr[1])
+
+ if nofile != "43" {
+ c.Fatalf("expected `ulimit -n` to be `43`, got: %s", nofile)
+ }
+ if nproc != "2048" {
+ c.Fatalf("expected `ulimit -p` to be `2048`, got: %s", nproc)
+ }
+}
+
+// #11315
+func (s *DockerDaemonSuite) TestDaemonRestartRenameContainer(c *check.C) {
+ if err := s.d.StartWithBusybox(); err != nil {
+ c.Fatal(err)
+ }
+
+ if out, err := s.d.Cmd("run", "--name=test", "busybox"); err != nil {
+ c.Fatal(err, out)
+ }
+
+ if out, err := s.d.Cmd("rename", "test", "test2"); err != nil {
+ c.Fatal(err, out)
+ }
+
+ if err := s.d.Restart(); err != nil {
+ c.Fatal(err)
+ }
+
+ if out, err := s.d.Cmd("start", "test2"); err != nil {
+ c.Fatal(err, out)
+ }
+}
+
+func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefault(c *check.C) {
+ if err := s.d.StartWithBusybox(); err != nil {
+ c.Fatal(err)
+ }
+
+ out, err := s.d.Cmd("run", "-d", "busybox", "echo", "testline")
+ if err != nil {
+ c.Fatal(out, err)
+ }
+ id := strings.TrimSpace(out)
+
+ if out, err := s.d.Cmd("wait", id); err != nil {
+ c.Fatal(out, err)
+ }
+ logPath := filepath.Join(s.d.folder, "graph", "containers", id, id+"-json.log")
+
+ if _, err := os.Stat(logPath); err != nil {
+ c.Fatal(err)
+ }
+ f, err := os.Open(logPath)
+ if err != nil {
+ c.Fatal(err)
+ }
+ var res struct {
+ Log string `json:"log"`
+ Stream string `json:"stream"`
+ Time time.Time `json:"time"`
+ }
+ if err := json.NewDecoder(f).Decode(&res); err != nil {
+ c.Fatal(err)
+ }
+ if res.Log != "testline\n" {
+ c.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n")
+ }
+ if res.Stream != "stdout" {
+ c.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout")
+ }
+ if !time.Now().After(res.Time) {
+ c.Fatalf("Log time %v in future", res.Time)
+ }
+}
+
+func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefaultOverride(c *check.C) {
+ if err := s.d.StartWithBusybox(); err != nil {
+ c.Fatal(err)
+ }
+
+ out, err := s.d.Cmd("run", "-d", "--log-driver=none", "busybox", "echo", "testline")
+ if err != nil {
+ c.Fatal(out, err)
+ }
+ id := strings.TrimSpace(out)
+
+ if out, err := s.d.Cmd("wait", id); err != nil {
+ c.Fatal(out, err)
+ }
+ logPath := filepath.Join(s.d.folder, "graph", "containers", id, id+"-json.log")
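+ // The json-file driver is expected to write the container log as
+ // <id>-json.log under the daemon's container state directory (the
+ // "graph/containers/<id>" layout assumed by logPath above); with
+ // --log-driver=none no such file should ever be created.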
+
+ if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) {
+ c.Fatalf("%s shouldn't exist; error on Stat: %s", logPath, err)
+ }
+}
+
+func (s *DockerDaemonSuite) TestDaemonLoggingDriverNone(c *check.C) {
+ if err := s.d.StartWithBusybox("--log-driver=none"); err != nil {
+ c.Fatal(err)
+ }
+
+ out, err := s.d.Cmd("run", "-d", "busybox", "echo", "testline")
+ if err != nil {
+ c.Fatal(out, err)
+ }
+ id := strings.TrimSpace(out)
+ if out, err := s.d.Cmd("wait", id); err != nil {
+ c.Fatal(out, err)
+ }
+
+ logPath := filepath.Join(s.d.folder, "graph", "containers", id, id+"-json.log")
+
+ if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) {
+ c.Fatalf("%s shouldn't exist; error on Stat: %s", logPath, err)
+ }
+}
+
+func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneOverride(c *check.C) {
+ if err := s.d.StartWithBusybox("--log-driver=none"); err != nil {
+ c.Fatal(err)
+ }
+
+ out, err := s.d.Cmd("run", "-d", "--log-driver=json-file", "busybox", "echo", "testline")
+ if err != nil {
+ c.Fatal(out, err)
+ }
+ id := strings.TrimSpace(out)
+
+ if out, err := s.d.Cmd("wait", id); err != nil {
+ c.Fatal(out, err)
+ }
+ logPath := filepath.Join(s.d.folder, "graph", "containers", id, id+"-json.log")
+
+ if _, err := os.Stat(logPath); err != nil {
+ c.Fatal(err)
+ }
+ f, err := os.Open(logPath)
+ if err != nil {
+ c.Fatal(err)
+ }
+ var res struct {
+ Log string `json:"log"`
+ Stream string `json:"stream"`
+ Time time.Time `json:"time"`
+ }
+ if err := json.NewDecoder(f).Decode(&res); err != nil {
+ c.Fatal(err)
+ }
+ if res.Log != "testline\n" {
+ c.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n")
+ }
+ if res.Stream != "stdout" {
+ c.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout")
+ }
+ if !time.Now().After(res.Time) {
+ c.Fatalf("Log time %v in future", res.Time)
+ }
+}
+
+func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneLogsError(c *check.C) {
+ if err := s.d.StartWithBusybox("--log-driver=none"); err != nil {
+ c.Fatal(err)
+ }
+
+ out, err := s.d.Cmd("run", "-d", "busybox", "echo", "testline")
+ if err != nil {
+ c.Fatal(out, err)
+ }
+ id := strings.TrimSpace(out)
+ out, err = s.d.Cmd("logs", id)
+ if err == nil {
+ c.Fatalf("Logs should fail with \"none\" driver")
+ }
+ if !strings.Contains(out, `"logs" command is supported only for "json-file" logging driver`) {
+ c.Fatalf("There should be error about non-json-file driver, got: %s", out)
+ }
+}
+
+func (s *DockerDaemonSuite) TestDaemonDots(c *check.C) {
+ if err := s.d.StartWithBusybox(); err != nil {
+ c.Fatal(err)
+ }
+
+ // Now create 4 containers
+ if _, err := s.d.Cmd("create", "busybox"); err != nil {
+ c.Fatalf("Error creating container: %q", err)
+ }
+ if _, err := s.d.Cmd("create", "busybox"); err != nil {
+ c.Fatalf("Error creating container: %q", err)
+ }
+ if _, err := s.d.Cmd("create", "busybox"); err != nil {
+ c.Fatalf("Error creating container: %q", err)
+ }
+ if _, err := s.d.Cmd("create", "busybox"); err != nil {
+ c.Fatalf("Error creating container: %q", err)
+ }
+
+ s.d.Stop()
+
+ s.d.Start("--log-level=debug")
+ s.d.Stop()
+ content, _ := ioutil.ReadFile(s.d.logFile.Name())
+ if strings.Contains(string(content), "....") {
+ c.Fatalf("Debug level should not have ....\n%s", string(content))
+ }
+
+ s.d.Start("--log-level=error")
+ s.d.Stop()
+ content, _ = ioutil.ReadFile(s.d.logFile.Name())
+ if strings.Contains(string(content), "....") {
+ c.Fatalf("Error level should not have ....\n%s", string(content))
+ }
+
+ s.d.Start("--log-level=info")
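+ // Per the assertions in this test, the progress dots ("....") should show
+ // up only at the default info level; both debug and error levels are
+ // expected to suppress them.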
+ s.d.Stop()
+ content, _ = ioutil.ReadFile(s.d.logFile.Name())
+ if !strings.Contains(string(content), "....") {
+ c.Fatalf("Info level should have ....\n%s", string(content))
+ }
+}
+
+func (s *DockerDaemonSuite) TestDaemonUnixSockCleanedUp(c *check.C) {
+ dir, err := ioutil.TempDir("", "socket-cleanup-test")
+ if err != nil {
+ c.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+
+ sockPath := filepath.Join(dir, "docker.sock")
+ if err := s.d.Start("--host", "unix://"+sockPath); err != nil {
+ c.Fatal(err)
+ }
+
+ if _, err := os.Stat(sockPath); err != nil {
+ c.Fatal("socket does not exist")
+ }
+
+ if err := s.d.Stop(); err != nil {
+ c.Fatal(err)
+ }
+
+ if _, err := os.Stat(sockPath); err == nil || !os.IsNotExist(err) {
+ c.Fatal("unix socket is not cleaned up")
+ }
+}
+
+func (s *DockerDaemonSuite) TestDaemonWithWrongkey(c *check.C) {
+ type Config struct {
+ Crv string `json:"crv"`
+ D string `json:"d"`
+ Kid string `json:"kid"`
+ Kty string `json:"kty"`
+ X string `json:"x"`
+ Y string `json:"y"`
+ }
+
+ os.Remove("/etc/docker/key.json")
+ if err := s.d.Start(); err != nil {
+ c.Fatalf("Failed to start daemon: %v", err)
+ }
+
+ if err := s.d.Stop(); err != nil {
+ c.Fatalf("Could not stop daemon: %v", err)
+ }
+
+ config := &Config{}
+ bytes, err := ioutil.ReadFile("/etc/docker/key.json")
+ if err != nil {
+ c.Fatalf("Error reading key.json file: %s", err)
+ }
+
+ // unmarshal the generated key file into the Config struct
+ if err := json.Unmarshal(bytes, &config); err != nil {
+ c.Fatalf("Error Unmarshal: %s", err)
+ }
+
+ // replace config.Kid with a fake value
+ config.Kid = "VSAJ:FUYR:X3H2:B2VZ:KZ6U:CJD5:K7BX:ZXHY:UZXT:P4FT:MJWG:HRJ4"
+
+ // marshal the modified struct back to JSON
+ newBytes, err := json.Marshal(&config)
+ if err != nil {
+ c.Fatalf("Error Marshal: %s", err)
+ }
+
+ // write back
+ if err := ioutil.WriteFile("/etc/docker/key.json", newBytes, 0400); err != nil {
+ c.Fatalf("Error ioutil.WriteFile: %s", err)
+ }
+
+ defer os.Remove("/etc/docker/key.json")
+
+ if err := s.d.Start(); err == nil {
+ c.Fatal("Daemon should not have started successfully with a mismatched key ID")
+ }
+
+ content, _ := ioutil.ReadFile(s.d.logFile.Name())
+
+ if !strings.Contains(string(content), "Public Key ID does not match") {
+ c.Fatal("Missing KeyID message from daemon logs")
+ }
+}
+
+func (s *DockerDaemonSuite) TestDaemonRestartKillWait(c *check.C) {
+ if err := s.d.StartWithBusybox(); err != nil {
+ c.Fatalf("Could not start daemon with busybox: %v", err)
+ }
+
+ out, err := s.d.Cmd("run", "-id", "busybox", "/bin/cat")
+ if err != nil {
+ c.Fatalf("Could not run /bin/cat: err=%v\n%s", err, out)
+ }
+ containerID := strings.TrimSpace(out)
+
+ if out, err := s.d.Cmd("kill", containerID); err != nil {
+ c.Fatalf("Could not kill %s: err=%v\n%s", containerID, err, out)
+ }
+
+ if err := s.d.Restart(); err != nil {
+ c.Fatalf("Could not restart daemon: %v", err)
+ }
+
+ errchan := make(chan error)
+ go func() {
+ if out, err := s.d.Cmd("wait", containerID); err != nil {
+ errchan <- fmt.Errorf("%v:\n%s", err, out)
+ }
+ close(errchan)
+ }()
+
+ select {
+ case <-time.After(5 * time.Second):
+ c.Fatal("Waiting on a stopped (killed) container timed out")
+ case err := <-errchan:
+ if err != nil {
+ c.Fatal(err)
+ }
+ }
+}
+
+// TestHttpsInfo connects via two-way authenticated HTTPS to the info endpoint
+func (s *DockerDaemonSuite) TestHttpsInfo(c *check.C) {
+ const (
+ testDaemonHTTPSAddr = "tcp://localhost:4271"
+ )
+
+ if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert",
"fixtures/https/server-cert.pem", + "--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + daemonArgs := []string{"--host", testDaemonHTTPSAddr, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/client-cert.pem", "--tlskey", "fixtures/https/client-key.pem"} + out, err := s.d.CmdWithArgs(daemonArgs, "info") + if err != nil { + c.Fatalf("Error Occurred: %s and output: %s", err, out) + } +} + +// TestHttpsInfoRogueCert connects via two-way authenticated HTTPS to the info endpoint +// by using a rogue client certificate and checks that it fails with the expected error. +func (s *DockerDaemonSuite) TestHttpsInfoRogueCert(c *check.C) { + const ( + errBadCertificate = "remote error: bad certificate" + testDaemonHTTPSAddr = "tcp://localhost:4271" + ) + + if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem", + "--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + daemonArgs := []string{"--host", testDaemonHTTPSAddr, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/client-rogue-cert.pem", "--tlskey", "fixtures/https/client-rogue-key.pem"} + out, err := s.d.CmdWithArgs(daemonArgs, "info") + if err == nil || !strings.Contains(out, errBadCertificate) { + c.Fatalf("Expected err: %s, got instead: %s and output: %s", errBadCertificate, err, out) + } +} + +// TestHttpsInfoRogueServerCert connects via two-way authenticated HTTPS to the info endpoint +// which provides a rogue server certificate and checks that it fails with the expected error +func (s *DockerDaemonSuite) TestHttpsInfoRogueServerCert(c *check.C) { + const ( + errCaUnknown = "x509: certificate signed by unknown authority" + testDaemonRogueHTTPSAddr = "tcp://localhost:4272" + ) + if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-rogue-cert.pem", + "--tlskey", "fixtures/https/server-rogue-key.pem", "-H", testDaemonRogueHTTPSAddr); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + daemonArgs := []string{"--host", testDaemonRogueHTTPSAddr, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/client-rogue-cert.pem", "--tlskey", "fixtures/https/client-rogue-key.pem"} + out, err := s.d.CmdWithArgs(daemonArgs, "info") + if err == nil || !strings.Contains(out, errCaUnknown) { + c.Fatalf("Expected err: %s, got instead: %s and output: %s", errCaUnknown, err, out) + } +} + +func pingContainers(c *check.C, d *Daemon, expectFailure bool) { + var dargs []string + if d != nil { + dargs = []string{"--host", d.sock()} + } + + args := append(dargs, "run", "-d", "--name", "container1", "busybox", "top") + dockerCmd(c, args...) + + args = append(dargs, "run", "--rm", "--link", "container1:alias1", "busybox", "sh", "-c") + pingCmd := "ping -c 1 %s -W 1" + args = append(args, fmt.Sprintf(pingCmd, "alias1")) + _, _, err := dockerCmdWithError(c, args...) + + if expectFailure { + c.Assert(err, check.NotNil) + } else { + c.Assert(err, check.IsNil) + } + + args = append(dargs, "rm", "-f", "container1") + dockerCmd(c, args...) 
+} + +func (s *DockerDaemonSuite) TestDaemonRestartWithSocketAsVolume(c *check.C) { + c.Assert(s.d.StartWithBusybox(), check.IsNil) + + socket := filepath.Join(s.d.folder, "docker.sock") + + out, err := s.d.Cmd("run", "-d", "-v", socket+":/sock", "busybox") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(s.d.Restart(), check.IsNil) +} + +func (s *DockerDaemonSuite) TestCleanupMountsAfterCrash(c *check.C) { + c.Assert(s.d.StartWithBusybox(), check.IsNil) + + out, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + id := strings.TrimSpace(out) + c.Assert(s.d.cmd.Process.Signal(os.Kill), check.IsNil) + c.Assert(s.d.Start(), check.IsNil) + mountOut, err := exec.Command("mount").CombinedOutput() + c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) + c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, check.Commentf("Something mounted from older daemon start: %s", mountOut)) +} + +func (s *DockerDaemonSuite) TestRunContainerWithBridgeNone(c *check.C) { + testRequires(c, NativeExecDriver) + c.Assert(s.d.StartWithBusybox("-b", "none"), check.IsNil) + + out, err := s.d.Cmd("run", "--rm", "busybox", "ip", "l") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(strings.Contains(out, "eth0"), check.Equals, false, + check.Commentf("There shouldn't be eth0 in container in default(bridge) mode when bridge network is disabled: %s", out)) + + out, err = s.d.Cmd("run", "--rm", "--net=bridge", "busybox", "ip", "l") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(strings.Contains(out, "eth0"), check.Equals, false, + check.Commentf("There shouldn't be eth0 in container in bridge mode when bridge network is disabled: %s", out)) + + out, err = s.d.Cmd("run", "--rm", "--net=host", "busybox", "ip", "l") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(strings.Contains(out, "eth0"), check.Equals, true, + check.Commentf("There should be eth0 in container when --net=host when bridge network is disabled: %s", out)) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithContainerRunning(t *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + t.Fatal(err) + } + if out, err := s.d.Cmd("run", "-ti", "-d", "--name", "test", "busybox"); err != nil { + t.Fatal(out, err) + } + + if err := s.d.Restart(); err != nil { + t.Fatal(err) + } + // Container 'test' should be removed without error + if out, err := s.d.Cmd("rm", "test"); err != nil { + t.Fatal(out, err) + } +} + +func (s *DockerDaemonSuite) TestDaemonRestartCleanupNetns(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatal(err) + } + out, err := s.d.Cmd("run", "--name", "netns", "-d", "busybox", "top") + if err != nil { + c.Fatal(out, err) + } + if out, err := s.d.Cmd("stop", "netns"); err != nil { + c.Fatal(out, err) + } + + // Construct netns file name from container id + out = strings.TrimSpace(out) + nsFile := out[:12] + + // Test if the file still exists + out, _, err = runCommandWithOutput(exec.Command("stat", "-c", "%n", "/var/run/docker/netns/"+nsFile)) + out = strings.TrimSpace(out) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(out, check.Equals, "/var/run/docker/netns/"+nsFile, check.Commentf("Output: %s", out)) + + // Remove the container and restart the daemon + if out, err := s.d.Cmd("rm", "netns"); err != nil { + c.Fatal(out, err) + } + + if err := s.d.Restart(); err != nil { + c.Fatal(err) + } + + // Test 
again and see now the netns file does not exist
+ out, _, err = runCommandWithOutput(exec.Command("stat", "-c", "%n", "/var/run/docker/netns/"+nsFile))
+ out = strings.TrimSpace(out)
+ c.Assert(err, check.Not(check.IsNil), check.Commentf("Output: %s", out))
+ // c.Assert(out, check.Equals, "", check.Commentf("Output: %s", out))
+}
+
+// tests regression detailed in #13964 where DOCKER_TLS_VERIFY env is ignored
+func (s *DockerDaemonSuite) TestDaemonNoTlsCliTlsVerifyWithEnv(c *check.C) {
+ host := "tcp://localhost:4271"
+ c.Assert(s.d.Start("-H", host), check.IsNil)
+ cmd := exec.Command(dockerBinary, "-H", host, "info")
+ cmd.Env = []string{"DOCKER_TLS_VERIFY=1", "DOCKER_CERT_PATH=fixtures/https"}
+ out, _, err := runCommandWithOutput(cmd)
+ c.Assert(err, check.Not(check.IsNil), check.Commentf("%s", out))
+ c.Assert(strings.Contains(out, "error occurred trying to connect"), check.Equals, true)
+
+}
+
+func setupV6() error {
+ // Hack to get the right IPv6 address on docker0, which has already been created
+ return exec.Command("ip", "addr", "add", "fe80::1/64", "dev", "docker0").Run()
+}
+
+func teardownV6() error {
+ return exec.Command("ip", "addr", "del", "fe80::1/64", "dev", "docker0").Run()
+}
diff --git a/integration-cli/docker_cli_diff_test.go b/integration-cli/docker_cli_diff_test.go
new file mode 100644
index 00000000..b5fc1bd2
--- /dev/null
+++ b/integration-cli/docker_cli_diff_test.go
@@ -0,0 +1,88 @@
+package main
+
+import (
+ "strings"
+
+ "github.com/go-check/check"
+)
+
+// ensure that an added file shows up in docker diff
+func (s *DockerSuite) TestDiffFilenameShownInOutput(c *check.C) {
+ containerCmd := `echo foo > /root/bar`
+ out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", containerCmd)
+
+ cleanCID := strings.TrimSpace(out)
+ out, _ = dockerCmd(c, "diff", cleanCID)
+
+ found := false
+ for _, line := range strings.Split(out, "\n") {
+ if strings.Contains(line, "A /root/bar") {
+ found = true
+ break
+ }
+ }
+ if !found {
+ c.Errorf("couldn't find the new file in docker diff's output: %v", out)
+ }
+}
+
+// test to ensure GH #3840 doesn't occur any more
+func (s *DockerSuite) TestDiffEnsureDockerinitFilesAreIgnored(c *check.C) {
+ // this is a list of files which shouldn't show up in `docker diff`
+ dockerinitFiles := []string{"/etc/resolv.conf", "/etc/hostname", "/etc/hosts", "/.dockerinit", "/.dockerenv"}
+ containerCount := 5
+
+ // we might not run into this problem from the first run, so start a few containers
+ for i := 0; i < containerCount; i++ {
+ containerCmd := `echo foo > /root/bar`
+ out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", containerCmd)
+
+ cleanCID := strings.TrimSpace(out)
+ out, _ = dockerCmd(c, "diff", cleanCID)
+
+ for _, filename := range dockerinitFiles {
+ if strings.Contains(out, filename) {
+ c.Errorf("found file which should've been ignored %v in diff output", filename)
+ }
+ }
+ }
+}
+
+func (s *DockerSuite) TestDiffEnsureOnlyKmsgAndPtmx(c *check.C) {
+ out, _ := dockerCmd(c, "run", "-d", "busybox", "sleep", "0")
+
+ cleanCID := strings.TrimSpace(out)
+ out, _ = dockerCmd(c, "diff", cleanCID)
+
+ expected := map[string]bool{
+ "C /dev": true,
+ "A /dev/full": true, // busybox
+ "C /dev/ptmx": true, // libcontainer
+ "A /dev/kmsg": true, // lxc
+ "A /dev/fd": true,
+ "A /dev/fuse": true,
+ "A /dev/ptmx": true,
+ "A /dev/null": true,
+ "A /dev/random": true,
+ "A /dev/stdout": true,
+ "A /dev/stderr": true,
+ "A /dev/tty1": true,
+ "A /dev/stdin": true,
+ "A /dev/tty": true,
+ "A /dev/urandom": true,
+ "A /dev/zero": true,
+ }
+
+ for _, line := range strings.Split(out, "\n") {
+ if line != "" && !expected[line] {
+ c.Errorf("%q is shown in the diff but shouldn't", line)
+ }
+ }
+}
+
+// https://github.com/docker/docker/pull/14381#discussion_r33859347
+func (s *DockerSuite) TestDiffEmptyArgClientError(c *check.C) {
+ out, _, err := dockerCmdWithError(c, "diff", "")
+ c.Assert(err, check.NotNil)
+ c.Assert(strings.TrimSpace(out), check.Equals, "Container name cannot be empty")
+}
diff --git a/integration-cli/docker_cli_events_test.go b/integration-cli/docker_cli_events_test.go
new file mode 100644
index 00000000..6742ea45
--- /dev/null
+++ b/integration-cli/docker_cli_events_test.go
@@ -0,0 +1,669 @@
+package main
+
+import (
+ "bufio"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "os/exec"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestEventsTimestampFormats(c *check.C) {
+ image := "busybox"
+
+ // Start stopwatch, generate an event
+ time.Sleep(time.Second) // so that we don't grab events from a previous test that occurred in the same second
+ start := daemonTime(c)
+ time.Sleep(time.Second) // remote API precision is only a second, wait a while before creating an event
+ dockerCmd(c, "tag", image, "timestamptest:1")
+ dockerCmd(c, "rmi", "timestamptest:1")
+ time.Sleep(time.Second) // so that until > since
+ end := daemonTime(c)
+
+ // List of available time formats to --since
+ unixTs := func(t time.Time) string { return fmt.Sprintf("%v", t.Unix()) }
+ rfc3339 := func(t time.Time) string { return t.Format(time.RFC3339) }
+ duration := func(t time.Time) string { return time.Now().Sub(t).String() }
+
+ // the window between $start and $end must contain exactly the tag and untag events
+ for _, f := range []func(time.Time) string{unixTs, rfc3339, duration} {
+ since, until := f(start), f(end)
+ out, _ := dockerCmd(c, "events", "--since="+since, "--until="+until)
+ events := strings.Split(strings.TrimSpace(out), "\n")
+ if len(events) != 2 {
+ c.Fatalf("unexpected events, was expecting only 2 events tag/untag (since=%s, until=%s) out=%s", since, until, out)
+ }
+ if !strings.Contains(out, "untag") {
+ c.Fatalf("expected 'untag' event not found (since=%s, until=%s) out=%s", since, until, out)
+ }
+ }
+
+}
+
+func (s *DockerSuite) TestEventsUntag(c *check.C) {
+ image := "busybox"
+ dockerCmd(c, "tag", image, "utest:tag1")
+ dockerCmd(c, "tag", image, "utest:tag2")
+ dockerCmd(c, "rmi", "utest:tag1")
+ dockerCmd(c, "rmi", "utest:tag2")
+ eventsCmd := exec.Command(dockerBinary, "events", "--since=1")
+ out, exitCode, _, err := runCommandWithOutputForDuration(eventsCmd, time.Duration(time.Millisecond*200))
+ if exitCode != 0 || err != nil {
+ c.Fatalf("Failed to get events - exit code %d: %s", exitCode, err)
+ }
+ events := strings.Split(out, "\n")
+ nEvents := len(events)
+ // The last element after the split above will be an empty string, so we
+ // get the two elements before the last, which are the untags we're
+ // looking for.
+ for _, v := range events[nEvents-3 : nEvents-1] {
+ if !strings.Contains(v, "untag") {
+ c.Fatalf("event should be untag, not %#v", v)
+ }
+ }
+}
+
+func (s *DockerSuite) TestEventsContainerFailStartDie(c *check.C) {
+
+ out, _ := dockerCmd(c, "images", "-q")
+ image := strings.Split(out, "\n")[0]
+ if _, _, err := dockerCmdWithError(c, "run", "--name", "testeventdie", image, "blerg"); err == nil {
+ c.Fatalf("Container run with command blerg should have failed, but it did not")
+ }
+
+ out, _ = dockerCmd(c, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(c).Unix()))
+ events := strings.Split(out, "\n")
+ if len(events) <= 1 {
+ c.Fatalf("Missing expected event")
+ }
+
+ startEvent := strings.Fields(events[len(events)-3])
+ dieEvent := strings.Fields(events[len(events)-2])
+
+ if startEvent[len(startEvent)-1] != "start" {
+ c.Fatalf("event should be start, not %#v", startEvent)
+ }
+ if dieEvent[len(dieEvent)-1] != "die" {
+ c.Fatalf("event should be die, not %#v", dieEvent)
+ }
+
+}
+
+func (s *DockerSuite) TestEventsLimit(c *check.C) {
+
+ var waitGroup sync.WaitGroup
+ errChan := make(chan error, 17)
+
+ args := []string{"run", "--rm", "busybox", "true"}
+ for i := 0; i < 17; i++ {
+ waitGroup.Add(1)
+ go func() {
+ defer waitGroup.Done()
+ errChan <- exec.Command(dockerBinary, args...).Run()
+ }()
+ }
+
+ waitGroup.Wait()
+ close(errChan)
+
+ for err := range errChan {
+ if err != nil {
+ c.Fatalf("%q failed with error: %v", strings.Join(args, " "), err)
+ }
+ }
+
+ out, _ := dockerCmd(c, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(c).Unix()))
+ events := strings.Split(out, "\n")
+ nEvents := len(events) - 1
+ if nEvents != 64 {
+ c.Fatalf("events should be limited to 64, but received %d", nEvents)
+ }
+}
+
+func (s *DockerSuite) TestEventsContainerEvents(c *check.C) {
+ dockerCmd(c, "run", "--rm", "busybox", "true")
+ out, _ := dockerCmd(c, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(c).Unix()))
+ events := strings.Split(out, "\n")
+ events = events[:len(events)-1]
+ if len(events) < 5 {
+ c.Fatalf("Missing expected event")
+ }
+ createEvent := strings.Fields(events[len(events)-5])
+ attachEvent := strings.Fields(events[len(events)-4])
+ startEvent := strings.Fields(events[len(events)-3])
+ dieEvent := strings.Fields(events[len(events)-2])
+ destroyEvent := strings.Fields(events[len(events)-1])
+ if createEvent[len(createEvent)-1] != "create" {
+ c.Fatalf("event should be create, not %#v", createEvent)
+ }
+ if attachEvent[len(attachEvent)-1] != "attach" {
+ c.Fatalf("event should be attach, not %#v", attachEvent)
+ }
+ if startEvent[len(startEvent)-1] != "start" {
+ c.Fatalf("event should be start, not %#v", startEvent)
+ }
+ if dieEvent[len(dieEvent)-1] != "die" {
+ c.Fatalf("event should be die, not %#v", dieEvent)
+ }
+ if destroyEvent[len(destroyEvent)-1] != "destroy" {
+ c.Fatalf("event should be destroy, not %#v", destroyEvent)
+ }
+
+}
+
+func (s *DockerSuite) TestEventsContainerEventsSinceUnixEpoch(c *check.C) {
+ dockerCmd(c, "run", "--rm", "busybox", "true")
+ timeBeginning := time.Unix(0, 0).Format(time.RFC3339Nano)
+ timeBeginning = strings.Replace(timeBeginning, "Z", ".000000000Z", -1)
+ out, _ := dockerCmd(c, "events", fmt.Sprintf("--since='%s'", timeBeginning),
+ fmt.Sprintf("--until=%d", daemonTime(c).Unix()))
+ events := strings.Split(out, "\n")
+ events = events[:len(events)-1]
+ if len(events) < 5 {
+ c.Fatalf("Missing expected event")
+ }
+ createEvent := strings.Fields(events[len(events)-5])
+ attachEvent := strings.Fields(events[len(events)-4])
+ startEvent := strings.Fields(events[len(events)-3])
+ dieEvent := strings.Fields(events[len(events)-2])
+ destroyEvent := strings.Fields(events[len(events)-1])
+ if createEvent[len(createEvent)-1] != "create" {
+ c.Fatalf("event should be create, not %#v", createEvent)
+ }
+ if attachEvent[len(attachEvent)-1] != "attach" {
+ c.Fatalf("event should be attach, not %#v", attachEvent)
+ }
+ if startEvent[len(startEvent)-1] != "start" {
+ c.Fatalf("event should be start, not %#v", startEvent)
+ }
+ if dieEvent[len(dieEvent)-1] != "die" {
+ c.Fatalf("event should be die, not %#v", dieEvent)
+ }
+ if destroyEvent[len(destroyEvent)-1] != "destroy" {
+ c.Fatalf("event should be destroy, not %#v", destroyEvent)
+ }
+
+}
+
+func (s *DockerSuite) TestEventsImageUntagDelete(c *check.C) {
+ name := "testimageevents"
+ _, err := buildImage(name,
+ `FROM scratch
+ MAINTAINER "docker"`,
+ true)
+ if err != nil {
+ c.Fatal(err)
+ }
+ if err := deleteImages(name); err != nil {
+ c.Fatal(err)
+ }
+ out, _ := dockerCmd(c, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(c).Unix()))
+ events := strings.Split(out, "\n")
+
+ events = events[:len(events)-1]
+ if len(events) < 2 {
+ c.Fatalf("Missing expected event")
+ }
+ untagEvent := strings.Fields(events[len(events)-2])
+ deleteEvent := strings.Fields(events[len(events)-1])
+ if untagEvent[len(untagEvent)-1] != "untag" {
+ c.Fatalf("event should be untag, not %#v", untagEvent)
+ }
+ if deleteEvent[len(deleteEvent)-1] != "delete" {
+ c.Fatalf("event should be delete, not %#v", deleteEvent)
+ }
+}
+
+func (s *DockerSuite) TestEventsImageTag(c *check.C) {
+ time.Sleep(time.Second * 2) // because API has seconds granularity
+ since := daemonTime(c).Unix()
+ image := "testimageevents:tag"
+ dockerCmd(c, "tag", "busybox", image)
+
+ out, _ := dockerCmd(c, "events",
+ fmt.Sprintf("--since=%d", since),
+ fmt.Sprintf("--until=%d", daemonTime(c).Unix()))
+
+ events := strings.Split(strings.TrimSpace(out), "\n")
+ if len(events) != 1 {
+ c.Fatalf("was expecting 1 event. out=%s", out)
+ }
+ event := strings.TrimSpace(events[0])
+ expectedStr := image + ": tag"
+
+ if !strings.HasSuffix(event, expectedStr) {
+ c.Fatalf("wrong event format.
expected='%s' got=%s", expectedStr, event) + } + +} + +func (s *DockerSuite) TestEventsImagePull(c *check.C) { + since := daemonTime(c).Unix() + testRequires(c, Network) + + dockerCmd(c, "pull", "hello-world") + + out, _ := dockerCmd(c, "events", + fmt.Sprintf("--since=%d", since), + fmt.Sprintf("--until=%d", daemonTime(c).Unix())) + + events := strings.Split(strings.TrimSpace(out), "\n") + event := strings.TrimSpace(events[len(events)-1]) + + if !strings.HasSuffix(event, "hello-world:latest: pull") { + c.Fatalf("Missing pull event - got:%q", event) + } + +} + +func (s *DockerSuite) TestEventsImageImport(c *check.C) { + since := daemonTime(c).Unix() + + id := make(chan string) + eventImport := make(chan struct{}) + eventsCmd := exec.Command(dockerBinary, "events", "--since", strconv.FormatInt(since, 10)) + stdout, err := eventsCmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + if err := eventsCmd.Start(); err != nil { + c.Fatal(err) + } + defer eventsCmd.Process.Kill() + + go func() { + containerID := <-id + + matchImport := regexp.MustCompile(containerID + `: import$`) + scanner := bufio.NewScanner(stdout) + for scanner.Scan() { + if matchImport.MatchString(scanner.Text()) { + close(eventImport) + } + } + }() + + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + cleanedContainerID := strings.TrimSpace(out) + + out, _, err = runCommandPipelineWithOutput( + exec.Command(dockerBinary, "export", cleanedContainerID), + exec.Command(dockerBinary, "import", "-"), + ) + if err != nil { + c.Errorf("import failed with errors: %v, output: %q", err, out) + } + newContainerID := strings.TrimSpace(out) + id <- newContainerID + + select { + case <-time.After(5 * time.Second): + c.Fatal("failed to observe image import in timely fashion") + case <-eventImport: + // ignore, done + } +} + +func (s *DockerSuite) TestEventsFilters(c *check.C) { + parseEvents := func(out, match string) { + events := strings.Split(out, "\n") + events = events[:len(events)-1] + for _, event := range events { + eventFields := strings.Fields(event) + eventName := eventFields[len(eventFields)-1] + if ok, err := regexp.MatchString(match, eventName); err != nil || !ok { + c.Fatalf("event should match %s, got %#v, err: %v", match, eventFields, err) + } + } + } + + since := daemonTime(c).Unix() + dockerCmd(c, "run", "--rm", "busybox", "true") + dockerCmd(c, "run", "--rm", "busybox", "true") + out, _ := dockerCmd(c, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(c).Unix()), "--filter", "event=die") + parseEvents(out, "die") + + out, _ = dockerCmd(c, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(c).Unix()), "--filter", "event=die", "--filter", "event=start") + parseEvents(out, "((die)|(start))") + + // make sure we at least got 2 start events + count := strings.Count(out, "start") + if count < 2 { + c.Fatalf("should have had 2 start events but had %d, out: %s", count, out) + } + +} + +func (s *DockerSuite) TestEventsFilterImageName(c *check.C) { + since := daemonTime(c).Unix() + + out, _ := dockerCmd(c, "run", "--name", "container_1", "-d", "busybox:latest", "true") + container1 := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "run", "--name", "container_2", "-d", "busybox", "true") + container2 := strings.TrimSpace(out) + + name := "busybox" + out, _ = dockerCmd(c, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(c).Unix()), "--filter", fmt.Sprintf("image=%s", name)) + events := strings.Split(out, "\n") + events = 
events[:len(events)-1]
+ if len(events) == 0 {
+ c.Fatalf("Expected events but found none for the image busybox:latest")
+ }
+ count1 := 0
+ count2 := 0
+
+ for _, e := range events {
+ if strings.Contains(e, container1) {
+ count1++
+ } else if strings.Contains(e, container2) {
+ count2++
+ }
+ }
+ if count1 == 0 || count2 == 0 {
+ c.Fatalf("Expected events from each container but got %d from %s and %d from %s", count1, container1, count2, container2)
+ }
+
+}
+
+func (s *DockerSuite) TestEventsFilterContainer(c *check.C) {
+ since := fmt.Sprintf("%d", daemonTime(c).Unix())
+ nameID := make(map[string]string)
+
+ for _, name := range []string{"container_1", "container_2"} {
+ dockerCmd(c, "run", "--name", name, "busybox", "true")
+ id, err := inspectField(name, "Id")
+ if err != nil {
+ c.Fatal(err)
+ }
+ nameID[name] = id
+ }
+
+ until := fmt.Sprintf("%d", daemonTime(c).Unix())
+
+ checkEvents := func(id string, events []string) error {
+ if len(events) != 4 { // create, attach, start, die
+ return fmt.Errorf("expected 4 events, got %v", events)
+ }
+ for _, event := range events {
+ e := strings.Fields(event)
+ if len(e) < 3 {
+ return fmt.Errorf("got malformed event: %s", event)
+ }
+
+ // Check the id
+ parsedID := strings.TrimSuffix(e[1], ":")
+ if parsedID != id {
+ return fmt.Errorf("expected event for container id %s: %s - parsed container id: %s", id, event, parsedID)
+ }
+ }
+ return nil
+ }
+
+ for name, ID := range nameID {
+ // filter by names
+ out, _ := dockerCmd(c, "events", "--since", since, "--until", until, "--filter", "container="+name)
+ events := strings.Split(strings.TrimSuffix(out, "\n"), "\n")
+ if err := checkEvents(ID, events); err != nil {
+ c.Fatal(err)
+ }
+
+ // filter by IDs
+ out, _ = dockerCmd(c, "events", "--since", since, "--until", until, "--filter", "container="+ID)
+ events = strings.Split(strings.TrimSuffix(out, "\n"), "\n")
+ if err := checkEvents(ID, events); err != nil {
+ c.Fatal(err)
+ }
+ }
+}
+
+func (s *DockerSuite) TestEventsStreaming(c *check.C) {
+ start := daemonTime(c).Unix()
+
+ id := make(chan string)
+ eventCreate := make(chan struct{})
+ eventStart := make(chan struct{})
+ eventDie := make(chan struct{})
+ eventDestroy := make(chan struct{})
+
+ eventsCmd := exec.Command(dockerBinary, "events", "--since", strconv.FormatInt(start, 10))
+ stdout, err := eventsCmd.StdoutPipe()
+ if err != nil {
+ c.Fatal(err)
+ }
+ if err := eventsCmd.Start(); err != nil {
+ c.Fatalf("failed to start 'docker events': %s", err)
+ }
+ defer eventsCmd.Process.Kill()
+
+ go func() {
+ containerID := <-id
+
+ matchCreate := regexp.MustCompile(containerID + `: \(from busybox:latest\) create$`)
+ matchStart := regexp.MustCompile(containerID + `: \(from busybox:latest\) start$`)
+ matchDie := regexp.MustCompile(containerID + `: \(from busybox:latest\) die$`)
+ matchDestroy := regexp.MustCompile(containerID + `: \(from busybox:latest\) destroy$`)
+
+ scanner := bufio.NewScanner(stdout)
+ for scanner.Scan() {
+ switch {
+ case matchCreate.MatchString(scanner.Text()):
+ close(eventCreate)
+ case matchStart.MatchString(scanner.Text()):
+ close(eventStart)
+ case matchDie.MatchString(scanner.Text()):
+ close(eventDie)
+ case matchDestroy.MatchString(scanner.Text()):
+ close(eventDestroy)
+ }
+ }
+ }()
+
+ out, _ := dockerCmd(c, "run", "-d", "busybox:latest", "true")
+ cleanedContainerID := strings.TrimSpace(out)
+ id <- cleanedContainerID
+
+ select {
+ case <-time.After(5 * time.Second):
+ c.Fatal("failed to observe container create in timely fashion")
+ case
<-eventCreate: + // ignore, done + } + + select { + case <-time.After(5 * time.Second): + c.Fatal("failed to observe container start in timely fashion") + case <-eventStart: + // ignore, done + } + + select { + case <-time.After(5 * time.Second): + c.Fatal("failed to observe container die in timely fashion") + case <-eventDie: + // ignore, done + } + + dockerCmd(c, "rm", cleanedContainerID) + + select { + case <-time.After(5 * time.Second): + c.Fatal("failed to observe container destroy in timely fashion") + case <-eventDestroy: + // ignore, done + } +} + +func (s *DockerSuite) TestEventsCommit(c *check.C) { + since := daemonTime(c).Unix() + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + cID := strings.TrimSpace(out) + c.Assert(waitRun(cID), check.IsNil) + + dockerCmd(c, "commit", "-m", "test", cID) + dockerCmd(c, "stop", cID) + + out, _ = dockerCmd(c, "events", "--since=0", "-f", "container="+cID, "--until="+strconv.Itoa(int(since))) + if !strings.Contains(out, " commit\n") { + c.Fatalf("Missing 'commit' log event\n%s", out) + } +} + +func (s *DockerSuite) TestEventsCopy(c *check.C) { + since := daemonTime(c).Unix() + + // Build a test image. + id, err := buildImage("cpimg", ` + FROM busybox + RUN echo HI > /tmp/file`, true) + if err != nil { + c.Fatalf("Couldn't create image: %q", err) + } + + // Create an empty test file. + tempFile, err := ioutil.TempFile("", "test-events-copy-") + if err != nil { + c.Fatal(err) + } + defer os.Remove(tempFile.Name()) + + if err := tempFile.Close(); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "create", "--name=cptest", id) + + dockerCmd(c, "cp", "cptest:/tmp/file", tempFile.Name()) + + out, _ := dockerCmd(c, "events", "--since=0", "-f", "container=cptest", "--until="+strconv.Itoa(int(since))) + if !strings.Contains(out, " archive-path\n") { + c.Fatalf("Missing 'archive-path' log event\n%s", out) + } + + dockerCmd(c, "cp", tempFile.Name(), "cptest:/tmp/filecopy") + + out, _ = dockerCmd(c, "events", "--since=0", "-f", "container=cptest", "--until="+strconv.Itoa(int(since))) + if !strings.Contains(out, " extract-to-dir\n") { + c.Fatalf("Missing 'extract-to-dir' log event\n%s", out) + } +} + +func (s *DockerSuite) TestEventsResize(c *check.C) { + since := daemonTime(c).Unix() + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + cID := strings.TrimSpace(out) + c.Assert(waitRun(cID), check.IsNil) + + endpoint := "/containers/" + cID + "/resize?h=80&w=24" + status, _, err := sockRequest("POST", endpoint, nil) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) + + dockerCmd(c, "stop", cID) + + out, _ = dockerCmd(c, "events", "--since=0", "-f", "container="+cID, "--until="+strconv.Itoa(int(since))) + if !strings.Contains(out, " resize\n") { + c.Fatalf("Missing 'resize' log event\n%s", out) + } +} + +func (s *DockerSuite) TestEventsAttach(c *check.C) { + since := daemonTime(c).Unix() + + out, _ := dockerCmd(c, "run", "-di", "busybox", "/bin/cat") + cID := strings.TrimSpace(out) + + cmd := exec.Command(dockerBinary, "attach", cID) + stdin, err := cmd.StdinPipe() + c.Assert(err, check.IsNil) + defer stdin.Close() + stdout, err := cmd.StdoutPipe() + c.Assert(err, check.IsNil) + defer stdout.Close() + c.Assert(cmd.Start(), check.IsNil) + defer cmd.Process.Kill() + + // Make sure we're done attaching by writing/reading some stuff + if _, err := stdin.Write([]byte("hello\n")); err != nil { + c.Fatal(err) + } + out, err = bufio.NewReader(stdout).ReadString('\n') + c.Assert(err, check.IsNil) + if strings.TrimSpace(out) != 
"hello" { + c.Fatalf("expected 'hello', got %q", out) + } + + c.Assert(stdin.Close(), check.IsNil) + + dockerCmd(c, "stop", cID) + + out, _ = dockerCmd(c, "events", "--since=0", "-f", "container="+cID, "--until="+strconv.Itoa(int(since))) + if !strings.Contains(out, " attach\n") { + c.Fatalf("Missing 'attach' log event\n%s", out) + } +} + +func (s *DockerSuite) TestEventsRename(c *check.C) { + since := daemonTime(c).Unix() + + dockerCmd(c, "run", "--name", "oldName", "busybox", "true") + dockerCmd(c, "rename", "oldName", "newName") + + out, _ := dockerCmd(c, "events", "--since=0", "-f", "container=newName", "--until="+strconv.Itoa(int(since))) + if !strings.Contains(out, " rename\n") { + c.Fatalf("Missing 'rename' log event\n%s", out) + } +} + +func (s *DockerSuite) TestEventsTop(c *check.C) { + since := daemonTime(c).Unix() + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + cID := strings.TrimSpace(out) + c.Assert(waitRun(cID), check.IsNil) + + dockerCmd(c, "top", cID) + dockerCmd(c, "stop", cID) + + out, _ = dockerCmd(c, "events", "--since=0", "-f", "container="+cID, "--until="+strconv.Itoa(int(since))) + if !strings.Contains(out, " top\n") { + c.Fatalf("Missing 'top' log event\n%s", out) + } +} + +// #13753 +func (s *DockerSuite) TestEventsDefaultEmpty(c *check.C) { + dockerCmd(c, "run", "busybox") + out, _ := dockerCmd(c, "events", fmt.Sprintf("--until=%d", daemonTime(c).Unix())) + c.Assert(strings.TrimSpace(out), check.Equals, "") +} + +// #14316 +func (s *DockerRegistrySuite) TestEventsImageFilterPush(c *check.C) { + testRequires(c, Network) + since := daemonTime(c).Unix() + repoName := fmt.Sprintf("%v/dockercli/testf", privateRegistryURL) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + cID := strings.TrimSpace(out) + c.Assert(waitRun(cID), check.IsNil) + + dockerCmd(c, "commit", cID, repoName) + dockerCmd(c, "stop", cID) + dockerCmd(c, "push", repoName) + + out, _ = dockerCmd(c, "events", "--since=0", "-f", "image="+repoName, "-f", "event=push", "--until="+strconv.Itoa(int(since))) + if !strings.Contains(out, repoName+": push\n") { + c.Fatalf("Missing 'push' log event for image %s\n%s", repoName, out) + } +} diff --git a/integration-cli/docker_cli_events_unix_test.go b/integration-cli/docker_cli_events_unix_test.go new file mode 100644 index 00000000..1a08f2b3 --- /dev/null +++ b/integration-cli/docker_cli_events_unix_test.go @@ -0,0 +1,53 @@ +// +build !windows + +package main + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "os/exec" + "unicode" + + "github.com/go-check/check" + "github.com/kr/pty" +) + +// #5979 +func (s *DockerSuite) TestEventsRedirectStdout(c *check.C) { + since := daemonTime(c).Unix() + dockerCmd(c, "run", "busybox", "true") + + file, err := ioutil.TempFile("", "") + if err != nil { + c.Fatalf("could not create temp file: %v", err) + } + defer os.Remove(file.Name()) + + command := fmt.Sprintf("%s events --since=%d --until=%d > %s", dockerBinary, since, daemonTime(c).Unix(), file.Name()) + _, tty, err := pty.Open() + if err != nil { + c.Fatalf("Could not open pty: %v", err) + } + cmd := exec.Command("sh", "-c", command) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + if err := cmd.Run(); err != nil { + c.Fatalf("run err for command %q: %v", command, err) + } + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + for _, ch := range scanner.Text() { + if unicode.IsControl(ch) { + c.Fatalf("found control character %v", []byte(string(ch))) + } + } + } + if err := scanner.Err(); err != nil { + c.Fatalf("Scan err for command 
%q: %v", command, err) + } + +} diff --git a/integration-cli/docker_cli_exec_test.go b/integration-cli/docker_cli_exec_test.go new file mode 100644 index 00000000..8e85988f --- /dev/null +++ b/integration-cli/docker_cli_exec_test.go @@ -0,0 +1,545 @@ +// +build !test_no_exec + +package main + +import ( + "bufio" + "fmt" + "net/http" + "os" + "os/exec" + "path/filepath" + "reflect" + "sort" + "strings" + "sync" + "time" + + "github.com/go-check/check" +) + +func (s *DockerSuite) TestExec(c *check.C) { + dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && top") + + out, _ := dockerCmd(c, "exec", "testing", "cat", "/tmp/file") + out = strings.Trim(out, "\r\n") + if out != "test" { + c.Errorf("container exec should've printed test but printed %q", out) + } + +} + +func (s *DockerSuite) TestExecInteractive(c *check.C) { + dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && top") + + execCmd := exec.Command(dockerBinary, "exec", "-i", "testing", "sh") + stdin, err := execCmd.StdinPipe() + if err != nil { + c.Fatal(err) + } + stdout, err := execCmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + + if err := execCmd.Start(); err != nil { + c.Fatal(err) + } + if _, err := stdin.Write([]byte("cat /tmp/file\n")); err != nil { + c.Fatal(err) + } + + r := bufio.NewReader(stdout) + line, err := r.ReadString('\n') + if err != nil { + c.Fatal(err) + } + line = strings.TrimSpace(line) + if line != "test" { + c.Fatalf("Output should be 'test', got '%q'", line) + } + if err := stdin.Close(); err != nil { + c.Fatal(err) + } + errChan := make(chan error) + go func() { + errChan <- execCmd.Wait() + close(errChan) + }() + select { + case err := <-errChan: + c.Assert(err, check.IsNil) + case <-time.After(1 * time.Second): + c.Fatal("docker exec failed to exit on stdin close") + } + +} + +func (s *DockerSuite) TestExecAfterContainerRestart(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + cleanedContainerID := strings.TrimSpace(out) + dockerCmd(c, "restart", cleanedContainerID) + + out, _ = dockerCmd(c, "exec", cleanedContainerID, "echo", "hello") + outStr := strings.TrimSpace(out) + if outStr != "hello" { + c.Errorf("container should've printed hello, instead printed %q", outStr) + } +} + +func (s *DockerDaemonSuite) TestExecAfterDaemonRestart(c *check.C) { + testRequires(c, SameHostDaemon) + + if err := s.d.StartWithBusybox(); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + if out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top"); err != nil { + c.Fatalf("Could not run top: err=%v\n%s", err, out) + } + + if err := s.d.Restart(); err != nil { + c.Fatalf("Could not restart daemon: %v", err) + } + + if out, err := s.d.Cmd("start", "top"); err != nil { + c.Fatalf("Could not start top after daemon restart: err=%v\n%s", err, out) + } + + out, err := s.d.Cmd("exec", "top", "echo", "hello") + if err != nil { + c.Fatalf("Could not exec on container top: err=%v\n%s", err, out) + } + + outStr := strings.TrimSpace(string(out)) + if outStr != "hello" { + c.Errorf("container should've printed hello, instead printed %q", outStr) + } +} + +// Regression test for #9155, #9044 +func (s *DockerSuite) TestExecEnv(c *check.C) { + dockerCmd(c, "run", "-e", "LALA=value1", "-e", "LALA=value2", + "-d", "--name", "testing", "busybox", "top") + + out, _ := dockerCmd(c, "exec", "testing", "env") + if strings.Contains(out, "LALA=value1") || + !strings.Contains(out, 
"LALA=value2") || + !strings.Contains(out, "HOME=/root") { + c.Errorf("exec env(%q), expect %q, %q", out, "LALA=value2", "HOME=/root") + } +} + +func (s *DockerSuite) TestExecExitStatus(c *check.C) { + dockerCmd(c, "run", "-d", "--name", "top", "busybox", "top") + + // Test normal (non-detached) case first + cmd := exec.Command(dockerBinary, "exec", "top", "sh", "-c", "exit 23") + ec, _ := runCommand(cmd) + if ec != 23 { + c.Fatalf("Should have had an ExitCode of 23, not: %d", ec) + } +} + +func (s *DockerSuite) TestExecPausedContainer(c *check.C) { + defer unpauseAllContainers() + + out, _ := dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top") + ContainerID := strings.TrimSpace(out) + + dockerCmd(c, "pause", "testing") + out, _, err := dockerCmdWithError(c, "exec", "-i", "-t", ContainerID, "echo", "hello") + if err == nil { + c.Fatal("container should fail to exec new command if it is paused") + } + + expected := ContainerID + " is paused, unpause the container before exec" + if !strings.Contains(out, expected) { + c.Fatal("container should not exec new command if it is paused") + } +} + +// regression test for #9476 +func (s *DockerSuite) TestExecTtyCloseStdin(c *check.C) { + dockerCmd(c, "run", "-d", "-it", "--name", "exec_tty_stdin", "busybox") + + cmd := exec.Command(dockerBinary, "exec", "-i", "exec_tty_stdin", "cat") + stdinRw, err := cmd.StdinPipe() + if err != nil { + c.Fatal(err) + } + + stdinRw.Write([]byte("test")) + stdinRw.Close() + + if out, _, err := runCommandWithOutput(cmd); err != nil { + c.Fatal(out, err) + } + + out, _ := dockerCmd(c, "top", "exec_tty_stdin") + outArr := strings.Split(out, "\n") + if len(outArr) > 3 || strings.Contains(out, "nsenter-exec") { + c.Fatalf("exec process left running\n\t %s", out) + } +} + +func (s *DockerSuite) TestExecTtyWithoutStdin(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "-ti", "busybox") + id := strings.TrimSpace(out) + if err := waitRun(id); err != nil { + c.Fatal(err) + } + + defer func() { + dockerCmd(c, "kill", id) + }() + + errChan := make(chan error) + go func() { + defer close(errChan) + + cmd := exec.Command(dockerBinary, "exec", "-ti", id, "true") + if _, err := cmd.StdinPipe(); err != nil { + errChan <- err + return + } + + expected := "cannot enable tty mode" + if out, _, err := runCommandWithOutput(cmd); err == nil { + errChan <- fmt.Errorf("exec should have failed") + return + } else if !strings.Contains(out, expected) { + errChan <- fmt.Errorf("exec failed with error %q: expected %q", out, expected) + return + } + }() + + select { + case err := <-errChan: + c.Assert(err, check.IsNil) + case <-time.After(3 * time.Second): + c.Fatal("exec is running but should have failed") + } +} + +func (s *DockerSuite) TestExecParseError(c *check.C) { + dockerCmd(c, "run", "-d", "--name", "top", "busybox", "top") + + // Test normal (non-detached) case first + cmd := exec.Command(dockerBinary, "exec", "top") + if _, stderr, code, err := runCommandWithStdoutStderr(cmd); err == nil || !strings.Contains(stderr, "See '"+dockerBinary+" exec --help'") || code == 0 { + c.Fatalf("Should have thrown error & point to help: %s", stderr) + } +} + +func (s *DockerSuite) TestExecStopNotHanging(c *check.C) { + dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top") + + if err := exec.Command(dockerBinary, "exec", "testing", "top").Start(); err != nil { + c.Fatal(err) + } + + type dstop struct { + out []byte + err error + } + + ch := make(chan dstop) + go func() { + out, err := exec.Command(dockerBinary, "stop", 
"testing").CombinedOutput() + ch <- dstop{out, err} + close(ch) + }() + select { + case <-time.After(3 * time.Second): + c.Fatal("Container stop timed out") + case s := <-ch: + c.Assert(s.err, check.IsNil) + } +} + +func (s *DockerSuite) TestExecCgroup(c *check.C) { + dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top") + + out, _ := dockerCmd(c, "exec", "testing", "cat", "/proc/1/cgroup") + containerCgroups := sort.StringSlice(strings.Split(out, "\n")) + + var wg sync.WaitGroup + var mu sync.Mutex + execCgroups := []sort.StringSlice{} + errChan := make(chan error) + // exec a few times concurrently to get consistent failure + for i := 0; i < 5; i++ { + wg.Add(1) + go func() { + out, _, err := dockerCmdWithError(c, "exec", "testing", "cat", "/proc/self/cgroup") + if err != nil { + errChan <- err + return + } + cg := sort.StringSlice(strings.Split(out, "\n")) + + mu.Lock() + execCgroups = append(execCgroups, cg) + mu.Unlock() + wg.Done() + }() + } + wg.Wait() + close(errChan) + + for err := range errChan { + c.Assert(err, check.IsNil) + } + + for _, cg := range execCgroups { + if !reflect.DeepEqual(cg, containerCgroups) { + fmt.Println("exec cgroups:") + for _, name := range cg { + fmt.Printf(" %s\n", name) + } + + fmt.Println("container cgroups:") + for _, name := range containerCgroups { + fmt.Printf(" %s\n", name) + } + c.Fatal("cgroups mismatched") + } + } +} + +func (s *DockerSuite) TestInspectExecID(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id := strings.TrimSuffix(out, "\n") + + out, err := inspectField(id, "ExecIDs") + if err != nil { + c.Fatalf("failed to inspect container: %s, %v", out, err) + } + if out != "[]" { + c.Fatalf("ExecIDs should be empty, got: %s", out) + } + + // Start an exec, have it block waiting for input so we can do some checking + cmd := exec.Command(dockerBinary, "exec", "-i", id, "sh", "-c", "read a") + execStdin, _ := cmd.StdinPipe() + + if err = cmd.Start(); err != nil { + c.Fatalf("failed to start the exec cmd: %q", err) + } + + // Give the exec 10 chances/seconds to start then give up and stop the test + tries := 10 + for i := 0; i < tries; i++ { + // Since its still running we should see exec as part of the container + out, err = inspectField(id, "ExecIDs") + if err != nil { + c.Fatalf("failed to inspect container: %s, %v", out, err) + } + + out = strings.TrimSuffix(out, "\n") + if out != "[]" && out != "" { + break + } + if i+1 == tries { + c.Fatalf("ExecIDs should not be empty, got: %s", out) + } + time.Sleep(1 * time.Second) + } + + // Save execID for later + execID, err := inspectFilter(id, "index .ExecIDs 0") + if err != nil { + c.Fatalf("failed to get the exec id: %v", err) + } + + // End the exec by closing its stdin, and wait for it to end + execStdin.Close() + cmd.Wait() + + // All execs for the container should be gone now + out, err = inspectField(id, "ExecIDs") + if err != nil { + c.Fatalf("failed to inspect container: %s, %v", out, err) + } + + out = strings.TrimSuffix(out, "\n") + if out != "[]" && out != "" { + c.Fatalf("ExecIDs should be empty, got: %s", out) + } + + // But we should still be able to query the execID + sc, body, err := sockRequest("GET", "/exec/"+execID+"/json", nil) + if sc != http.StatusOK { + c.Fatalf("received status != 200 OK: %d\n%s", sc, body) + } +} + +func (s *DockerSuite) TestLinksPingLinkedContainersOnRename(c *check.C) { + var out string + out, _ = dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") + idA := strings.TrimSpace(out) + if idA == "" { + 
c.Fatal(out, "id should not be nil") + } + out, _ = dockerCmd(c, "run", "-d", "--link", "container1:alias1", "--name", "container2", "busybox", "top") + idB := strings.TrimSpace(out) + if idB == "" { + c.Fatal(out, "id should not be nil") + } + + dockerCmd(c, "exec", "container2", "ping", "-c", "1", "alias1", "-W", "1") + dockerCmd(c, "rename", "container1", "container_new") + dockerCmd(c, "exec", "container2", "ping", "-c", "1", "alias1", "-W", "1") +} + +func (s *DockerSuite) TestRunExecDir(c *check.C) { + testRequires(c, SameHostDaemon) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id := strings.TrimSpace(out) + execDir := filepath.Join(execDriverPath, id) + stateFile := filepath.Join(execDir, "state.json") + + { + fi, err := os.Stat(execDir) + if err != nil { + c.Fatal(err) + } + if !fi.IsDir() { + c.Fatalf("%q must be a directory", execDir) + } + fi, err = os.Stat(stateFile) + if err != nil { + c.Fatal(err) + } + } + + dockerCmd(c, "stop", id) + { + _, err := os.Stat(execDir) + if err == nil { + c.Fatal(err) + } + if err == nil { + c.Fatalf("Exec directory %q exists for removed container!", execDir) + } + if !os.IsNotExist(err) { + c.Fatalf("Error should be about non-existing, got %s", err) + } + } + dockerCmd(c, "start", id) + { + fi, err := os.Stat(execDir) + if err != nil { + c.Fatal(err) + } + if !fi.IsDir() { + c.Fatalf("%q must be a directory", execDir) + } + fi, err = os.Stat(stateFile) + if err != nil { + c.Fatal(err) + } + } + dockerCmd(c, "rm", "-f", id) + { + _, err := os.Stat(execDir) + if err == nil { + c.Fatal(err) + } + if err == nil { + c.Fatalf("Exec directory %q is exists for removed container!", execDir) + } + if !os.IsNotExist(err) { + c.Fatalf("Error should be about non-existing, got %s", err) + } + } +} + +func (s *DockerSuite) TestRunMutableNetworkFiles(c *check.C) { + testRequires(c, SameHostDaemon) + + for _, fn := range []string{"resolv.conf", "hosts"} { + deleteAllContainers() + + content, err := runCommandAndReadContainerFile(fn, exec.Command(dockerBinary, "run", "-d", "--name", "c1", "busybox", "sh", "-c", fmt.Sprintf("echo success >/etc/%s && top", fn))) + if err != nil { + c.Fatal(err) + } + + if strings.TrimSpace(string(content)) != "success" { + c.Fatal("Content was not what was modified in the container", string(content)) + } + + out, _ := dockerCmd(c, "run", "-d", "--name", "c2", "busybox", "top") + contID := strings.TrimSpace(out) + netFilePath := containerStorageFile(contID, fn) + + f, err := os.OpenFile(netFilePath, os.O_WRONLY|os.O_SYNC|os.O_APPEND, 0644) + if err != nil { + c.Fatal(err) + } + + if _, err := f.Seek(0, 0); err != nil { + f.Close() + c.Fatal(err) + } + + if err := f.Truncate(0); err != nil { + f.Close() + c.Fatal(err) + } + + if _, err := f.Write([]byte("success2\n")); err != nil { + f.Close() + c.Fatal(err) + } + f.Close() + + res, _ := dockerCmd(c, "exec", contID, "cat", "/etc/"+fn) + if res != "success2\n" { + c.Fatalf("Expected content of %s: %q, got: %q", fn, "success2\n", res) + } + } +} + +func (s *DockerSuite) TestExecWithUser(c *check.C) { + dockerCmd(c, "run", "-d", "--name", "parent", "busybox", "top") + + out, _ := dockerCmd(c, "exec", "-u", "1", "parent", "id") + if !strings.Contains(out, "uid=1(daemon) gid=1(daemon)") { + c.Fatalf("exec with user by id expected daemon user got %s", out) + } + + out, _ = dockerCmd(c, "exec", "-u", "root", "parent", "id") + if !strings.Contains(out, "uid=0(root) gid=0(root)") { + c.Fatalf("exec with user by root expected root user got %s", out) + } +} + +func (s 
*DockerSuite) TestExecWithImageUser(c *check.C) { + name := "testbuilduser" + _, err := buildImage(name, + `FROM busybox + RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd + USER dockerio`, + true) + if err != nil { + c.Fatalf("Could not build image %s: %v", name, err) + } + + dockerCmd(c, "run", "-d", "--name", "dockerioexec", name, "top") + + out, _ := dockerCmd(c, "exec", "dockerioexec", "whoami") + if !strings.Contains(out, "dockerio") { + c.Fatalf("exec with user by id expected dockerio user got %s", out) + } +} + +func (s *DockerSuite) TestExecOnReadonlyContainer(c *check.C) { + dockerCmd(c, "run", "-d", "--read-only", "--name", "parent", "busybox", "top") + if _, status := dockerCmd(c, "exec", "parent", "true"); status != 0 { + c.Fatalf("exec into a read-only container failed with exit status %d", status) + } +} diff --git a/integration-cli/docker_cli_exec_unix_test.go b/integration-cli/docker_cli_exec_unix_test.go new file mode 100644 index 00000000..28c202c4 --- /dev/null +++ b/integration-cli/docker_cli_exec_unix_test.go @@ -0,0 +1,44 @@ +// +build !windows,!test_no_exec + +package main + +import ( + "bytes" + "io" + "os/exec" + "strings" + "time" + + "github.com/go-check/check" + "github.com/kr/pty" +) + +// regression test for #12546 +func (s *DockerSuite) TestExecInteractiveStdinClose(c *check.C) { + out, _ := dockerCmd(c, "run", "-itd", "busybox", "/bin/cat") + contID := strings.TrimSpace(out) + + cmd := exec.Command(dockerBinary, "exec", "-i", contID, "echo", "-n", "hello") + p, err := pty.Start(cmd) + if err != nil { + c.Fatal(err) + } + + b := bytes.NewBuffer(nil) + go io.Copy(b, p) + + ch := make(chan error) + go func() { ch <- cmd.Wait() }() + + select { + case err := <-ch: + if err != nil { + c.Errorf("cmd finished with error %v", err) + } + if output := b.String(); strings.TrimSpace(output) != "hello" { + c.Fatalf("Unexpected output %s", output) + } + case <-time.After(1 * time.Second): + c.Fatal("timed out running docker exec") + } +} diff --git a/integration-cli/docker_cli_experimental_test.go b/integration-cli/docker_cli_experimental_test.go new file mode 100644 index 00000000..694222bf --- /dev/null +++ b/integration-cli/docker_cli_experimental_test.go @@ -0,0 +1,23 @@ +// +build experimental + +package main + +import ( + "strings" + + "github.com/go-check/check" +) + +func (s *DockerSuite) TestExperimentalVersion(c *check.C) { + out, _ := dockerCmd(c, "version") + for _, line := range strings.Split(out, "\n") { + if strings.HasPrefix(line, "Experimental (client):") || strings.HasPrefix(line, "Experimental (server):") { + c.Assert(line, check.Matches, "*true") + } + } + + out, _ = dockerCmd(c, "-v") + if !strings.Contains(out, ", experimental") { + c.Fatalf("docker version did not contain experimental: %s", out) + } +} diff --git a/integration-cli/docker_cli_export_import_test.go b/integration-cli/docker_cli_export_import_test.go new file mode 100644 index 00000000..a9e75de8 --- /dev/null +++ b/integration-cli/docker_cli_export_import_test.go @@ -0,0 +1,56 @@ +package main + +import ( + "os" + "os/exec" + "strings" + + "github.com/go-check/check" +) + +// export an image and try to import it into a new one +func (s *DockerSuite) TestExportContainerAndImportImage(c *check.C) { + containerID := "testexportcontainerandimportimage" + + dockerCmd(c, "run", "--name", containerID, "busybox", "true") + + out, _ := dockerCmd(c, "export", containerID) + + importCmd := exec.Command(dockerBinary, "import", "-", "repo/testexp:v1") + importCmd.Stdin = 
strings.NewReader(out) + out, _, err := runCommandWithOutput(importCmd) + if err != nil { + c.Fatalf("failed to import image: %s, %v", out, err) + } + + cleanedImageID := strings.TrimSpace(out) + if cleanedImageID == "" { + c.Fatalf("output should have been an image id, got: %s", out) + } +} + +// Used to test output flag in the export command +func (s *DockerSuite) TestExportContainerWithOutputAndImportImage(c *check.C) { + containerID := "testexportcontainerwithoutputandimportimage" + + dockerCmd(c, "run", "--name", containerID, "busybox", "true") + dockerCmd(c, "export", "--output=testexp.tar", containerID) + defer os.Remove("testexp.tar") + + out, _, err := runCommandWithOutput(exec.Command("cat", "testexp.tar")) + if err != nil { + c.Fatal(out, err) + } + + importCmd := exec.Command(dockerBinary, "import", "-", "repo/testexp:v1") + importCmd.Stdin = strings.NewReader(out) + out, _, err = runCommandWithOutput(importCmd) + if err != nil { + c.Fatalf("failed to import image: %s, %v", out, err) + } + + cleanedImageID := strings.TrimSpace(out) + if cleanedImageID == "" { + c.Fatalf("output should have been an image id, got: %s", out) + } +} diff --git a/integration-cli/docker_cli_help_test.go b/integration-cli/docker_cli_help_test.go new file mode 100644 index 00000000..31130835 --- /dev/null +++ b/integration-cli/docker_cli_help_test.go @@ -0,0 +1,341 @@ +package main + +import ( + "os" + "os/exec" + "runtime" + "strings" + "unicode" + + "github.com/docker/docker/pkg/homedir" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestHelpTextVerify(c *check.C) { + // Make sure main help text fits within 80 chars and that + // on non-windows system we use ~ when possible (to shorten things). + // Test for HOME set to its default value and set to "/" on linux + // Yes on windows setting up an array and looping (right now) isn't + // necessary because we just have one value, but we'll need the + // array/loop on linux so we might as well set it up so that we can + // test any number of home dirs later on and all we need to do is + // modify the array - the rest of the testing infrastructure should work + homes := []string{homedir.Get()} + + // Non-Windows machines need to test for this special case of $HOME + if runtime.GOOS != "windows" { + homes = append(homes, "/") + } + + homeKey := homedir.Key() + baseEnvs := os.Environ() + + // Remove HOME env var from list so we can add a new value later. + for i, env := range baseEnvs { + if strings.HasPrefix(env, homeKey+"=") { + baseEnvs = append(baseEnvs[:i], baseEnvs[i+1:]...) 
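+			// assumes the environment carries a single HOME entry, so stop scanning once it is removed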
+			break
+		}
+	}
+
+	for _, home := range homes {
+		// Dup baseEnvs and add our new HOME value
+		newEnvs := make([]string, len(baseEnvs)+1)
+		copy(newEnvs, baseEnvs)
+		newEnvs[len(newEnvs)-1] = homeKey + "=" + home
+
+		scanForHome := runtime.GOOS != "windows" && home != "/"
+
+		// Check main help text to make sure it's not over 80 chars
+		helpCmd := exec.Command(dockerBinary, "help")
+		helpCmd.Env = newEnvs
+		out, ec, err := runCommandWithOutput(helpCmd)
+		if err != nil || ec != 0 {
+			c.Fatalf("docker help should have worked\nout:%s\nec:%d", out, ec)
+		}
+		lines := strings.Split(out, "\n")
+		for _, line := range lines {
+			if len(line) > 80 {
+				c.Fatalf("Line is too long (%d chars):\n%s", len(line), line)
+			}
+
+			// No line should end with a space
+			if strings.HasSuffix(line, " ") {
+				c.Fatalf("Line should not end with a space: %s", line)
+			}
+
+			if scanForHome && strings.Contains(line, `=`+home) {
+				c.Fatalf("Line should use %q instead of %q:\n%s", homedir.GetShortcutString(), home, line)
+			}
+			if runtime.GOOS != "windows" {
+				i := strings.Index(line, homedir.GetShortcutString())
+				if i >= 0 && i != len(line)-1 && line[i+1] != '/' {
+					c.Fatalf("Main help should not have used home shortcut:\n%s", line)
+				}
+			}
+		}
+
+		// Make sure each cmd's help text fits within 80 chars and that
+		// on non-windows system we use ~ when possible (to shorten things).
+		// Pull the list of commands from the "Commands:" section of docker help
+		helpCmd = exec.Command(dockerBinary, "help")
+		helpCmd.Env = newEnvs
+		out, ec, err = runCommandWithOutput(helpCmd)
+		if err != nil || ec != 0 {
+			c.Fatalf("docker help should have worked\nout:%s\nec:%d", out, ec)
+		}
+		i := strings.Index(out, "Commands:")
+		if i < 0 {
+			c.Fatalf("Missing 'Commands:' in:\n%s", out)
+		}
+
+		cmds := []string{}
+		// Grab all chars starting at "Commands:"
+		helpOut := strings.Split(out[i:], "\n")
+		// First line is just "Commands:"
+		if isLocalDaemon {
+			// Replace first line with "daemon" command since it's not part of the list of commands.
+			helpOut[0] = " daemon"
+		} else {
+			// Skip first line
+			helpOut = helpOut[1:]
+		}
+		for _, cmd := range helpOut {
+			var stderr string
+
+			// Stop on blank line or non-indented line
+			if cmd == "" || !unicode.IsSpace(rune(cmd[0])) {
+				break
+			}
+
+			// Grab just the first word of each line
+			cmd = strings.Split(strings.TrimSpace(cmd), " ")[0]
+			cmds = append(cmds, cmd)
+
+			// Check the full usage text
+			helpCmd := exec.Command(dockerBinary, cmd, "--help")
+			helpCmd.Env = newEnvs
+			out, stderr, ec, err = runCommandWithStdoutStderr(helpCmd)
+			if len(stderr) != 0 {
+				c.Fatalf("Error on %q help. non-empty stderr:%q", cmd, stderr)
+			}
+			if strings.HasSuffix(out, "\n\n") {
+				c.Fatalf("Should not have blank line on %q\nout:%q", cmd, out)
+			}
+			if !strings.Contains(out, "--help=false") {
+				c.Fatalf("Should show full usage on %q\nout:%q", cmd, out)
+			}
+			if err != nil || ec != 0 {
+				c.Fatalf("Error on %q help: %s\nexit code:%d", cmd, out, ec)
+			}
+
+			// Check each line for lots of stuff
+			lines := strings.Split(out, "\n")
+			for _, line := range lines {
+				if len(line) > 90 {
+					c.Fatalf("Help for %q is too long (%d chars):\n%s", cmd,
+						len(line), line)
+				}
+
+				if scanForHome && strings.Contains(line, `"`+home) {
+					c.Fatalf("Help for %q should use ~ instead of %q on:\n%s",
+						cmd, home, line)
+				}
+				i := strings.Index(line, "~")
+				if i >= 0 && i != len(line)-1 && line[i+1] != '/' {
+					c.Fatalf("Help for %q should not have used ~:\n%s", cmd, line)
+				}
+
+				// If a line starts with 4 spaces then assume someone
+				// added a multi-line description for an option and we need
+				// to flag it
+				if strings.HasPrefix(line, "    ") {
+					c.Fatalf("Help for %q should not have a multi-line option: %s", cmd, line)
+				}
+
+				// Options should NOT end with a period
+				if strings.HasPrefix(line, "  -") && strings.HasSuffix(line, ".") {
+					c.Fatalf("Help for %q should not end with a period: %s", cmd, line)
+				}
+
+				// Options should NOT end with a space
+				if strings.HasSuffix(line, " ") {
+					c.Fatalf("Help for %q should not end with a space: %s", cmd, line)
+				}
+
+			}
+
+			// For each command make sure we generate an error
+			// if we give a bad arg
+			dCmd := exec.Command(dockerBinary, cmd, "--badArg")
+			out, stderr, ec, err = runCommandWithStdoutStderr(dCmd)
+			if len(out) != 0 || len(stderr) == 0 || ec == 0 || err == nil {
+				c.Fatalf("Bad results from 'docker %s --badArg'\nec:%d\nstdout:%s\nstderr:%s\nerr:%q", cmd, ec, out, stderr, err)
+			}
+			// Be really picky
+			if strings.HasSuffix(stderr, "\n\n") {
+				c.Fatalf("Should not have a blank line at the end of 'docker %s --badArg'\n%s", cmd, stderr)
+			}
+
+			// Now make sure that each command will print a short-usage
+			// (not a full usage - meaning no opts section) if we
+			// are missing a required arg or pass in a bad arg
+
+			// These commands will never print a short-usage so don't test
+			noShortUsage := map[string]string{
+				"images": "",
+				"login":  "",
+				"logout": "",
+			}
+
+			if _, ok := noShortUsage[cmd]; !ok {
+				// For each command run it w/o any args. It will either return
+				// valid output or print a short-usage
+				var dCmd *exec.Cmd
+				var stdout, stderr string
+				var args []string
+
+				// skipNoArgs are ones that we don't want to try w/o
+				// any args. Either because it'll hang the test or
+				// lead to incorrect test result (like false negative).
+				// Whatever the reason, skip trying to run w/o args and
+				// jump to trying with a bogus arg.
+				skipNoArgs := map[string]struct{}{
+					"daemon": {},
+					"events": {},
+					"load":   {},
+				}
+
+				ec = 0
+				if _, ok := skipNoArgs[cmd]; !ok {
+					args = []string{cmd}
+					dCmd = exec.Command(dockerBinary, args...)
+					stdout, stderr, ec, err = runCommandWithStdoutStderr(dCmd)
+				}
+
+				// If it's ok w/o any args then try again with an arg
+				if ec == 0 {
+					args = []string{cmd, "badArg"}
+					dCmd = exec.Command(dockerBinary, args...)
+					stdout, stderr, ec, err = runCommandWithStdoutStderr(dCmd)
+				}
+
+				if len(stdout) != 0 || len(stderr) == 0 || ec == 0 || err == nil {
+					c.Fatalf("Bad output from %q\nstdout:%q\nstderr:%q\nec:%d\nerr:%q", args, stdout, stderr, ec, err)
+				}
+				// Should have just short usage
+				if !strings.Contains(stderr, "\nUsage:\t") {
+					c.Fatalf("Missing short usage on %q\nstderr:%q", args, stderr)
+				}
+				// But shouldn't have full usage
+				if strings.Contains(stderr, "--help=false") {
+					c.Fatalf("Should not have full usage on %q\nstderr:%q", args, stderr)
+				}
+				if strings.HasSuffix(stderr, "\n\n") {
+					c.Fatalf("Should not have a blank line on %q\nstderr:%q", args, stderr)
+				}
+			}
+
+		}
+
+		expected := 39
+		if isLocalDaemon {
+			expected++ // for the daemon command
+		}
+		if len(cmds) != expected {
+			c.Fatalf("Wrong # of cmds (%d), it should be: %d\nThe list:\n%q",
+				len(cmds), expected, cmds)
+		}
+	}
+
+}
+
+func (s *DockerSuite) TestHelpExitCodesHelpOutput(c *check.C) {
+	// Test to make sure the exit code and output (stdout vs stderr) of
+	// various good and bad cases are what we expect
+
+	// docker : stdout=all, stderr=empty, rc=0
+	cmd := exec.Command(dockerBinary)
+	stdout, stderr, ec, err := runCommandWithStdoutStderr(cmd)
+	if len(stdout) == 0 || len(stderr) != 0 || ec != 0 || err != nil {
+		c.Fatalf("Bad results from 'docker'\nec:%d\nstdout:%s\nstderr:%s\nerr:%v", ec, stdout, stderr, err)
+	}
+	// Be really picky
+	if strings.HasSuffix(stdout, "\n\n") {
+		c.Fatalf("Should not have a blank line at the end of 'docker'\n%s", stdout)
+	}
+
+	// docker help: stdout=all, stderr=empty, rc=0
+	cmd = exec.Command(dockerBinary, "help")
+	stdout, stderr, ec, err = runCommandWithStdoutStderr(cmd)
+	if len(stdout) == 0 || len(stderr) != 0 || ec != 0 || err != nil {
+		c.Fatalf("Bad results from 'docker help'\nec:%d\nstdout:%s\nstderr:%s\nerr:%v", ec, stdout, stderr, err)
+	}
+	// Be really picky
+	if strings.HasSuffix(stdout, "\n\n") {
+		c.Fatalf("Should not have a blank line at the end of 'docker help'\n%s", stdout)
+	}
+
+	// docker --help: stdout=all, stderr=empty, rc=0
+	cmd = exec.Command(dockerBinary, "--help")
+	stdout, stderr, ec, err = runCommandWithStdoutStderr(cmd)
+	if len(stdout) == 0 || len(stderr) != 0 || ec != 0 || err != nil {
+		c.Fatalf("Bad results from 'docker --help'\nec:%d\nstdout:%s\nstderr:%s\nerr:%v", ec, stdout, stderr, err)
+	}
+	// Be really picky
+	if strings.HasSuffix(stdout, "\n\n") {
+		c.Fatalf("Should not have a blank line at the end of 'docker --help'\n%s", stdout)
+	}
+
+	// docker inspect busybox: stdout=all, stderr=empty, rc=0
+	// Just making sure stderr is empty on valid cmd
+	cmd = exec.Command(dockerBinary, "inspect", "busybox")
+	stdout, stderr, ec, err = runCommandWithStdoutStderr(cmd)
+	if len(stdout) == 0 || len(stderr) != 0 || ec != 0 || err != nil {
+		c.Fatalf("Bad results from 'docker inspect busybox'\nec:%d\nstdout:%s\nstderr:%s\nerr:%v", ec, stdout, stderr, err)
+	}
+	// Be really picky
+	if strings.HasSuffix(stdout, "\n\n") {
+		c.Fatalf("Should not have a blank line at the end of 'docker inspect busybox'\n%s", stdout)
+	}
+
+	// docker rm: stdout=empty, stderr=all, rc!=0
+	// testing the min arg error msg
+	cmd = exec.Command(dockerBinary, "rm")
+	stdout, stderr, ec, err = runCommandWithStdoutStderr(cmd)
+	if len(stdout) != 0 || len(stderr) == 0 || ec == 0 || err == nil {
+		c.Fatalf("Bad results from 'docker rm'\nec:%d\nstdout:%s\nstderr:%s\nerr:%v", ec, stdout, stderr, err)
+	}
+	// Should not contain full help text but should contain info about
+	// # of args and Usage line
+	if !strings.Contains(stderr, "requires a minimum") {
+		c.Fatalf("Missing # of args text from 'docker rm'\nstderr:%s", stderr)
+	}
+
+	// docker rm NoSuchContainer: stdout=empty, stderr=all, rc!=0
+	// testing to make sure no blank line on error
+	cmd = exec.Command(dockerBinary, "rm", "NoSuchContainer")
+	stdout, stderr, ec, err = runCommandWithStdoutStderr(cmd)
+	if len(stdout) != 0 || len(stderr) == 0 || ec == 0 || err == nil {
+		c.Fatalf("Bad results from 'docker rm NoSuchContainer'\nec:%d\nstdout:%s\nstderr:%s\nerr:%v", ec, stdout, stderr, err)
+	}
+	// Be really picky
+	if strings.HasSuffix(stderr, "\n\n") {
+		c.Fatalf("Should not have a blank line at the end of 'docker rm'\n%s", stderr)
+	}
+
+	// docker BadCmd: stdout=empty, stderr=all, rc!=0
+	cmd = exec.Command(dockerBinary, "BadCmd")
+	stdout, stderr, ec, err = runCommandWithStdoutStderr(cmd)
+	if len(stdout) != 0 || len(stderr) == 0 || ec == 0 || err == nil {
+		c.Fatalf("Bad results from 'docker BadCmd'\nec:%d\nstdout:%s\nstderr:%s\nerr:%v", ec, stdout, stderr, err)
+	}
+	if stderr != "docker: 'BadCmd' is not a docker command.\nSee 'docker --help'.\n" {
+		c.Fatalf("Unexpected output for 'docker BadCmd'\nstderr:%s", stderr)
+	}
+	// Be really picky
+	if strings.HasSuffix(stderr, "\n\n") {
+		c.Fatalf("Should not have a blank line at the end of 'docker BadCmd'\n%s", stderr)
+	}
+
+}
diff --git a/integration-cli/docker_cli_history_test.go b/integration-cli/docker_cli_history_test.go
new file mode 100644
index 00000000..355e4c8f
--- /dev/null
+++ b/integration-cli/docker_cli_history_test.go
@@ -0,0 +1,132 @@
+package main
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/go-check/check"
+)
+
+// This is a heisen-test. Because the created timestamps of images and the
+// behavior of sort are not predictable, it doesn't always fail.
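+// (history lists layers newest-first; when two layers share the same created
+// timestamp their relative order after sorting is unspecified, so the
+// ordering check below can pass even when the sort is broken)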
+func (s *DockerSuite) TestBuildHistory(c *check.C) {
+	name := "testbuildhistory"
+	_, err := buildImage(name, `FROM busybox
+RUN echo "A"
+RUN echo "B"
+RUN echo "C"
+RUN echo "D"
+RUN echo "E"
+RUN echo "F"
+RUN echo "G"
+RUN echo "H"
+RUN echo "I"
+RUN echo "J"
+RUN echo "K"
+RUN echo "L"
+RUN echo "M"
+RUN echo "N"
+RUN echo "O"
+RUN echo "P"
+RUN echo "Q"
+RUN echo "R"
+RUN echo "S"
+RUN echo "T"
+RUN echo "U"
+RUN echo "V"
+RUN echo "W"
+RUN echo "X"
+RUN echo "Y"
+RUN echo "Z"`,
+		true)
+
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	out, _ := dockerCmd(c, "history", "testbuildhistory")
+	actualValues := strings.Split(out, "\n")[1:27]
+	expectedValues := [26]string{"Z", "Y", "X", "W", "V", "U", "T", "S", "R", "Q", "P", "O", "N", "M", "L", "K", "J", "I", "H", "G", "F", "E", "D", "C", "B", "A"}
+
+	for i := 0; i < 26; i++ {
+		echoValue := fmt.Sprintf("echo \"%s\"", expectedValues[i])
+		actualValue := actualValues[i]
+
+		if !strings.Contains(actualValue, echoValue) {
+			c.Fatalf("Expected layer \"%s\", but was: %s", expectedValues[i], actualValue)
+		}
+	}
+
+}
+
+func (s *DockerSuite) TestHistoryExistentImage(c *check.C) {
+	dockerCmd(c, "history", "busybox")
+}
+
+func (s *DockerSuite) TestHistoryNonExistentImage(c *check.C) {
+	_, _, err := dockerCmdWithError(c, "history", "testHistoryNonExistentImage")
+	if err == nil {
+		c.Fatal("history on a non-existent image should fail.")
+	}
+}
+
+func (s *DockerSuite) TestHistoryImageWithComment(c *check.C) {
+	name := "testhistoryimagewithcomment"
+
+	// make an image through docker commit [-m message]
+	dockerCmd(c, "run", "--name", name, "busybox", "true")
+	dockerCmd(c, "wait", name)
+
+	comment := "This_is_a_comment"
+	dockerCmd(c, "commit", "-m="+comment, name, name)
+
+	// check that docker history shows the commit message
+	out, _ := dockerCmd(c, "history", name)
+	outputTabs := strings.Fields(strings.Split(out, "\n")[1])
+	actualValue := outputTabs[len(outputTabs)-1]
+
+	if !strings.Contains(actualValue, comment) {
+		c.Fatalf("Expected comment %q, but found %q", comment, actualValue)
+	}
+}
+
+func (s *DockerSuite) TestHistoryHumanOptionFalse(c *check.C) {
+	out, _ := dockerCmd(c, "history", "--human=false", "busybox")
+	lines := strings.Split(out, "\n")
+	sizeColumnRegex := regexp.MustCompile("SIZE +")
+	indices := sizeColumnRegex.FindStringIndex(lines[0])
+	startIndex := indices[0]
+	endIndex := indices[1]
+	for i := 1; i < len(lines)-1; i++ {
+		if endIndex > len(lines[i]) {
+			endIndex = len(lines[i])
+		}
+		sizeString := lines[i][startIndex:endIndex]
+		if _, err := strconv.Atoi(strings.TrimSpace(sizeString)); err != nil {
+			c.Fatalf("The size '%s' was not an integer", sizeString)
+		}
+	}
+}
+
+func (s *DockerSuite) TestHistoryHumanOptionTrue(c *check.C) {
+	out, _ := dockerCmd(c, "history", "--human=true", "busybox")
+	lines := strings.Split(out, "\n")
+	sizeColumnRegex := regexp.MustCompile("SIZE +")
+	humanSizeRegex := regexp.MustCompile(`^\d+.*B$`) // Matches human sizes like 10 MB, 3.2 KB, etc
+	indices := sizeColumnRegex.FindStringIndex(lines[0])
+	startIndex := indices[0]
+	endIndex := indices[1]
+	for i := 1; i < len(lines)-1; i++ {
+		if endIndex > len(lines[i]) {
+			endIndex = len(lines[i])
+		}
+		sizeString := lines[i][startIndex:endIndex]
+		if matchSuccess := humanSizeRegex.MatchString(strings.TrimSpace(sizeString)); !matchSuccess {
+			c.Fatalf("The size '%s' was not in human format", sizeString)
+		}
+	}
+}
diff --git a/integration-cli/docker_cli_images_test.go b/integration-cli/docker_cli_images_test.go
new file mode 100644
index 00000000..fe12f02e --- /dev/null +++ b/integration-cli/docker_cli_images_test.go @@ -0,0 +1,155 @@ +package main + +import ( + "fmt" + "reflect" + "sort" + "strings" + "time" + + "github.com/docker/docker/pkg/stringid" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestImagesEnsureImageIsListed(c *check.C) { + out, _ := dockerCmd(c, "images") + if !strings.Contains(out, "busybox") { + c.Fatal("images should've listed busybox") + } +} + +func (s *DockerSuite) TestImagesOrderedByCreationDate(c *check.C) { + id1, err := buildImage("order:test_a", + `FROM scratch + MAINTAINER dockerio1`, true) + if err != nil { + c.Fatal(err) + } + time.Sleep(time.Second) + id2, err := buildImage("order:test_c", + `FROM scratch + MAINTAINER dockerio2`, true) + if err != nil { + c.Fatal(err) + } + time.Sleep(time.Second) + id3, err := buildImage("order:test_b", + `FROM scratch + MAINTAINER dockerio3`, true) + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "images", "-q", "--no-trunc") + imgs := strings.Split(out, "\n") + if imgs[0] != id3 { + c.Fatalf("First image must be %s, got %s", id3, imgs[0]) + } + if imgs[1] != id2 { + c.Fatalf("Second image must be %s, got %s", id2, imgs[1]) + } + if imgs[2] != id1 { + c.Fatalf("Third image must be %s, got %s", id1, imgs[2]) + } +} + +func (s *DockerSuite) TestImagesErrorWithInvalidFilterNameTest(c *check.C) { + out, _, err := dockerCmdWithError(c, "images", "-f", "FOO=123") + if err == nil || !strings.Contains(out, "Invalid filter") { + c.Fatalf("error should occur when listing images with invalid filter name FOO, %s", out) + } +} + +func (s *DockerSuite) TestImagesFilterLabel(c *check.C) { + imageName1 := "images_filter_test1" + imageName2 := "images_filter_test2" + imageName3 := "images_filter_test3" + image1ID, err := buildImage(imageName1, + `FROM scratch + LABEL match me`, true) + if err != nil { + c.Fatal(err) + } + + image2ID, err := buildImage(imageName2, + `FROM scratch + LABEL match="me too"`, true) + if err != nil { + c.Fatal(err) + } + + image3ID, err := buildImage(imageName3, + `FROM scratch + LABEL nomatch me`, true) + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "images", "--no-trunc", "-q", "-f", "label=match") + out = strings.TrimSpace(out) + if (!strings.Contains(out, image1ID) && !strings.Contains(out, image2ID)) || strings.Contains(out, image3ID) { + c.Fatalf("Expected ids %s,%s got %s", image1ID, image2ID, out) + } + + out, _ = dockerCmd(c, "images", "--no-trunc", "-q", "-f", "label=match=me too") + out = strings.TrimSpace(out) + if out != image2ID { + c.Fatalf("Expected %s got %s", image2ID, out) + } +} + +func (s *DockerSuite) TestImagesFilterSpaceTrimCase(c *check.C) { + imageName := "images_filter_test" + buildImage(imageName, + `FROM scratch + RUN touch /test/foo + RUN touch /test/bar + RUN touch /test/baz`, true) + + filters := []string{ + "dangling=true", + "Dangling=true", + " dangling=true", + "dangling=true ", + "dangling = true", + } + + imageListings := make([][]string, 5, 5) + for idx, filter := range filters { + out, _ := dockerCmd(c, "images", "-q", "-f", filter) + listing := strings.Split(out, "\n") + sort.Strings(listing) + imageListings[idx] = listing + } + + for idx, listing := range imageListings { + if idx < 4 && !reflect.DeepEqual(listing, imageListings[idx+1]) { + for idx, errListing := range imageListings { + fmt.Printf("out %d", idx) + for _, image := range errListing { + fmt.Print(image) + } + fmt.Print("") + } + c.Fatalf("All output must be the same") + } + } +} + +func (s 
*DockerSuite) TestImagesEnsureDanglingImageOnlyListedOnce(c *check.C) {
+	// create container 1
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "true")
+	containerID1 := strings.TrimSpace(out)
+
+	// tag as foobox
+	out, _ = dockerCmd(c, "commit", containerID1, "foobox")
+	imageID := stringid.TruncateID(strings.TrimSpace(out))
+
+	// overwrite the tag, making the previous image dangling
+	dockerCmd(c, "tag", "-f", "busybox", "foobox")
+
+	out, _ = dockerCmd(c, "images", "-q", "-f", "dangling=true")
+	if e, a := 1, strings.Count(out, imageID); e != a {
+		c.Fatalf("expected 1 dangling image, got %d: %s", a, out)
+	}
+}
diff --git a/integration-cli/docker_cli_import_test.go b/integration-cli/docker_cli_import_test.go
new file mode 100644
index 00000000..ccfc452d
--- /dev/null
+++ b/integration-cli/docker_cli_import_test.go
@@ -0,0 +1,80 @@
+package main
+
+import (
+	"bufio"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"strings"
+
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestImportDisplay(c *check.C) {
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "true")
+	cleanedContainerID := strings.TrimSpace(out)
+
+	out, _, err := runCommandPipelineWithOutput(
+		exec.Command(dockerBinary, "export", cleanedContainerID),
+		exec.Command(dockerBinary, "import", "-"),
+	)
+	if err != nil {
+		c.Errorf("import failed with errors: %v, output: %q", err, out)
+	}
+
+	if n := strings.Count(out, "\n"); n != 1 {
+		c.Fatalf("display is messed up: %d '\\n' instead of 1:\n%s", n, out)
+	}
+	image := strings.TrimSpace(out)
+
+	out, _ = dockerCmd(c, "run", "--rm", image, "true")
+	if out != "" {
+		c.Fatalf("command output should've been nothing, was %q", out)
+	}
+}
+
+func (s *DockerSuite) TestImportBadURL(c *check.C) {
+	out, _, err := dockerCmdWithError(c, "import", "http://nourl/bad")
+	if err == nil {
+		c.Fatal("import was supposed to fail but didn't")
+	}
+	if !strings.Contains(out, "dial tcp") {
+		c.Fatalf("expected an error msg but didn't get one:\n%s", out)
+	}
+}
+
+func (s *DockerSuite) TestImportFile(c *check.C) {
+	dockerCmd(c, "run", "--name", "test-import", "busybox", "true")
+
+	temporaryFile, err := ioutil.TempFile("", "exportImportTest")
+	if err != nil {
+		c.Fatal("failed to create temporary file", err)
+	}
+	defer os.Remove(temporaryFile.Name())
+
+	runCmd := exec.Command(dockerBinary, "export", "test-import")
+	w := bufio.NewWriter(temporaryFile)
+	runCmd.Stdout = w
+
+	_, err = runCommand(runCmd)
+	if err != nil {
+		c.Fatal("failed to export a container", err)
+	}
+	// flush the buffered writer so the tail of the tar stream reaches the file
+	if err = w.Flush(); err != nil {
+		c.Fatal("failed to flush exported tar to disk", err)
+	}
+
+	out, _ := dockerCmd(c, "import", temporaryFile.Name())
+	if n := strings.Count(out, "\n"); n != 1 {
+		c.Fatalf("display is messed up: %d '\\n' instead of 1:\n%s", n, out)
+	}
+	image := strings.TrimSpace(out)
+
+	out, _ = dockerCmd(c, "run", "--rm", image, "true")
+	if out != "" {
+		c.Fatalf("command output should've been nothing, was %q", out)
+	}
+}
+
+func (s *DockerSuite) TestImportFileNonExistentFile(c *check.C) {
+	_, exitCode, err := dockerCmdWithError(c, "import", "example.com/myImage.tar")
+	if exitCode == 0 || err == nil {
+		c.Fatal("import of a non-existent file should have failed")
+	}
+}
diff --git a/integration-cli/docker_cli_info_test.go b/integration-cli/docker_cli_info_test.go
new file mode 100644
index 00000000..86719f4e
--- /dev/null
+++ b/integration-cli/docker_cli_info_test.go
@@ -0,0 +1,37 @@
+package main
+
+import (
+	"strings"
+
+	"github.com/docker/docker/utils"
+	"github.com/go-check/check"
+)
+
+// ensure docker info succeeds
+func (s *DockerSuite) TestInfoEnsureSucceeds(c *check.C) {
+	out, _ := dockerCmd(c, 
"info") + + // always shown fields + stringsToCheck := []string{ + "ID:", + "Containers:", + "Images:", + "Execution Driver:", + "Logging Driver:", + "Operating System:", + "CPUs:", + "Total Memory:", + "Kernel Version:", + "Storage Driver:", + } + + if utils.ExperimentalBuild() { + stringsToCheck = append(stringsToCheck, "Experimental: true") + } + + for _, linePrefix := range stringsToCheck { + if !strings.Contains(out, linePrefix) { + c.Errorf("couldn't find string %v in output", linePrefix) + } + } +} diff --git a/integration-cli/docker_cli_inspect_experimental_test.go b/integration-cli/docker_cli_inspect_experimental_test.go new file mode 100644 index 00000000..fada86e6 --- /dev/null +++ b/integration-cli/docker_cli_inspect_experimental_test.go @@ -0,0 +1,44 @@ +// +build experimental + +package main + +import ( + "github.com/docker/docker/api/types" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestInspectNamedMountPoint(c *check.C) { + dockerCmd(c, "run", "-d", "--name", "test", "-v", "data:/data", "busybox", "cat") + + vol, err := inspectFieldJSON("test", "Mounts") + c.Assert(err, check.IsNil) + + var mp []types.MountPoint + err = unmarshalJSON([]byte(vol), &mp) + c.Assert(err, check.IsNil) + + if len(mp) != 1 { + c.Fatalf("Expected 1 mount point, was %v\n", len(mp)) + } + + m := mp[0] + if m.Name != "data" { + c.Fatalf("Expected name data, was %s\n", m.Name) + } + + if m.Driver != "local" { + c.Fatalf("Expected driver local, was %s\n", m.Driver) + } + + if m.Source == "" { + c.Fatalf("Expected source to not be empty") + } + + if m.RW != true { + c.Fatalf("Expected rw to be true") + } + + if m.Destination != "/data" { + c.Fatalf("Expected destination /data, was %s\n", m.Destination) + } +} diff --git a/integration-cli/docker_cli_inspect_test.go b/integration-cli/docker_cli_inspect_test.go new file mode 100644 index 00000000..3e42d0c3 --- /dev/null +++ b/integration-cli/docker_cli_inspect_test.go @@ -0,0 +1,288 @@ +package main + +import ( + "fmt" + "os/exec" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestInspectImage(c *check.C) { + imageTest := "emptyfs" + imageTestID := "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + id, err := inspectField(imageTest, "Id") + c.Assert(err, check.IsNil) + + if id != imageTestID { + c.Fatalf("Expected id: %s for image: %s but received id: %s", imageTestID, imageTest, id) + } +} + +func (s *DockerSuite) TestInspectInt64(c *check.C) { + runCmd := exec.Command(dockerBinary, "run", "-d", "-m=300M", "busybox", "true") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + c.Fatalf("failed to run container: %v, output: %q", err, out) + } + out = strings.TrimSpace(out) + + inspectOut, err := inspectField(out, "HostConfig.Memory") + c.Assert(err, check.IsNil) + + if inspectOut != "314572800" { + c.Fatalf("inspect got wrong value, got: %q, expected: 314572800", inspectOut) + } +} + +func (s *DockerSuite) TestInspectDefault(c *check.C) { + + //Both the container and image are named busybox. docker inspect will fetch the container JSON. + //If the container JSON is not available, it will go for the image JSON. + + dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true") + dockerCmd(c, "inspect", "busybox") +} + +func (s *DockerSuite) TestInspectTypeFlagContainer(c *check.C) { + + //Both the container and image are named busybox. docker inspect will fetch container + //JSON State.Running field. 
If the field is true, it's a container. + + dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "top") + + formatStr := fmt.Sprintf("--format='{{.State.Running}}'") + out, exitCode, err := dockerCmdWithError(c, "inspect", "--type=container", formatStr, "busybox") + if exitCode != 0 || err != nil { + c.Fatalf("failed to inspect container: %s, %v", out, err) + } + + if out != "true\n" { + c.Fatal("not a container JSON") + } +} + +func (s *DockerSuite) TestInspectTypeFlagWithNoContainer(c *check.C) { + + //Run this test on an image named busybox. docker inspect will try to fetch container + //JSON. Since there is no container named busybox and --type=container, docker inspect will + //not try to get the image JSON. It will throw an error. + + dockerCmd(c, "run", "-d", "busybox", "true") + + _, exitCode, err := dockerCmdWithError(c, "inspect", "--type=container", "busybox") + if exitCode == 0 || err == nil { + c.Fatalf("docker inspect should have failed, as there is no container named busybox") + } +} + +func (s *DockerSuite) TestInspectTypeFlagWithImage(c *check.C) { + + //Both the container and image are named busybox. docker inspect will fetch image + //JSON as --type=image. if there is no image with name busybox, docker inspect + //will throw an error. + + dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true") + + out, exitCode, err := dockerCmdWithError(c, "inspect", "--type=image", "busybox") + if exitCode != 0 || err != nil { + c.Fatalf("failed to inspect image: %s, %v", out, err) + } + + if strings.Contains(out, "State") { + c.Fatal("not an image JSON") + } +} + +func (s *DockerSuite) TestInspectTypeFlagWithInvalidValue(c *check.C) { + + //Both the container and image are named busybox. docker inspect will fail + //as --type=foobar is not a valid value for the flag. 
+ + dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true") + + out, exitCode, err := dockerCmdWithError(c, "inspect", "--type=foobar", "busybox") + if exitCode != 0 || err != nil { + if !strings.Contains(out, "not a valid value for --type") { + c.Fatalf("failed to inspect image: %s, %v", out, err) + } + } +} + +func (s *DockerSuite) TestInspectImageFilterInt(c *check.C) { + imageTest := "emptyfs" + out, err := inspectField(imageTest, "Size") + c.Assert(err, check.IsNil) + + size, err := strconv.Atoi(out) + if err != nil { + c.Fatalf("failed to inspect size of the image: %s, %v", out, err) + } + + //now see if the size turns out to be the same + formatStr := fmt.Sprintf("--format='{{eq .Size %d}}'", size) + out, exitCode, err := dockerCmdWithError(c, "inspect", formatStr, imageTest) + if exitCode != 0 || err != nil { + c.Fatalf("failed to inspect image: %s, %v", out, err) + } + if result, err := strconv.ParseBool(strings.TrimSuffix(out, "\n")); err != nil || !result { + c.Fatalf("Expected size: %d for image: %s but received size: %s", size, imageTest, strings.TrimSuffix(out, "\n")) + } +} + +func (s *DockerSuite) TestInspectContainerFilterInt(c *check.C) { + runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "cat") + runCmd.Stdin = strings.NewReader("blahblah") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + c.Fatalf("failed to run container: %v, output: %q", err, out) + } + + id := strings.TrimSpace(out) + + out, err = inspectField(id, "State.ExitCode") + c.Assert(err, check.IsNil) + + exitCode, err := strconv.Atoi(out) + if err != nil { + c.Fatalf("failed to inspect exitcode of the container: %s, %v", out, err) + } + + //now get the exit code to verify + formatStr := fmt.Sprintf("--format='{{eq .State.ExitCode %d}}'", exitCode) + out, _ = dockerCmd(c, "inspect", formatStr, id) + if result, err := strconv.ParseBool(strings.TrimSuffix(out, "\n")); err != nil || !result { + c.Fatalf("Expected exitcode: %d for container: %s", exitCode, id) + } +} + +func (s *DockerSuite) TestInspectImageGraphDriver(c *check.C) { + imageTest := "emptyfs" + name, err := inspectField(imageTest, "GraphDriver.Name") + c.Assert(err, check.IsNil) + + if name != "devicemapper" && name != "overlay" && name != "vfs" && name != "zfs" && name != "btrfs" && name != "aufs" { + c.Fatalf("%v is not a valid graph driver name", name) + } + + if name != "devicemapper" { + return + } + + deviceID, err := inspectField(imageTest, "GraphDriver.Data.DeviceId") + c.Assert(err, check.IsNil) + + _, err = strconv.Atoi(deviceID) + if err != nil { + c.Fatalf("failed to inspect DeviceId of the image: %s, %v", deviceID, err) + } + + deviceSize, err := inspectField(imageTest, "GraphDriver.Data.DeviceSize") + c.Assert(err, check.IsNil) + + _, err = strconv.ParseUint(deviceSize, 10, 64) + if err != nil { + c.Fatalf("failed to inspect DeviceSize of the image: %s, %v", deviceSize, err) + } +} + +func (s *DockerSuite) TestInspectContainerGraphDriver(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + out = strings.TrimSpace(out) + + name, err := inspectField(out, "GraphDriver.Name") + c.Assert(err, check.IsNil) + + if name != "devicemapper" && name != "overlay" && name != "vfs" && name != "zfs" && name != "btrfs" && name != "aufs" { + c.Fatalf("%v is not a valid graph driver name", name) + } + + if name != "devicemapper" { + return + } + + deviceID, err := inspectField(out, "GraphDriver.Data.DeviceId") + c.Assert(err, check.IsNil) + + _, err = strconv.Atoi(deviceID) 
+ if err != nil { + c.Fatalf("failed to inspect DeviceId of the image: %s, %v", deviceID, err) + } + + deviceSize, err := inspectField(out, "GraphDriver.Data.DeviceSize") + c.Assert(err, check.IsNil) + + _, err = strconv.ParseUint(deviceSize, 10, 64) + if err != nil { + c.Fatalf("failed to inspect DeviceSize of the image: %s, %v", deviceSize, err) + } +} + +func (s *DockerSuite) TestInspectBindMountPoint(c *check.C) { + dockerCmd(c, "run", "-d", "--name", "test", "-v", "/data:/data:ro,z", "busybox", "cat") + + vol, err := inspectFieldJSON("test", "Mounts") + c.Assert(err, check.IsNil) + + var mp []types.MountPoint + err = unmarshalJSON([]byte(vol), &mp) + c.Assert(err, check.IsNil) + + if len(mp) != 1 { + c.Fatalf("Expected 1 mount point, was %v\n", len(mp)) + } + + m := mp[0] + + if m.Name != "" { + c.Fatal("Expected name to be empty") + } + + if m.Driver != "" { + c.Fatal("Expected driver to be empty") + } + + if m.Source != "/data" { + c.Fatalf("Expected source /data, was %s\n", m.Source) + } + + if m.Destination != "/data" { + c.Fatalf("Expected destination /data, was %s\n", m.Destination) + } + + if m.Mode != "ro,z" { + c.Fatalf("Expected mode `ro,z`, was %s\n", m.Mode) + } + + if m.RW != false { + c.Fatalf("Expected rw to be false") + } +} + +// #14947 +func (s *DockerSuite) TestInspectTimesAsRFC3339Nano(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + id := strings.TrimSpace(out) + startedAt, err := inspectField(id, "State.StartedAt") + c.Assert(err, check.IsNil) + finishedAt, err := inspectField(id, "State.FinishedAt") + c.Assert(err, check.IsNil) + created, err := inspectField(id, "Created") + c.Assert(err, check.IsNil) + + _, err = time.Parse(time.RFC3339Nano, startedAt) + c.Assert(err, check.IsNil) + _, err = time.Parse(time.RFC3339Nano, finishedAt) + c.Assert(err, check.IsNil) + _, err = time.Parse(time.RFC3339Nano, created) + c.Assert(err, check.IsNil) + + created, err = inspectField("busybox", "Created") + c.Assert(err, check.IsNil) + + _, err = time.Parse(time.RFC3339Nano, created) + c.Assert(err, check.IsNil) +} diff --git a/integration-cli/docker_cli_kill_test.go b/integration-cli/docker_cli_kill_test.go new file mode 100644 index 00000000..685f4f5e --- /dev/null +++ b/integration-cli/docker_cli_kill_test.go @@ -0,0 +1,100 @@ +package main + +import ( + "fmt" + "net/http" + "strings" + + "github.com/go-check/check" +) + +func (s *DockerSuite) TestKillContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + cleanedContainerID := strings.TrimSpace(out) + c.Assert(waitRun(cleanedContainerID), check.IsNil) + + dockerCmd(c, "kill", cleanedContainerID) + + out, _ = dockerCmd(c, "ps", "-q") + if strings.Contains(out, cleanedContainerID) { + c.Fatal("killed container is still running") + } +} + +func (s *DockerSuite) TestKillofStoppedContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + cleanedContainerID := strings.TrimSpace(out) + + dockerCmd(c, "stop", cleanedContainerID) + + _, _, err := dockerCmdWithError(c, "kill", "-s", "30", cleanedContainerID) + c.Assert(err, check.Not(check.IsNil), check.Commentf("Container %s is not running", cleanedContainerID)) +} + +func (s *DockerSuite) TestKillDifferentUserContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "-u", "daemon", "-d", "busybox", "top") + cleanedContainerID := strings.TrimSpace(out) + c.Assert(waitRun(cleanedContainerID), check.IsNil) + + dockerCmd(c, "kill", cleanedContainerID) + + out, _ = dockerCmd(c, "ps", "-q") + if strings.Contains(out, 
cleanedContainerID) {
+ c.Fatal("killed container is still running")
+ }
+}
+
+// Regression test for correct signal parsing; see #13665
+func (s *DockerSuite) TestKillWithSignal(c *check.C) {
+ out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
+ cid := strings.TrimSpace(out)
+ c.Assert(waitRun(cid), check.IsNil)
+
+ dockerCmd(c, "kill", "-s", "SIGWINCH", cid)
+
+ running, _ := inspectField(cid, "State.Running")
+ if running != "true" {
+ c.Fatal("Container should be in running state after SIGWINCH")
+ }
+}
+
+func (s *DockerSuite) TestKillWithInvalidSignal(c *check.C) {
+ out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
+ cid := strings.TrimSpace(out)
+ c.Assert(waitRun(cid), check.IsNil)
+
+ out, _, err := dockerCmdWithError(c, "kill", "-s", "0", cid)
+ c.Assert(err, check.NotNil)
+ // strings.Contains is needed here: ContainsAny would match any single
+ // character of the message rather than the whole error string.
+ if !strings.Contains(out, "Invalid signal: 0") {
+ c.Fatal("Kill with an invalid signal didn't error out correctly")
+ }
+
+ running, _ := inspectField(cid, "State.Running")
+ if running != "true" {
+ c.Fatal("Container should be in running state after an invalid signal")
+ }
+
+ out, _ = dockerCmd(c, "run", "-d", "busybox", "top")
+ cid = strings.TrimSpace(out)
+ c.Assert(waitRun(cid), check.IsNil)
+
+ out, _, err = dockerCmdWithError(c, "kill", "-s", "SIG42", cid)
+ c.Assert(err, check.NotNil)
+ if !strings.Contains(out, "Invalid signal: SIG42") {
+ c.Fatal("Kill with an invalid signal didn't error out correctly")
+ }
+
+ running, _ = inspectField(cid, "State.Running")
+ if running != "true" {
+ c.Fatal("Container should be in running state after an invalid signal")
+ }
+}
+
+func (s *DockerSuite) TestKillofStoppedContainerAPIPre120(c *check.C) {
+ dockerCmd(c, "run", "--name", "docker-kill-test-api", "-d", "busybox", "top")
+ dockerCmd(c, "stop", "docker-kill-test-api")
+
+ status, _, err := sockRequest("POST", fmt.Sprintf("/v1.19/containers/%s/kill", "docker-kill-test-api"), nil)
+ c.Assert(err, check.IsNil)
+ c.Assert(status, check.Equals, http.StatusNoContent)
+} diff --git a/integration-cli/docker_cli_links_test.go b/integration-cli/docker_cli_links_test.go new file mode 100644 index 00000000..568f20a9 --- /dev/null +++ b/integration-cli/docker_cli_links_test.go @@ -0,0 +1,228 @@ +package main
+
+import (
+ "fmt"
+ "github.com/go-check/check"
+ "reflect"
+ "regexp"
+ "strings"
+ "time"
+)
+
+func (s *DockerSuite) TestLinksPingUnlinkedContainers(c *check.C) {
+
+ _, exitCode, err := dockerCmdWithError(c, "run", "--rm", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1")
+
+ if exitCode == 0 {
+ c.Fatal("run ping did not fail")
+ } else if exitCode != 1 {
+ c.Fatalf("run ping failed with errors: %v", err)
+ }
+
+}
+
+// Test for appropriate error when calling --link with an invalid target container
+func (s *DockerSuite) TestLinksInvalidContainerTarget(c *check.C) {
+
+ out, _, err := dockerCmdWithError(c, "run", "--link", "bogus:alias", "busybox", "true")
+
+ if err == nil {
+ c.Fatal("an invalid container target should produce an error")
+ }
+ if !strings.Contains(out, "Could not get container") {
+ c.Fatalf("error output expected 'Could not get container', but got %q instead; err: %v", out, err)
+ }
+
+}
+
+func (s *DockerSuite) TestLinksPingLinkedContainers(c *check.C) {
+
+ dockerCmd(c, "run", "-d", "--name", "container1", "--hostname", "fred", "busybox", "top")
+ dockerCmd(c, "run", "-d", "--name", "container2", "--hostname", "wilma", "busybox", "top")
+
+ runArgs := []string{"run", "--rm", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sh", 
"-c"} + pingCmd := "ping -c 1 %s -W 1 && ping -c 1 %s -W 1" + + // test ping by alias, ping by name, and ping by hostname + // 1. Ping by alias + dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "alias1", "alias2"))...) + // 2. Ping by container name + dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "container1", "container2"))...) + // 3. Ping by hostname + dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "fred", "wilma"))...) + +} + +func (s *DockerSuite) TestLinksPingLinkedContainersAfterRename(c *check.C) { + + out, _ := dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") + idA := strings.TrimSpace(out) + out, _ = dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top") + idB := strings.TrimSpace(out) + dockerCmd(c, "rename", "container1", "container_new") + dockerCmd(c, "run", "--rm", "--link", "container_new:alias1", "--link", "container2:alias2", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") + dockerCmd(c, "kill", idA) + dockerCmd(c, "kill", idB) + +} + +func (s *DockerSuite) TestLinksInspectLinksStarted(c *check.C) { + var ( + expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}} + result []string + ) + dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "top") + links, err := inspectFieldJSON("testinspectlink", "HostConfig.Links") + if err != nil { + c.Fatal(err) + } + + err = unmarshalJSON([]byte(links), &result) + if err != nil { + c.Fatal(err) + } + + output := convertSliceOfStringsToMap(result) + + equal := reflect.DeepEqual(output, expected) + + if !equal { + c.Fatalf("Links %s, expected %s", result, expected) + } +} + +func (s *DockerSuite) TestLinksInspectLinksStopped(c *check.C) { + var ( + expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}} + result []string + ) + dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "true") + links, err := inspectFieldJSON("testinspectlink", "HostConfig.Links") + if err != nil { + c.Fatal(err) + } + + err = unmarshalJSON([]byte(links), &result) + if err != nil { + c.Fatal(err) + } + + output := convertSliceOfStringsToMap(result) + + equal := reflect.DeepEqual(output, expected) + + if !equal { + c.Fatalf("Links %s, but expected %s", result, expected) + } + +} + +func (s *DockerSuite) TestLinksNotStartedParentNotFail(c *check.C) { + + dockerCmd(c, "create", "--name=first", "busybox", "top") + dockerCmd(c, "create", "--name=second", "--link=first:first", "busybox", "top") + dockerCmd(c, "start", "first") + +} + +func (s *DockerSuite) TestLinksHostsFilesInject(c *check.C) { + testRequires(c, SameHostDaemon, ExecSupport) + + out, _ := dockerCmd(c, "run", "-itd", "--name", "one", "busybox", "top") + idOne := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "run", "-itd", "--name", "two", "--link", "one:onetwo", "busybox", "top") + idTwo := strings.TrimSpace(out) + + time.Sleep(1 * time.Second) + + contentOne, err := readContainerFileWithExec(idOne, "/etc/hosts") + if err != nil { + c.Fatal(err, string(contentOne)) + } + + contentTwo, err 
:= readContainerFileWithExec(idTwo, "/etc/hosts")
+ if err != nil {
+ c.Fatal(err, string(contentTwo))
+ }
+
+ if !strings.Contains(string(contentTwo), "onetwo") {
+ c.Fatal("Host is not present in updated hosts file", string(contentTwo))
+ }
+
+}
+
+func (s *DockerSuite) TestLinksUpdateOnRestart(c *check.C) {
+ testRequires(c, SameHostDaemon, ExecSupport)
+ dockerCmd(c, "run", "-d", "--name", "one", "busybox", "top")
+ out, _ := dockerCmd(c, "run", "-d", "--name", "two", "--link", "one:onetwo", "--link", "one:one", "busybox", "top")
+ id := strings.TrimSpace(string(out))
+
+ realIP, err := inspectField("one", "NetworkSettings.IPAddress")
+ if err != nil {
+ c.Fatal(err)
+ }
+ content, err := readContainerFileWithExec(id, "/etc/hosts")
+ if err != nil {
+ c.Fatal(err, string(content))
+ }
+ getIP := func(hosts []byte, hostname string) string {
+ re := regexp.MustCompile(fmt.Sprintf(`(\S*)\t%s`, regexp.QuoteMeta(hostname)))
+ matches := re.FindSubmatch(hosts)
+ if matches == nil {
+ c.Fatalf("Hostname %s has no matches in hosts", hostname)
+ }
+ return string(matches[1])
+ }
+ if ip := getIP(content, "one"); ip != realIP {
+ c.Fatalf("For 'one' alias expected IP: %s, got: %s", realIP, ip)
+ }
+ if ip := getIP(content, "onetwo"); ip != realIP {
+ c.Fatalf("For 'onetwo' alias expected IP: %s, got: %s", realIP, ip)
+ }
+ dockerCmd(c, "restart", "one")
+ realIP, err = inspectField("one", "NetworkSettings.IPAddress")
+ if err != nil {
+ c.Fatal(err)
+ }
+ content, err = readContainerFileWithExec(id, "/etc/hosts")
+ if err != nil {
+ c.Fatal(err, string(content))
+ }
+ if ip := getIP(content, "one"); ip != realIP {
+ c.Fatalf("For 'one' alias expected IP: %s, got: %s", realIP, ip)
+ }
+ if ip := getIP(content, "onetwo"); ip != realIP {
+ c.Fatalf("For 'onetwo' alias expected IP: %s, got: %s", realIP, ip)
+ }
+}
+
+func (s *DockerSuite) TestLinksEnvs(c *check.C) {
+ dockerCmd(c, "run", "-d", "-e", "e1=", "-e", "e2=v2", "-e", "e3=v3=v3", "--name=first", "busybox", "top")
+ out, _ := dockerCmd(c, "run", "--name=second", "--link=first:first", "busybox", "env")
+ if !strings.Contains(out, "FIRST_ENV_e1=\n") ||
+ !strings.Contains(out, "FIRST_ENV_e2=v2") ||
+ !strings.Contains(out, "FIRST_ENV_e3=v3=v3") {
+ c.Fatalf("Incorrect output: %s", out)
+ }
+}
+
+func (s *DockerSuite) TestLinkShortDefinition(c *check.C) {
+ out, _ := dockerCmd(c, "run", "-d", "--name", "shortlinkdef", "busybox", "top")
+
+ cid := strings.TrimSpace(out)
+ c.Assert(waitRun(cid), check.IsNil)
+
+ out, _ = dockerCmd(c, "run", "-d", "--name", "link2", "--link", "shortlinkdef", "busybox", "top")
+
+ cid2 := strings.TrimSpace(out)
+ c.Assert(waitRun(cid2), check.IsNil)
+
+ links, err := inspectFieldJSON(cid2, "HostConfig.Links")
+ c.Assert(err, check.IsNil)
+ c.Assert(links, check.Equals, "[\"/shortlinkdef:/link2/shortlinkdef\"]")
+} diff --git a/integration-cli/docker_cli_links_unix_test.go b/integration-cli/docker_cli_links_unix_test.go new file mode 100644 index 00000000..67a44643 --- /dev/null +++ b/integration-cli/docker_cli_links_unix_test.go @@ -0,0 +1,42 @@ +// +build !windows
+
+package main
+
+import (
+ "io/ioutil"
+ "os"
+ "strings"
+
+ "github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestLinksEtcHostsRegularFile(c *check.C) {
+ out, _ := dockerCmd(c, "run", "--net=host", "busybox", "ls", "-la", "/etc/hosts")
+ if !strings.HasPrefix(out, "-") {
+ c.Errorf("/etc/hosts should be a regular file")
+ }
+}
+
+func (s *DockerSuite) TestLinksEtcHostsContentMatch(c *check.C) {
+ testRequires(c, SameHostDaemon)
+
+ out, _ := 
dockerCmd(c, "run", "--net=host", "busybox", "cat", "/etc/hosts")
+ hosts, err := ioutil.ReadFile("/etc/hosts")
+ if os.IsNotExist(err) {
+ c.Skip("/etc/hosts does not exist, skip this test")
+ }
+
+ if out != string(hosts) {
+ c.Errorf("container: %s\n\nhost:%s", out, hosts)
+ }
+
+}
+
+func (s *DockerSuite) TestLinksNetworkHostContainer(c *check.C) {
+ dockerCmd(c, "run", "-d", "--net", "host", "--name", "host_container", "busybox", "top")
+ out, _, err := dockerCmdWithError(c, "run", "--name", "should_fail", "--link", "host_container:tester", "busybox", "true")
+ if err == nil || !strings.Contains(out, "--net=host can't be used with links. This would result in undefined behavior") {
+ c.Fatalf("Running container linking to a container with --net host should have failed: %s", out)
+ }
+
+} diff --git a/integration-cli/docker_cli_login_test.go b/integration-cli/docker_cli_login_test.go new file mode 100644 index 00000000..3b4431d2 --- /dev/null +++ b/integration-cli/docker_cli_login_test.go @@ -0,0 +1,21 @@ +package main
+
+import (
+ "bytes"
+ "os/exec"
+
+ "github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestLoginWithoutTTY(c *check.C) {
+ cmd := exec.Command(dockerBinary, "login")
+
+ // Send to stdin so the process does not get the TTY
+ cmd.Stdin = bytes.NewBufferString("buffer test string \n")
+
+ // run the command and block until it's done
+ if err := cmd.Run(); err == nil {
+ c.Fatal("Expected non-nil err when logging in and a TTY is not available")
+ }
+
+} diff --git a/integration-cli/docker_cli_logs_test.go b/integration-cli/docker_cli_logs_test.go new file mode 100644 index 00000000..6c942177 --- /dev/null +++ b/integration-cli/docker_cli_logs_test.go @@ -0,0 +1,363 @@ +package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "os/exec"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/docker/docker/pkg/timeutils"
+ "github.com/go-check/check"
+)
+
+// This used to work, it tests a log of PageSize-1 (gh#4851)
+func (s *DockerSuite) TestLogsContainerSmallerThanPage(c *check.C) {
+ testLen := 32767
+ out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen))
+ cleanedContainerID := strings.TrimSpace(out)
+
+ dockerCmd(c, "wait", cleanedContainerID)
+ out, _ = dockerCmd(c, "logs", cleanedContainerID)
+ if len(out) != testLen+1 {
+ c.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out))
+ }
+}
+
+// Regression test: When going over the PageSize, it used to panic (gh#4851)
+func (s *DockerSuite) TestLogsContainerBiggerThanPage(c *check.C) {
+ testLen := 32768
+ out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen))
+
+ cleanedContainerID := strings.TrimSpace(out)
+ dockerCmd(c, "wait", cleanedContainerID)
+
+ out, _ = dockerCmd(c, "logs", cleanedContainerID)
+
+ if len(out) != testLen+1 {
+ c.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out))
+ }
+}
+
+// Regression test: When going much over the PageSize, it used to block (gh#4851)
+func (s *DockerSuite) TestLogsContainerMuchBiggerThanPage(c *check.C) {
+ testLen := 33000
+ out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen))
+
+ cleanedContainerID := strings.TrimSpace(out)
+ dockerCmd(c, "wait", cleanedContainerID)
+
+ out, _ = dockerCmd(c, "logs", cleanedContainerID)
+
+ if len(out) != testLen+1 {
+ c.Fatalf("Expected log length of %d, received %d\n", 
testLen+1, len(out)) + } +} + +func (s *DockerSuite) TestLogsTimestamps(c *check.C) { + testLen := 100 + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo =; done;", testLen)) + + cleanedContainerID := strings.TrimSpace(out) + dockerCmd(c, "wait", cleanedContainerID) + + out, _ = dockerCmd(c, "logs", "-t", cleanedContainerID) + + lines := strings.Split(out, "\n") + + if len(lines) != testLen+1 { + c.Fatalf("Expected log %d lines, received %d\n", testLen+1, len(lines)) + } + + ts := regexp.MustCompile(`^.* `) + + for _, l := range lines { + if l != "" { + _, err := time.Parse(timeutils.RFC3339NanoFixed+" ", ts.FindString(l)) + if err != nil { + c.Fatalf("Failed to parse timestamp from %v: %v", l, err) + } + if l[29] != 'Z' { // ensure we have padded 0's + c.Fatalf("Timestamp isn't padded properly: %s", l) + } + } + } +} + +func (s *DockerSuite) TestLogsSeparateStderr(c *check.C) { + msg := "stderr_log" + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)) + + cleanedContainerID := strings.TrimSpace(out) + dockerCmd(c, "wait", cleanedContainerID) + + stdout, stderr, _ := dockerCmdWithStdoutStderr(c, "logs", cleanedContainerID) + + if stdout != "" { + c.Fatalf("Expected empty stdout stream, got %v", stdout) + } + + stderr = strings.TrimSpace(stderr) + if stderr != msg { + c.Fatalf("Expected %v in stderr stream, got %v", msg, stderr) + } +} + +func (s *DockerSuite) TestLogsStderrInStdout(c *check.C) { + msg := "stderr_log" + out, _ := dockerCmd(c, "run", "-d", "-t", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)) + + cleanedContainerID := strings.TrimSpace(out) + dockerCmd(c, "wait", cleanedContainerID) + + stdout, stderr, _ := dockerCmdWithStdoutStderr(c, "logs", cleanedContainerID) + if stderr != "" { + c.Fatalf("Expected empty stderr stream, got %v", stderr) + } + + stdout = strings.TrimSpace(stdout) + if stdout != msg { + c.Fatalf("Expected %v in stdout stream, got %v", msg, stdout) + } +} + +func (s *DockerSuite) TestLogsTail(c *check.C) { + testLen := 100 + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo =; done;", testLen)) + + cleanedContainerID := strings.TrimSpace(out) + dockerCmd(c, "wait", cleanedContainerID) + + out, _ = dockerCmd(c, "logs", "--tail", "5", cleanedContainerID) + + lines := strings.Split(out, "\n") + + if len(lines) != 6 { + c.Fatalf("Expected log %d lines, received %d\n", 6, len(lines)) + } + out, _ = dockerCmd(c, "logs", "--tail", "all", cleanedContainerID) + + lines = strings.Split(out, "\n") + + if len(lines) != testLen+1 { + c.Fatalf("Expected log %d lines, received %d\n", testLen+1, len(lines)) + } + out, _, _ = dockerCmdWithStdoutStderr(c, "logs", "--tail", "random", cleanedContainerID) + + lines = strings.Split(out, "\n") + + if len(lines) != testLen+1 { + c.Fatalf("Expected log %d lines, received %d\n", testLen+1, len(lines)) + } +} + +func (s *DockerSuite) TestLogsFollowStopped(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "echo", "hello") + + cleanedContainerID := strings.TrimSpace(out) + dockerCmd(c, "wait", cleanedContainerID) + + logsCmd := exec.Command(dockerBinary, "logs", "-f", cleanedContainerID) + if err := logsCmd.Start(); err != nil { + c.Fatal(err) + } + + errChan := make(chan error) + go func() { + errChan <- logsCmd.Wait() + close(errChan) + }() + + select { + case err := <-errChan: + c.Assert(err, check.IsNil) + case <-time.After(1 * time.Second): + c.Fatal("Following 
logs hung")
+ }
+}
+
+func (s *DockerSuite) TestLogsSince(c *check.C) {
+ name := "testlogssince"
+ out, _ := dockerCmd(c, "run", "--name="+name, "busybox", "/bin/sh", "-c", "for i in $(seq 1 3); do sleep 2; echo `date +%s` log$i; done")
+
+ log2Line := strings.Split(strings.Split(out, "\n")[1], " ")
+ t, err := strconv.ParseInt(log2Line[0], 10, 64) // the timestamp at which log2 is written
+ c.Assert(err, check.IsNil)
+ since := t + 1 // add 1s so log1 & log2 don't show up
+ out, _ = dockerCmd(c, "logs", "-t", fmt.Sprintf("--since=%v", since), name)
+
+ // Skip 2 seconds
+ unexpected := []string{"log1", "log2"}
+ for _, v := range unexpected {
+ if strings.Contains(out, v) {
+ c.Fatalf("unexpected log message returned=%v, since=%v\nout=%v", v, since, out)
+ }
+ }
+ // Test with default value specified and parameter omitted
+ expected := []string{"log1", "log2", "log3"}
+ for _, cmd := range []*exec.Cmd{
+ exec.Command(dockerBinary, "logs", "-t", name),
+ exec.Command(dockerBinary, "logs", "-t", "--since=0", name),
+ } {
+ out, _, err = runCommandWithOutput(cmd)
+ if err != nil {
+ c.Fatalf("failed to log container: %s, %v", out, err)
+ }
+ for _, v := range expected {
+ if !strings.Contains(out, v) {
+ c.Fatalf("'%v' does not contain=%v\nout=%s", cmd.Args, v, out)
+ }
+ }
+ }
+}
+
+func (s *DockerSuite) TestLogsSinceFutureFollow(c *check.C) {
+ out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", `for i in $(seq 1 5); do date +%s; sleep 1; done`)
+ cleanedContainerID := strings.TrimSpace(out)
+
+ now := daemonTime(c).Unix()
+ since := now + 2
+ out, _ = dockerCmd(c, "logs", "-f", fmt.Sprintf("--since=%v", since), cleanedContainerID)
+ lines := strings.Split(strings.TrimSpace(out), "\n")
+ if len(lines) == 0 {
+ c.Fatal("got no log lines")
+ }
+ for _, v := range lines {
+ ts, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ c.Fatalf("cannot parse timestamp output from log: '%v'\nout=%s", v, out)
+ }
+ if ts < since {
+ c.Fatalf("earlier log found. 
since=%v logdate=%v", since, ts) + } + } +} + +// Regression test for #8832 +func (s *DockerSuite) TestLogsFollowSlowStdoutConsumer(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", `usleep 200000;yes X | head -c 200000`) + + cleanedContainerID := strings.TrimSpace(out) + + stopSlowRead := make(chan bool) + + go func() { + exec.Command(dockerBinary, "wait", cleanedContainerID).Run() + stopSlowRead <- true + }() + + logCmd := exec.Command(dockerBinary, "logs", "-f", cleanedContainerID) + stdout, err := logCmd.StdoutPipe() + c.Assert(err, check.IsNil) + c.Assert(logCmd.Start(), check.IsNil) + + // First read slowly + bytes1, err := consumeWithSpeed(stdout, 10, 50*time.Millisecond, stopSlowRead) + c.Assert(err, check.IsNil) + + // After the container has finished we can continue reading fast + bytes2, err := consumeWithSpeed(stdout, 32*1024, 0, nil) + c.Assert(err, check.IsNil) + + actual := bytes1 + bytes2 + expected := 200000 + if actual != expected { + c.Fatalf("Invalid bytes read: %d, expected %d", actual, expected) + } + +} + +func (s *DockerSuite) TestLogsFollowGoroutinesWithStdout(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true; do echo hello; sleep 2; done") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + type info struct { + NGoroutines int + } + getNGoroutines := func() int { + var i info + status, b, err := sockRequest("GET", "/info", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, 200) + c.Assert(json.Unmarshal(b, &i), check.IsNil) + return i.NGoroutines + } + + nroutines := getNGoroutines() + + cmd := exec.Command(dockerBinary, "logs", "-f", id) + r, w := io.Pipe() + cmd.Stdout = w + c.Assert(cmd.Start(), check.IsNil) + + // Make sure pipe is written to + chErr := make(chan error) + go func() { + b := make([]byte, 1) + _, err := r.Read(b) + chErr <- err + }() + c.Assert(<-chErr, check.IsNil) + c.Assert(cmd.Process.Kill(), check.IsNil) + + // NGoroutines is not updated right away, so we need to wait before failing + t := time.After(30 * time.Second) + for { + select { + case <-t: + if n := getNGoroutines(); n > nroutines { + c.Fatalf("leaked goroutines: expected less than or equal to %d, got: %d", nroutines, n) + } + default: + if n := getNGoroutines(); n <= nroutines { + return + } + time.Sleep(200 * time.Millisecond) + } + } +} + +func (s *DockerSuite) TestLogsFollowGoroutinesNoOutput(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true; do sleep 2; done") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + type info struct { + NGoroutines int + } + getNGoroutines := func() int { + var i info + status, b, err := sockRequest("GET", "/info", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, 200) + c.Assert(json.Unmarshal(b, &i), check.IsNil) + return i.NGoroutines + } + + nroutines := getNGoroutines() + + cmd := exec.Command(dockerBinary, "logs", "-f", id) + c.Assert(cmd.Start(), check.IsNil) + time.Sleep(200 * time.Millisecond) + c.Assert(cmd.Process.Kill(), check.IsNil) + + // NGoroutines is not updated right away, so we need to wait before failing + t := time.After(30 * time.Second) + for { + select { + case <-t: + if n := getNGoroutines(); n > nroutines { + c.Fatalf("leaked goroutines: expected less than or equal to %d, got: %d", nroutines, n) + } + default: + if n := getNGoroutines(); n <= nroutines { + return + } + time.Sleep(200 * time.Millisecond) + } + } +} diff --git 
a/integration-cli/docker_cli_nat_test.go b/integration-cli/docker_cli_nat_test.go new file mode 100644 index 00000000..a0773fef --- /dev/null +++ b/integration-cli/docker_cli_nat_test.go @@ -0,0 +1,108 @@ +package main
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net"
+ "strings"
+
+ "github.com/go-check/check"
+)
+
+func startServerContainer(c *check.C, msg string, port int) string {
+ name := "server"
+ cmd := []string{
+ "-d",
+ "-p", fmt.Sprintf("%d:%d", port, port),
+ "busybox",
+ "sh", "-c", fmt.Sprintf("echo %q | nc -lp %d", msg, port),
+ }
+ if err := waitForContainer(name, cmd...); err != nil {
+ c.Fatalf("Failed to launch server container: %v", err)
+ }
+ return name
+}
+
+func getExternalAddress(c *check.C) net.IP {
+ iface, err := net.InterfaceByName("eth0")
+ if err != nil {
+ c.Skip(fmt.Sprintf("Test not running with `make test`. Interface eth0 not found: %v", err))
+ }
+
+ ifaceAddrs, err := iface.Addrs()
+ if err != nil || len(ifaceAddrs) == 0 {
+ c.Fatalf("Error retrieving addresses for eth0: %v (%d addresses)", err, len(ifaceAddrs))
+ }
+
+ ifaceIP, _, err := net.ParseCIDR(ifaceAddrs[0].String())
+ if err != nil {
+ c.Fatalf("Error retrieving the IP for eth0: %s", err)
+ }
+
+ return ifaceIP
+}
+
+func getContainerLogs(c *check.C, containerID string) string {
+ out, _ := dockerCmd(c, "logs", containerID)
+ return strings.Trim(out, "\r\n")
+}
+
+func getContainerStatus(c *check.C, containerID string) string {
+ out, err := inspectField(containerID, "State.Running")
+ c.Assert(err, check.IsNil)
+ return out
+}
+
+func (s *DockerSuite) TestNetworkNat(c *check.C) {
+ testRequires(c, SameHostDaemon, NativeExecDriver)
+ msg := "it works"
+ startServerContainer(c, msg, 8080)
+ endpoint := getExternalAddress(c)
+ conn, err := net.Dial("tcp", fmt.Sprintf("%s:%d", endpoint.String(), 8080))
+ if err != nil {
+ c.Fatalf("Failed to connect to container (%v)", err)
+ }
+ data, err := ioutil.ReadAll(conn)
+ conn.Close()
+ if err != nil {
+ c.Fatal(err)
+ }
+ final := strings.TrimRight(string(data), "\n")
+ if final != msg {
+ c.Fatalf("Expected message %q but received %q", msg, final)
+ }
+}
+
+func (s *DockerSuite) TestNetworkLocalhostTCPNat(c *check.C) {
+ testRequires(c, SameHostDaemon, NativeExecDriver)
+ var (
+ msg = "hi yall"
+ )
+ startServerContainer(c, msg, 8081)
+ conn, err := net.Dial("tcp", "localhost:8081")
+ if err != nil {
+ c.Fatalf("Failed to connect to container (%v)", err)
+ }
+ data, err := ioutil.ReadAll(conn)
+ conn.Close()
+ if err != nil {
+ c.Fatal(err)
+ }
+ final := strings.TrimRight(string(data), "\n")
+ if final != msg {
+ c.Fatalf("Expected message %q but received %q", msg, final)
+ }
+}
+
+func (s *DockerSuite) TestNetworkLoopbackNat(c *check.C) {
+ testRequires(c, SameHostDaemon, NativeExecDriver)
+ msg := "it works"
+ startServerContainer(c, msg, 8080)
+ endpoint := getExternalAddress(c)
+ out, _ := dockerCmd(c, "run", "-t", "--net=container:server", "busybox",
+ "sh", "-c", fmt.Sprintf("stty raw && nc -w 5 %s 8080", endpoint.String()))
+ final := strings.TrimRight(string(out), "\n")
+ if final != msg {
+ c.Fatalf("Expected message %q but received %q", msg, final)
+ }
+} diff --git a/integration-cli/docker_cli_network_test.go b/integration-cli/docker_cli_network_test.go new file mode 100644 index 00000000..08b225d2 --- /dev/null +++ b/integration-cli/docker_cli_network_test.go @@ -0,0 +1,47 @@ +// +build experimental
+
+package main
+
+import (
+ "strings"
+
+ "github.com/go-check/check"
+)
+
+func assertNwIsAvailable(c *check.C, name string) {
+ if 
!isNwPresent(c, name) {
+ c.Fatalf("Network %s not found in network ls output", name)
+ }
+}
+
+func assertNwNotAvailable(c *check.C, name string) {
+ if isNwPresent(c, name) {
+ c.Fatalf("Found network %s in network ls output", name)
+ }
+}
+
+func isNwPresent(c *check.C, name string) bool {
+ out, _ := dockerCmd(c, "network", "ls")
+ lines := strings.Split(out, "\n")
+ for i := 1; i < len(lines)-1; i++ {
+ if strings.Contains(lines[i], name) {
+ return true
+ }
+ }
+ return false
+}
+
+func (s *DockerSuite) TestDockerNetworkLsDefault(c *check.C) {
+ defaults := []string{"bridge", "host", "none"}
+ for _, nn := range defaults {
+ assertNwIsAvailable(c, nn)
+ }
+}
+
+func (s *DockerSuite) TestDockerNetworkCreateDelete(c *check.C) {
+ dockerCmd(c, "network", "create", "test")
+ assertNwIsAvailable(c, "test")
+
+ dockerCmd(c, "network", "rm", "test")
+ assertNwNotAvailable(c, "test")
+} diff --git a/integration-cli/docker_cli_pause_test.go b/integration-cli/docker_cli_pause_test.go new file mode 100644 index 00000000..4e32dfc1 --- /dev/null +++ b/integration-cli/docker_cli_pause_test.go @@ -0,0 +1,94 @@ +package main
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestPause(c *check.C) {
+ defer unpauseAllContainers()
+
+ name := "testeventpause"
+ out, _ := dockerCmd(c, "images", "-q")
+ image := strings.Split(out, "\n")[0]
+ dockerCmd(c, "run", "-d", "--name", name, image, "top")
+
+ dockerCmd(c, "pause", name)
+ pausedContainers, err := getSliceOfPausedContainers()
+ if err != nil {
+ c.Fatalf("error thrown while checking if containers were paused: %v", err)
+ }
+ if len(pausedContainers) != 1 {
+ c.Fatalf("there should be one paused container, not %d", len(pausedContainers))
+ }
+
+ dockerCmd(c, "unpause", name)
+
+ out, _ = dockerCmd(c, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(c).Unix()))
+ events := strings.Split(out, "\n")
+ if len(events) <= 1 {
+ c.Fatalf("Missing expected event")
+ }
+
+ pauseEvent := strings.Fields(events[len(events)-3])
+ unpauseEvent := strings.Fields(events[len(events)-2])
+
+ if pauseEvent[len(pauseEvent)-1] != "pause" {
+ c.Fatalf("event should be pause, not %#v", pauseEvent)
+ }
+ if unpauseEvent[len(unpauseEvent)-1] != "unpause" {
+ c.Fatalf("event should be unpause, not %#v", unpauseEvent)
+ }
+
+}
+
+func (s *DockerSuite) TestPauseMultipleContainers(c *check.C) {
+ defer unpauseAllContainers()
+
+ containers := []string{
+ "testpausewithmorecontainers1",
+ "testpausewithmorecontainers2",
+ }
+ out, _ := dockerCmd(c, "images", "-q")
+ image := strings.Split(out, "\n")[0]
+ for _, name := range containers {
+ dockerCmd(c, "run", "-d", "--name", name, image, "top")
+ }
+ dockerCmd(c, append([]string{"pause"}, containers...)...)
+ pausedContainers, err := getSliceOfPausedContainers()
+ if err != nil {
+ c.Fatalf("error thrown while checking if containers were paused: %v", err)
+ }
+ if len(pausedContainers) != len(containers) {
+ c.Fatalf("there should be %d paused containers, not %d", len(containers), len(pausedContainers))
+ }
+
+ dockerCmd(c, append([]string{"unpause"}, containers...)...)
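+ // The daemon logs one pause event per container followed by one unpause
+ // event per container, and the trailing newline leaves an empty final
+ // element after Split below. Hence, for N containers, the pause events
+ // sit at index len(events)-2N-1+i and the unpause events at
+ // len(events)-N-1+i.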
+
+ out, _ = dockerCmd(c, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(c).Unix()))
+ events := strings.Split(out, "\n")
+ if len(events) <= len(containers)*3-2 {
+ c.Fatalf("Missing expected event")
+ }
+
+ pauseEvents := make([][]string, len(containers))
+ unpauseEvents := make([][]string, len(containers))
+ for i := range containers {
+ pauseEvents[i] = strings.Fields(events[len(events)-len(containers)*2-1+i])
+ unpauseEvents[i] = strings.Fields(events[len(events)-len(containers)-1+i])
+ }
+
+ for _, pauseEvent := range pauseEvents {
+ if pauseEvent[len(pauseEvent)-1] != "pause" {
+ c.Fatalf("event should be pause, not %#v", pauseEvent)
+ }
+ }
+ for _, unpauseEvent := range unpauseEvents {
+ if unpauseEvent[len(unpauseEvent)-1] != "unpause" {
+ c.Fatalf("event should be unpause, not %#v", unpauseEvent)
+ }
+ }
+
+} diff --git a/integration-cli/docker_cli_port_test.go b/integration-cli/docker_cli_port_test.go new file mode 100644 index 00000000..63bfc9a0 --- /dev/null +++ b/integration-cli/docker_cli_port_test.go @@ -0,0 +1,172 @@ +package main
+
+import (
+ "fmt"
+ "regexp"
+ "sort"
+ "strings"
+
+ "github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestPortList(c *check.C) {
+
+ // one port
+ out, _ := dockerCmd(c, "run", "-d", "-p", "9876:80", "busybox", "top")
+ firstID := strings.TrimSpace(out)
+
+ out, _ = dockerCmd(c, "port", firstID, "80")
+
+ if !assertPortList(c, out, []string{"0.0.0.0:9876"}) {
+ c.Error("Port list is not correct")
+ }
+
+ out, _ = dockerCmd(c, "port", firstID)
+
+ if !assertPortList(c, out, []string{"80/tcp -> 0.0.0.0:9876"}) {
+ c.Error("Port list is not correct")
+ }
+ dockerCmd(c, "rm", "-f", firstID)
+
+ // three ports
+ out, _ = dockerCmd(c, "run", "-d",
+ "-p", "9876:80",
+ "-p", "9877:81",
+ "-p", "9878:82",
+ "busybox", "top")
+ ID := strings.TrimSpace(out)
+
+ out, _ = dockerCmd(c, "port", ID, "80")
+
+ if !assertPortList(c, out, []string{"0.0.0.0:9876"}) {
+ c.Error("Port list is not correct")
+ }
+
+ out, _ = dockerCmd(c, "port", ID)
+
+ if !assertPortList(c, out, []string{
+ "80/tcp -> 0.0.0.0:9876",
+ "81/tcp -> 0.0.0.0:9877",
+ "82/tcp -> 0.0.0.0:9878"}) {
+ c.Error("Port list is not correct")
+ }
+ dockerCmd(c, "rm", "-f", ID)
+
+ // more than one port mapped to the same container port
+ out, _ = dockerCmd(c, "run", "-d",
+ "-p", "9876:80",
+ "-p", "9999:80",
+ "-p", "9877:81",
+ "-p", "9878:82",
+ "busybox", "top")
+ ID = strings.TrimSpace(out)
+
+ out, _ = dockerCmd(c, "port", ID, "80")
+
+ if !assertPortList(c, out, []string{"0.0.0.0:9876", "0.0.0.0:9999"}) {
+ c.Error("Port list is not correct")
+ }
+
+ out, _ = dockerCmd(c, "port", ID)
+
+ if !assertPortList(c, out, []string{
+ "80/tcp -> 0.0.0.0:9876",
+ "80/tcp -> 0.0.0.0:9999",
+ "81/tcp -> 0.0.0.0:9877",
+ "82/tcp -> 0.0.0.0:9878"}) {
+ c.Error("Port list is not correct\n", out)
+ }
+ dockerCmd(c, "rm", "-f", ID)
+
+}
+
+func assertPortList(c *check.C, out string, expected []string) bool {
+ lines := strings.Split(strings.Trim(out, "\n "), "\n")
+ if len(lines) != len(expected) {
+ c.Errorf("different size lists %s, %d, %d", out, len(lines), len(expected))
+ return false
+ }
+ sort.Strings(lines)
+ sort.Strings(expected)
+
+ for i := 0; i < len(expected); i++ {
+ if lines[i] != expected[i] {
+ c.Error("|" + lines[i] + "!=" + expected[i] + "|")
+ return false
+ }
+ }
+
+ return true
+}
+
+func stopRemoveContainer(id string, c *check.C) {
+ dockerCmd(c, "rm", "-f", id)
+}
+
+func (s *DockerSuite) TestUnpublishedPortsInPsOutput(c 
*check.C) {
+ // Run busybox with command line expose (equivalent to EXPOSE in image's Dockerfile) for the following ports
+ port1 := 80
+ port2 := 443
+ expose1 := fmt.Sprintf("--expose=%d", port1)
+ expose2 := fmt.Sprintf("--expose=%d", port2)
+ dockerCmd(c, "run", "-d", expose1, expose2, "busybox", "sleep", "5")
+
+ // Check docker ps output for last created container reports the unpublished ports
+ unpPort1 := fmt.Sprintf("%d/tcp", port1)
+ unpPort2 := fmt.Sprintf("%d/tcp", port2)
+ out, _ := dockerCmd(c, "ps", "-n=1")
+ if !strings.Contains(out, unpPort1) || !strings.Contains(out, unpPort2) {
+ c.Errorf("Missing unpublished port(s) (%s, %s) in docker ps output: %s", unpPort1, unpPort2, out)
+ }
+
+ // Run the container forcing to publish the exposed ports
+ dockerCmd(c, "run", "-d", "-P", expose1, expose2, "busybox", "sleep", "5")
+
+ // Check docker ps output for last created container reports the exposed ports in the port bindings
+ expBndRegx1 := regexp.MustCompile(`0.0.0.0:\d\d\d\d\d->` + unpPort1)
+ expBndRegx2 := regexp.MustCompile(`0.0.0.0:\d\d\d\d\d->` + unpPort2)
+ out, _ = dockerCmd(c, "ps", "-n=1")
+ if !expBndRegx1.MatchString(out) || !expBndRegx2.MatchString(out) {
+ c.Errorf("Cannot find expected port binding port(s) (0.0.0.0:xxxxx->%s, 0.0.0.0:xxxxx->%s) in docker ps output:\n%s",
+ unpPort1, unpPort2, out)
+ }
+
+ // Run the container specifying explicit port bindings for the exposed ports
+ offset := 10000
+ pFlag1 := fmt.Sprintf("%d:%d", offset+port1, port1)
+ pFlag2 := fmt.Sprintf("%d:%d", offset+port2, port2)
+ out, _ = dockerCmd(c, "run", "-d", "-p", pFlag1, "-p", pFlag2, expose1, expose2, "busybox", "sleep", "5")
+ id := strings.TrimSpace(out)
+
+ // Check docker ps output for last created container reports the specified port mappings
+ expBnd1 := fmt.Sprintf("0.0.0.0:%d->%s", offset+port1, unpPort1)
+ expBnd2 := fmt.Sprintf("0.0.0.0:%d->%s", offset+port2, unpPort2)
+ out, _ = dockerCmd(c, "ps", "-n=1")
+ if !strings.Contains(out, expBnd1) || !strings.Contains(out, expBnd2) {
+ c.Errorf("Cannot find expected port binding(s) (%s, %s) in docker ps output: %s", expBnd1, expBnd2, out)
+ }
+ // Remove container now otherwise it will interfere with next test
+ stopRemoveContainer(id, c)
+
+ // Run the container with explicit port bindings and no exposed ports
+ out, _ = dockerCmd(c, "run", "-d", "-p", pFlag1, "-p", pFlag2, "busybox", "sleep", "5")
+ id = strings.TrimSpace(out)
+
+ // Check docker ps output for last created container reports the specified port mappings
+ out, _ = dockerCmd(c, "ps", "-n=1")
+ if !strings.Contains(out, expBnd1) || !strings.Contains(out, expBnd2) {
+ c.Errorf("Cannot find expected port binding(s) (%s, %s) in docker ps output: %s", expBnd1, expBnd2, out)
+ }
+ // Remove container now otherwise it will interfere with next test
+ stopRemoveContainer(id, c)
+
+ // Run the container with one unpublished exposed port and one explicit port binding
+ dockerCmd(c, "run", "-d", expose1, "-p", pFlag2, "busybox", "sleep", "5")
+
+ // Check docker ps output for last created container reports the specified unpublished port and port mapping
+ out, _ = dockerCmd(c, "ps", "-n=1")
+ if !strings.Contains(out, unpPort1) || !strings.Contains(out, expBnd2) {
+ c.Errorf("Missing unpublished ports or port binding (%s, %s) in docker ps output: %s", unpPort1, expBnd2, out)
+ }
+} diff --git a/integration-cli/docker_cli_port_unix_test.go b/integration-cli/docker_cli_port_unix_test.go new file mode 100644 index 00000000..0988ca96 --- /dev/null +++ 
b/integration-cli/docker_cli_port_unix_test.go @@ -0,0 +1,56 @@ +// +build !windows + +package main + +import ( + "net" + "strings" + + "github.com/go-check/check" +) + +func (s *DockerSuite) TestPortHostBinding(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "-p", "9876:80", "busybox", + "nc", "-l", "-p", "80") + firstID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", firstID, "80") + + if !assertPortList(c, out, []string{"0.0.0.0:9876"}) { + c.Error("Port list is not correct") + } + + dockerCmd(c, "run", "--net=host", "busybox", + "nc", "localhost", "9876") + + dockerCmd(c, "rm", "-f", firstID) + + if _, _, err := dockerCmdWithError(c, "run", "--net=host", "busybox", + "nc", "localhost", "9876"); err == nil { + c.Error("Port is still bound after the Container is removed") + } +} + +func (s *DockerSuite) TestPortExposeHostBinding(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "-P", "--expose", "80", "busybox", + "nc", "-l", "-p", "80") + firstID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", firstID, "80") + + _, exposedPort, err := net.SplitHostPort(out) + + if err != nil { + c.Fatal(out, err) + } + + dockerCmd(c, "run", "--net=host", "busybox", + "nc", "localhost", strings.TrimSpace(exposedPort)) + + dockerCmd(c, "rm", "-f", firstID) + + if _, _, err = dockerCmdWithError(c, "run", "--net=host", "busybox", + "nc", "localhost", strings.TrimSpace(exposedPort)); err == nil { + c.Error("Port is still bound after the Container is removed") + } +} diff --git a/integration-cli/docker_cli_proxy_test.go b/integration-cli/docker_cli_proxy_test.go new file mode 100644 index 00000000..8b55c67d --- /dev/null +++ b/integration-cli/docker_cli_proxy_test.go @@ -0,0 +1,62 @@ +package main + +import ( + "net" + "os/exec" + "strings" + + "github.com/go-check/check" +) + +func (s *DockerSuite) TestCliProxyDisableProxyUnixSock(c *check.C) { + testRequires(c, SameHostDaemon) // test is valid when DOCKER_HOST=unix://.. 
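+ // HTTP_PROXY below points at a port on which nothing listens: if the
+ // client wrongly routed its unix-socket connection through the proxy,
+ // the info call would fail, so a clean run shows the proxy is ignored
+ // for socket connections.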
+
+ cmd := exec.Command(dockerBinary, "info")
+ cmd.Env = appendBaseEnv([]string{"HTTP_PROXY=http://127.0.0.1:9999"})
+
+ if out, _, err := runCommandWithOutput(cmd); err != nil {
+ c.Fatal(err, out)
+ }
+
+}
+
+// Can't use localhost here since go has a special case to not use proxy if connecting to localhost
+// See https://golang.org/pkg/net/http/#ProxyFromEnvironment
+func (s *DockerDaemonSuite) TestCliProxyProxyTCPSock(c *check.C) {
+ testRequires(c, SameHostDaemon)
+ // get the IP to use to connect since we can't use localhost
+ addrs, err := net.InterfaceAddrs()
+ if err != nil {
+ c.Fatal(err)
+ }
+ var ip string
+ for _, addr := range addrs {
+ sAddr := addr.String()
+ if !strings.Contains(sAddr, "127.0.0.1") {
+ addrArr := strings.Split(sAddr, "/")
+ ip = addrArr[0]
+ break
+ }
+ }
+
+ if ip == "" {
+ c.Fatal("could not find ip to connect to")
+ }
+
+ if err := s.d.Start("-H", "tcp://"+ip+":2375"); err != nil {
+ c.Fatal(err)
+ }
+
+ cmd := exec.Command(dockerBinary, "info")
+ cmd.Env = []string{"DOCKER_HOST=tcp://" + ip + ":2375", "HTTP_PROXY=127.0.0.1:9999"}
+ if out, _, err := runCommandWithOutput(cmd); err == nil {
+ c.Fatal(err, out)
+ }
+
+ // Test with no_proxy: with the host exempted, info should succeed again.
+ // A fresh command is built because an exec.Cmd cannot be re-run, and the
+ // NO_PROXY entry must be part of the environment actually used.
+ cmd = exec.Command(dockerBinary, "info")
+ cmd.Env = []string{"DOCKER_HOST=tcp://" + ip + ":2375", "HTTP_PROXY=127.0.0.1:9999", "NO_PROXY=" + ip}
+ if out, _, err := runCommandWithOutput(cmd); err != nil {
+ c.Fatal(err, out)
+ }
+
+} diff --git a/integration-cli/docker_cli_ps_test.go b/integration-cli/docker_cli_ps_test.go new file mode 100644 index 00000000..3b8f6f99 --- /dev/null +++ b/integration-cli/docker_cli_ps_test.go @@ -0,0 +1,579 @@ +package main
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestPsListContainers(c *check.C) {
+
+ out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
+ firstID := strings.TrimSpace(out)
+
+ out, _ = dockerCmd(c, "run", "-d", "busybox", "top")
+ secondID := strings.TrimSpace(out)
+
+ // not long running
+ out, _ = dockerCmd(c, "run", "-d", "busybox", "true")
+ thirdID := strings.TrimSpace(out)
+
+ out, _ = dockerCmd(c, "run", "-d", "busybox", "top")
+ fourthID := strings.TrimSpace(out)
+
+ // make sure the second is running
+ if err := waitRun(secondID); err != nil {
+ c.Fatalf("waiting for container failed: %v", err)
+ }
+
+ // make sure third one is not running
+ dockerCmd(c, "wait", thirdID)
+
+ // make sure the fourth is running
+ if err := waitRun(fourthID); err != nil {
+ c.Fatalf("waiting for container failed: %v", err)
+ }
+
+ // all
+ out, _ = dockerCmd(c, "ps", "-a")
+ if !assertContainerList(out, []string{fourthID, thirdID, secondID, firstID}) {
+ c.Errorf("Container list is not in the correct order: %s", out)
+ }
+
+ // running
+ out, _ = dockerCmd(c, "ps")
+ if !assertContainerList(out, []string{fourthID, secondID, firstID}) {
+ c.Errorf("Container list is not in the correct order: %s", out)
+ }
+
+ // from here on, the '-a' flag is ignored
+
+ // limit
+ out, _ = dockerCmd(c, "ps", "-n=2", "-a")
+ expected := []string{fourthID, thirdID}
+ if !assertContainerList(out, expected) {
+ c.Errorf("Container list is not in the correct order: %s", out)
+ }
+
+ out, _ = dockerCmd(c, "ps", "-n=2")
+ if !assertContainerList(out, expected) {
+ c.Errorf("Container list is not in the correct order: %s", out)
+ }
+
+ // since
+ out, _ = dockerCmd(c, "ps", "--since", firstID, "-a")
+ expected = []string{fourthID, thirdID, secondID}
+ if !assertContainerList(out, expected) {
+ c.Errorf("Container list is not in the correct 
order: %s", out) + } + + out, _ = dockerCmd(c, "ps", "--since", firstID) + if !assertContainerList(out, expected) { + c.Errorf("Container list is not in the correct order: %s", out) + } + + // before + out, _ = dockerCmd(c, "ps", "--before", thirdID, "-a") + expected = []string{secondID, firstID} + if !assertContainerList(out, expected) { + c.Errorf("Container list is not in the correct order: %s", out) + } + + out, _ = dockerCmd(c, "ps", "--before", thirdID) + if !assertContainerList(out, expected) { + c.Errorf("Container list is not in the correct order: %s", out) + } + + // since & before + out, _ = dockerCmd(c, "ps", "--since", firstID, "--before", fourthID, "-a") + expected = []string{thirdID, secondID} + if !assertContainerList(out, expected) { + c.Errorf("Container list is not in the correct order: %s", out) + } + + out, _ = dockerCmd(c, "ps", "--since", firstID, "--before", fourthID) + if !assertContainerList(out, expected) { + c.Errorf("Container list is not in the correct order: %s", out) + } + + // since & limit + out, _ = dockerCmd(c, "ps", "--since", firstID, "-n=2", "-a") + expected = []string{fourthID, thirdID} + + if !assertContainerList(out, expected) { + c.Errorf("Container list is not in the correct order: %s", out) + } + + out, _ = dockerCmd(c, "ps", "--since", firstID, "-n=2") + if !assertContainerList(out, expected) { + c.Errorf("Container list is not in the correct order: %s", out) + } + + // before & limit + out, _ = dockerCmd(c, "ps", "--before", fourthID, "-n=1", "-a") + expected = []string{thirdID} + if !assertContainerList(out, expected) { + c.Errorf("Container list is not in the correct order: %s", out) + } + + out, _ = dockerCmd(c, "ps", "--before", fourthID, "-n=1") + if !assertContainerList(out, expected) { + c.Errorf("Container list is not in the correct order: %s", out) + } + + out, _ = dockerCmd(c, "ps", "--since", firstID, "--before", fourthID, "-n=1", "-a") + expected = []string{thirdID} + if !assertContainerList(out, expected) { + c.Errorf("Container list is not in the correct order: %s", out) + } + + out, _ = dockerCmd(c, "ps", "--since", firstID, "--before", fourthID, "-n=1") + if !assertContainerList(out, expected) { + c.Errorf("Container list is not in the correct order: %s", out) + } + +} + +func assertContainerList(out string, expected []string) bool { + lines := strings.Split(strings.Trim(out, "\n "), "\n") + if len(lines)-1 != len(expected) { + return false + } + + containerIDIndex := strings.Index(lines[0], "CONTAINER ID") + for i := 0; i < len(expected); i++ { + foundID := lines[i+1][containerIDIndex : containerIDIndex+12] + if foundID != expected[i][:12] { + return false + } + } + + return true +} + +func (s *DockerSuite) TestPsListContainersSize(c *check.C) { + dockerCmd(c, "run", "-d", "busybox", "echo", "hello") + + baseOut, _ := dockerCmd(c, "ps", "-s", "-n=1") + baseLines := strings.Split(strings.Trim(baseOut, "\n "), "\n") + baseSizeIndex := strings.Index(baseLines[0], "SIZE") + baseFoundsize := baseLines[1][baseSizeIndex:] + baseBytes, err := strconv.Atoi(strings.Split(baseFoundsize, " ")[0]) + if err != nil { + c.Fatal(err) + } + + name := "test_size" + out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo 1 > test") + id, err := getIDByName(name) + if err != nil { + c.Fatal(err) + } + + runCmd := exec.Command(dockerBinary, "ps", "-s", "-n=1") + + wait := make(chan struct{}) + go func() { + out, _, err = runCommandWithOutput(runCmd) + close(wait) + }() + select { + case <-wait: + case <-time.After(3 * 
time.Second):
+ c.Fatalf("Calling \"docker ps -s\" timed out!")
+ }
+ if err != nil {
+ c.Fatal(out, err)
+ }
+ lines := strings.Split(strings.Trim(out, "\n "), "\n")
+ if len(lines) != 2 {
+ c.Fatalf("Expected 2 lines for 'ps -s -n=1' output, got %d", len(lines))
+ }
+ sizeIndex := strings.Index(lines[0], "SIZE")
+ idIndex := strings.Index(lines[0], "CONTAINER ID")
+ foundID := lines[1][idIndex : idIndex+12]
+ if foundID != id[:12] {
+ c.Fatalf("Expected id %s, got %s", id[:12], foundID)
+ }
+ expectedSize := fmt.Sprintf("%d B", (2 + baseBytes))
+ foundSize := lines[1][sizeIndex:]
+ if !strings.Contains(foundSize, expectedSize) {
+ c.Fatalf("Expected size %q, got %q", expectedSize, foundSize)
+ }
+
+}
+
+func (s *DockerSuite) TestPsListContainersFilterStatus(c *check.C) {
+ // FIXME: this should test paused, but it makes things hang and it's wonky
+ // this is because paused containers can't be controlled by signals
+
+ // start exited container
+ out, _ := dockerCmd(c, "run", "-d", "busybox")
+ firstID := strings.TrimSpace(out)
+
+ // make sure the exited container is not running
+ dockerCmd(c, "wait", firstID)
+
+ // start running container
+ out, _ = dockerCmd(c, "run", "-itd", "busybox")
+ secondID := strings.TrimSpace(out)
+
+ // filter containers by exited
+ out, _ = dockerCmd(c, "ps", "-q", "--filter=status=exited")
+ containerOut := strings.TrimSpace(out)
+ if containerOut != firstID[:12] {
+ c.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out)
+ }
+
+ out, _ = dockerCmd(c, "ps", "-a", "-q", "--filter=status=running")
+ containerOut = strings.TrimSpace(out)
+ if containerOut != secondID[:12] {
+ c.Fatalf("Expected id %s, got %s for running filter, output: %q", secondID[:12], containerOut, out)
+ }
+
+ out, _, _ = dockerCmdWithTimeout(time.Second*60, "ps", "-a", "-q", "--filter=status=rubbish")
+ if !strings.Contains(out, "Unrecognised filter value for status") {
+ c.Fatalf("Expected error response due to invalid status filter output: %q", out)
+ }
+
+}
+
+func (s *DockerSuite) TestPsListContainersFilterID(c *check.C) {
+
+ // start container
+ out, _ := dockerCmd(c, "run", "-d", "busybox")
+ firstID := strings.TrimSpace(out)
+
+ // start another container
+ dockerCmd(c, "run", "-d", "busybox", "top")
+
+ // filter containers by id
+ out, _ = dockerCmd(c, "ps", "-a", "-q", "--filter=id="+firstID)
+ containerOut := strings.TrimSpace(out)
+ if containerOut != firstID[:12] {
+ c.Fatalf("Expected id %s, got %s for id filter, output: %q", firstID[:12], containerOut, out)
+ }
+
+}
+
+func (s *DockerSuite) TestPsListContainersFilterName(c *check.C) {
+
+ // start container
+ out, _ := dockerCmd(c, "run", "-d", "--name=a_name_to_match", "busybox")
+ firstID := strings.TrimSpace(out)
+
+ // start another container
+ dockerCmd(c, "run", "-d", "--name=b_name_to_match", "busybox", "top")
+
+ // filter containers by name
+ out, _ = dockerCmd(c, "ps", "-a", "-q", "--filter=name=a_name_to_match")
+ containerOut := strings.TrimSpace(out)
+ if containerOut != firstID[:12] {
+ c.Fatalf("Expected id %s, got %s for name filter, output: %q", firstID[:12], containerOut, out)
+ }
+
+}
+
+func (s *DockerSuite) TestPsListContainersFilterLabel(c *check.C) {
+ // start container
+ out, _ := dockerCmd(c, "run", "-d", "-l", "match=me", "-l", "second=tag", "busybox")
+ firstID := strings.TrimSpace(out)
+
+ // start another container
+ out, _ = dockerCmd(c, "run", "-d", "-l", "match=me too", "busybox")
+ secondID := strings.TrimSpace(out)
+
+ // start third 
container
+ out, _ = dockerCmd(c, "run", "-d", "-l", "nomatch=me", "busybox")
+ thirdID := strings.TrimSpace(out)
+
+ // filter containers by exact match
+ out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me")
+ containerOut := strings.TrimSpace(out)
+ if containerOut != firstID {
+ c.Fatalf("Expected id %s, got %s for label filter, output: %q", firstID, containerOut, out)
+ }
+
+ // filter containers by two labels
+ out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me", "--filter=label=second=tag")
+ containerOut = strings.TrimSpace(out)
+ if containerOut != firstID {
+ c.Fatalf("Expected id %s, got %s for label filter, output: %q", firstID, containerOut, out)
+ }
+
+ // filter containers by two labels, but expect not found because of AND behavior
+ out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me", "--filter=label=second=tag-no")
+ containerOut = strings.TrimSpace(out)
+ if containerOut != "" {
+ c.Fatalf("Expected nothing, got %s for label filter, output: %q", containerOut, out)
+ }
+
+ // filter containers by exact key
+ out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match")
+ containerOut = strings.TrimSpace(out)
+ if (!strings.Contains(containerOut, firstID) || !strings.Contains(containerOut, secondID)) || strings.Contains(containerOut, thirdID) {
+ c.Fatalf("Expected ids %s,%s, got %s for label filter, output: %q", firstID, secondID, containerOut, out)
+ }
+}
+
+func (s *DockerSuite) TestPsListContainersFilterExited(c *check.C) {
+
+ dockerCmd(c, "run", "-d", "--name", "top", "busybox", "top")
+
+ dockerCmd(c, "run", "--name", "zero1", "busybox", "true")
+ firstZero, err := getIDByName("zero1")
+ if err != nil {
+ c.Fatal(err)
+ }
+
+ dockerCmd(c, "run", "--name", "zero2", "busybox", "true")
+ secondZero, err := getIDByName("zero2")
+ if err != nil {
+ c.Fatal(err)
+ }
+
+ if out, _, err := dockerCmdWithError(c, "run", "--name", "nonzero1", "busybox", "false"); err == nil {
+ c.Fatal("Should fail.", out, err)
+ }
+
+ firstNonZero, err := getIDByName("nonzero1")
+ if err != nil {
+ c.Fatal(err)
+ }
+
+ if out, _, err := dockerCmdWithError(c, "run", "--name", "nonzero2", "busybox", "false"); err == nil {
+ c.Fatal("Should fail.", out, err)
+ }
+ secondNonZero, err := getIDByName("nonzero2")
+ if err != nil {
+ c.Fatal(err)
+ }
+
+ // filter containers by exited=0
+ out, _ := dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=exited=0")
+ ids := strings.Split(strings.TrimSpace(out), "\n")
+ if len(ids) != 2 {
+ c.Fatalf("Should be 2 zero-exit containers, got %d: %s", len(ids), out)
+ }
+ if ids[0] != secondZero {
+ c.Fatalf("First in list should be %q, got %q", secondZero, ids[0])
+ }
+ if ids[1] != firstZero {
+ c.Fatalf("Second in list should be %q, got %q", firstZero, ids[1])
+ }
+
+ out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=exited=1")
+ ids = strings.Split(strings.TrimSpace(out), "\n")
+ if len(ids) != 2 {
+ c.Fatalf("Should be 2 non-zero-exit containers, got %d", len(ids))
+ }
+ if ids[0] != secondNonZero {
+ c.Fatalf("First in list should be %q, got %q", secondNonZero, ids[0])
+ }
+ if ids[1] != firstNonZero {
+ c.Fatalf("Second in list should be %q, got %q", firstNonZero, ids[1])
+ }
+
+}
+
+func (s *DockerSuite) TestPsRightTagName(c *check.C) {
+ tag := "asybox:shmatest"
+ dockerCmd(c, "tag", "busybox", tag)
+
+ var id1 string
+ out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
+ id1 = strings.TrimSpace(string(out))
+
+ var id2 string
+ out, 
_ = dockerCmd(c, "run", "-d", tag, "top")
+ id2 = strings.TrimSpace(string(out))
+
+ var imageID string
+ out, _ = dockerCmd(c, "inspect", "-f", "{{.Id}}", "busybox")
+ imageID = strings.TrimSpace(string(out))
+
+ var id3 string
+ out, _ = dockerCmd(c, "run", "-d", imageID, "top")
+ id3 = strings.TrimSpace(string(out))
+
+ out, _ = dockerCmd(c, "ps", "--no-trunc")
+ lines := strings.Split(strings.TrimSpace(string(out)), "\n")
+ // skip header
+ lines = lines[1:]
+ if len(lines) != 3 {
+ c.Fatalf("There should be 3 running containers, got %d", len(lines))
+ }
+ for _, line := range lines {
+ f := strings.Fields(line)
+ switch f[0] {
+ case id1:
+ if f[1] != "busybox" {
+ c.Fatalf("Expected %s tag for id %s, got %s", "busybox", id1, f[1])
+ }
+ case id2:
+ if f[1] != tag {
+ c.Fatalf("Expected %s tag for id %s, got %s", tag, id2, f[1])
+ }
+ case id3:
+ if f[1] != imageID {
+ c.Fatalf("Expected %s imageID for id %s, got %s", imageID, id3, f[1])
+ }
+ default:
+ c.Fatalf("Unexpected id %s, expected %s and %s and %s", f[0], id1, id2, id3)
+ }
+ }
+}
+
+func (s *DockerSuite) TestPsLinkedWithNoTrunc(c *check.C) {
+ dockerCmd(c, "run", "--name=first", "-d", "busybox", "top")
+ dockerCmd(c, "run", "--name=second", "--link=first:first", "-d", "busybox", "top")
+
+ out, _ := dockerCmd(c, "ps", "--no-trunc")
+ lines := strings.Split(strings.TrimSpace(string(out)), "\n")
+ // strip header
+ lines = lines[1:]
+ expected := []string{"second", "first,second/first"}
+ var names []string
+ for _, l := range lines {
+ fields := strings.Fields(l)
+ names = append(names, fields[len(fields)-1])
+ }
+ if !reflect.DeepEqual(expected, names) {
+ c.Fatalf("Expected array: %v, got: %v", expected, names)
+ }
+}
+
+func (s *DockerSuite) TestPsGroupPortRange(c *check.C) {
+
+ portRange := "3800-3900"
+ dockerCmd(c, "run", "-d", "--name", "porttest", "-p", portRange+":"+portRange, "busybox", "top")
+
+ out, _ := dockerCmd(c, "ps")
+
+ // check that the port range is in the output
+ if !strings.Contains(string(out), portRange) {
+ c.Fatalf("docker ps output should have had the port range %q: %s", portRange, string(out))
+ }
+
+}
+
+func (s *DockerSuite) TestPsWithSize(c *check.C) {
+ dockerCmd(c, "run", "-d", "--name", "sizetest", "busybox", "top")
+
+ out, _ := dockerCmd(c, "ps", "--size")
+ if !strings.Contains(out, "virtual") {
+ c.Fatalf("docker ps with --size should show virtual size of container")
+ }
+}
+
+func (s *DockerSuite) TestPsListContainersFilterCreated(c *check.C) {
+ // create a container
+ out, _ := dockerCmd(c, "create", "busybox")
+ cID := strings.TrimSpace(out)
+ shortCID := cID[:12]
+
+ // Make sure it DOESN'T show up w/o a '-a' for normal 'ps'
+ out, _ = dockerCmd(c, "ps", "-q")
+ if strings.Contains(out, shortCID) {
+ c.Fatalf("Should have not seen '%s' in ps output:\n%s", shortCID, out)
+ }
+
+ // Make sure it DOES show up as 'Created' for 'ps -a'
+ out, _ = dockerCmd(c, "ps", "-a")
+
+ hits := 0
+ for _, line := range strings.Split(out, "\n") {
+ if !strings.Contains(line, shortCID) {
+ continue
+ }
+ hits++
+ if !strings.Contains(line, "Created") {
+ c.Fatalf("Missing 'Created' on '%s'", line)
+ }
+ }
+
+ if hits != 1 {
+ c.Fatalf("Should have seen '%s' in ps -a output once:%d\n%s", shortCID, hits, out)
+ }
+
+ // filter containers by 'create' - note, no -a needed
+ out, _ = dockerCmd(c, "ps", "-q", "-f", "status=created")
+ containerOut := strings.TrimSpace(out)
+ if !strings.HasPrefix(cID, containerOut) {
+ c.Fatalf("Expected id %s, got %s for filter, out: %s", cID, containerOut, out)
+ }
+}
+
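+// Illustrative sketch, not part of the original suite: `ps --format` accepts
+// an arbitrary Go text/template over the ps fields, which is what the format
+// tests below rely on. The helper name is hypothetical; dockerCmd and the
+// strings import already exist in this file.
+func (s *DockerSuite) examplePsNamesViaFormat(c *check.C) []string {
+ // --no-trunc keeps linked names such as "child,parent/linkedone" intact.
+ out, _ := dockerCmd(c, "ps", "--format", "{{.Names}}", "--no-trunc")
+ return strings.Split(strings.TrimSpace(out), "\n")
+}
+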
+func (s *DockerSuite) TestPsFormatMultiNames(c *check.C) { + //create 2 containers and link them + dockerCmd(c, "run", "--name=child", "-d", "busybox", "top") + dockerCmd(c, "run", "--name=parent", "--link=child:linkedone", "-d", "busybox", "top") + + //use the new format capabilities to only list the names and --no-trunc to get all names + out, _ := dockerCmd(c, "ps", "--format", "{{.Names}}", "--no-trunc") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + expected := []string{"parent", "child,parent/linkedone"} + var names []string + for _, l := range lines { + names = append(names, l) + } + if !reflect.DeepEqual(expected, names) { + c.Fatalf("Expected array with non-truncated names: %v, got: %v", expected, names) + } + + //now list without turning off truncation and make sure we only get the non-link names + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}}") + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + expected = []string{"parent", "child"} + var truncNames []string + for _, l := range lines { + truncNames = append(truncNames, l) + } + if !reflect.DeepEqual(expected, truncNames) { + c.Fatalf("Expected array with truncated names: %v, got: %v", expected, truncNames) + } + +} + +func (s *DockerSuite) TestPsFormatHeaders(c *check.C) { + // make sure no-container "docker ps" still prints the header row + out, _ := dockerCmd(c, "ps", "--format", "table {{.ID}}") + if out != "CONTAINER ID\n" { + c.Fatalf(`Expected 'CONTAINER ID\n', got %v`, out) + } + + // verify that "docker ps" with a container still prints the header row also + dockerCmd(c, "run", "--name=test", "-d", "busybox", "top") + out, _ = dockerCmd(c, "ps", "--format", "table {{.Names}}") + if out != "NAMES\ntest\n" { + c.Fatalf(`Expected 'NAMES\ntest\n', got %v`, out) + } +} + +func (s *DockerSuite) TestPsDefaultFormatAndQuiet(c *check.C) { + config := `{ + "psFormat": "{{ .ID }} default" +}` + d, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, check.IsNil) + defer os.RemoveAll(d) + + err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) + c.Assert(err, check.IsNil) + + out, _ := dockerCmd(c, "run", "--name=test", "-d", "busybox", "top") + id := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "--config", d, "ps", "-q") + if !strings.HasPrefix(id, strings.TrimSpace(out)) { + c.Fatalf("Expected to print only the container id, got %v\n", out) + } +} diff --git a/integration-cli/docker_cli_pull_test.go b/integration-cli/docker_cli_pull_test.go new file mode 100644 index 00000000..8bfca8db --- /dev/null +++ b/integration-cli/docker_cli_pull_test.go @@ -0,0 +1,662 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/docker/distribution/digest" + "github.com/go-check/check" +) + +// See issue docker/docker#8141 +func (s *DockerRegistrySuite) TestPullImageWithAliases(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + + repos := []string{} + for _, tag := range []string{"recent", "fresh"} { + repos = append(repos, fmt.Sprintf("%v:%v", repoName, tag)) + } + + // Tag and push the same image multiple times. + for _, repo := range repos { + dockerCmd(c, "tag", "busybox", repo) + dockerCmd(c, "push", repo) + } + + // Clear local images store. + args := append([]string{"rmi"}, repos...) + dockerCmd(c, args...) + + // Pull a single tag and verify it doesn't bring down all aliases. 
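+ // (Tags are independent pointers in the registry, so pulling one tag is
+ // not expected to fan out to its sibling tags of the same image.)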
+ dockerCmd(c, "pull", repos[0]) + dockerCmd(c, "inspect", repos[0]) + for _, repo := range repos[1:] { + if _, _, err := dockerCmdWithError(c, "inspect", repo); err == nil { + c.Fatalf("Image %v shouldn't have been pulled down", repo) + } + } +} + +// pulling library/hello-world should show verified message +func (s *DockerSuite) TestPullVerified(c *check.C) { + c.Skip("Skipping hub dependent test") + + // Image must be pulled from central repository to get verified message + // unless keychain is manually updated to contain the daemon's sign key. + + verifiedName := "hello-world" + + // pull it + expected := "The image you are pulling has been verified" + if out, exitCode, err := dockerCmdWithError(c, "pull", verifiedName); err != nil || !strings.Contains(out, expected) { + if err != nil || exitCode != 0 { + c.Skip(fmt.Sprintf("pulling the '%s' image from the registry has failed: %v", verifiedName, err)) + } + c.Fatalf("pulling a verified image failed. expected: %s\ngot: %s, %v", expected, out, err) + } + + // pull it again + if out, exitCode, err := dockerCmdWithError(c, "pull", verifiedName); err != nil || strings.Contains(out, expected) { + if err != nil || exitCode != 0 { + c.Skip(fmt.Sprintf("pulling the '%s' image from the registry has failed: %v", verifiedName, err)) + } + c.Fatalf("pulling a verified image failed. unexpected verify message\ngot: %s, %v", out, err) + } + +} + +// pulling an image from the central registry should work +func (s *DockerSuite) TestPullImageFromCentralRegistry(c *check.C) { + testRequires(c, Network) + + dockerCmd(c, "pull", "hello-world") +} + +// pulling a non-existing image from the central registry should return a non-zero exit code +func (s *DockerSuite) TestPullNonExistingImage(c *check.C) { + testRequires(c, Network) + + name := "sadfsadfasdf" + out, _, err := dockerCmdWithError(c, "pull", name) + + if err == nil || !strings.Contains(out, fmt.Sprintf("Error: image library/%s:latest not found", name)) { + c.Fatalf("expected non-zero exit status when pulling non-existing image: %s", out) + } +} + +// pulling an image from the central registry using official names should work +// ensure all pulls result in the same image +func (s *DockerSuite) TestPullImageOfficialNames(c *check.C) { + testRequires(c, Network) + + names := []string{ + "library/hello-world", + "docker.io/library/hello-world", + "index.docker.io/library/hello-world", + } + for _, name := range names { + out, exitCode, err := dockerCmdWithError(c, "pull", name) + if err != nil || exitCode != 0 { + c.Errorf("pulling the '%s' image from the registry has failed: %s", name, err) + continue + } + + // ensure we don't have multiple image names. 
+ out, _ = dockerCmd(c, "images") + if strings.Contains(out, name) { + c.Errorf("images should not have listed '%s'", name) + } + } +} + +func (s *DockerSuite) TestPullScratchNotAllowed(c *check.C) { + testRequires(c, Network) + + out, exitCode, err := dockerCmdWithError(c, "pull", "scratch") + if err == nil { + c.Fatal("expected pull of scratch to fail, but it didn't") + } + if exitCode != 1 { + c.Fatalf("pulling scratch expected exit code 1, got %d", exitCode) + } + if strings.Contains(out, "Pulling repository scratch") { + c.Fatalf("pulling scratch should not have begun: %s", out) + } + if !strings.Contains(out, "'scratch' is a reserved name") { + c.Fatalf("unexpected output pulling scratch: %s", out) + } +} + +// pulling an image with --all-tags=true +func (s *DockerSuite) TestPullImageWithAllTagFromCentralRegistry(c *check.C) { + testRequires(c, Network) + + dockerCmd(c, "pull", "busybox") + + outImageCmd, _ := dockerCmd(c, "images", "busybox") + + dockerCmd(c, "pull", "--all-tags=true", "busybox") + + outImageAllTagCmd, _ := dockerCmd(c, "images", "busybox") + + if strings.Count(outImageCmd, "busybox") >= strings.Count(outImageAllTagCmd, "busybox") { + c.Fatalf("Pulling with all tags should get more images") + } + + // FIXME has probably no effect (tags already pushed) + dockerCmd(c, "pull", "-a", "busybox") + + outImageAllTagCmd, _ = dockerCmd(c, "images", "busybox") + + if strings.Count(outImageCmd, "busybox") >= strings.Count(outImageAllTagCmd, "busybox") { + c.Fatalf("Pulling with all tags should get more images") + } +} + +func (s *DockerTrustSuite) TestTrustedPull(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-pull") + + // Try pull + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err := runCommandWithOutput(pullCmd) + if err != nil { + c.Fatalf("Error running trusted pull: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Tagging") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + dockerCmd(c, "rmi", repoName) + + // Try untrusted pull to ensure we pushed the tag to the registry + pullCmd = exec.Command(dockerBinary, "pull", "--disable-content-trust=true", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + if err != nil { + c.Fatalf("Error running trusted pull: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Status: Downloaded") { + c.Fatalf("Missing expected output on trusted pull with --disable-content-trust:\n%s", out) + } +} + +func (s *DockerTrustSuite) TestTrustedIsolatedPull(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-isolatd-pull") + + // Try pull (run from isolated directory without trust information) + pullCmd := exec.Command(dockerBinary, "--config", "/tmp/docker-isolated", "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err := runCommandWithOutput(pullCmd) + if err != nil { + c.Fatalf("Error running trusted pull: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Tagging") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + dockerCmd(c, "rmi", repoName) +} + +func (s *DockerTrustSuite) TestUntrustedPull(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + dockerCmd(c, "push", repoName) + dockerCmd(c, "rmi", repoName) + + // Try trusted pull on untrusted tag + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + 
out, _, err := runCommandWithOutput(pullCmd) + if err == nil { + c.Fatalf("Expected error running trusted pull of an unsigned tag, got none:\n%s", out) + } + + if !strings.Contains(string(out), "no trust data available") { + c.Fatalf("Missing expected output on trusted pull:\n%s", out) + } +} + +func (s *DockerTrustSuite) TestPullWhenCertExpired(c *check.C) { + c.Skip("Currently changes system time, causing instability") + repoName := s.setupTrustedImage(c, "trusted-cert-expired") + + // Certificates expire after 10 years + elevenYearsFromNow := time.Now().Add(time.Hour * 24 * 365 * 11) + + runAtDifferentDate(elevenYearsFromNow, func() { + // Try pull + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err := runCommandWithOutput(pullCmd) + if err == nil { + c.Fatalf("Expected error running trusted pull in the distant future, got none:\n%s", out) + } + + if !strings.Contains(string(out), "could not validate the path to a trusted root") { + c.Fatalf("Missing expected output on trusted pull in the distant future:\n%s", out) + } + }) + + runAtDifferentDate(elevenYearsFromNow, func() { + // Try pull + pullCmd := exec.Command(dockerBinary, "pull", "--disable-content-trust", repoName) + s.trustedCmd(pullCmd) + out, _, err := runCommandWithOutput(pullCmd) + if err != nil { + c.Fatalf("Error running untrusted pull in the distant future: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Status: Downloaded") { + c.Fatalf("Missing expected output on untrusted pull in the distant future:\n%s", out) + } + }) +} + +func (s *DockerTrustSuite) TestTrustedPullFromBadTrustServer(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclievilpull/trusted:latest", privateRegistryURL) + evilLocalConfigDir, err := ioutil.TempDir("", "evil-local-config-dir") + if err != nil { + c.Fatalf("Failed to create local temp dir") + } + + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + if err != nil { + c.Fatalf("Error running trusted push: %s\n%s", err, out) + } + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + dockerCmd(c, "rmi", repoName) + + // Try pull + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + if err != nil { + c.Fatalf("Error running trusted pull: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Tagging") { + c.Fatalf("Missing expected output on trusted pull:\n%s", out) + } + + dockerCmd(c, "rmi", repoName) + + // Kill the notary server, start a new "evil" one. + s.not.Close() + s.not, err = newTestNotary(c) + if err != nil { + c.Fatalf("Restarting notary server failed.") + } + + // In order to make an evil server, let's re-init a client (with a different trust dir) and push new data. 
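+ // The restarted notary server generates fresh signing keys, so the metadata it serves cannot be validated against the certificates the original client already trusts.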
+ // tag an image and upload it to the private registry + dockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) + + // Push up to the new server + pushCmd = exec.Command(dockerBinary, "--config", evilLocalConfigDir, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err = runCommandWithOutput(pushCmd) + if err != nil { + c.Fatalf("Error running trusted push: %s\n%s", err, out) + } + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + // Now, try pulling with the original client from this new trust server. This should fail. + pullCmd = exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + if err == nil { + c.Fatalf("Expected this pull to fail because of the different remote trust data, but it succeeded:\n%s", out) + } + + if !strings.Contains(string(out), "failed to validate data with current trusted certificates") { + c.Fatalf("Missing expected output on failed trusted pull:\n%s", out) + } +} + +func (s *DockerTrustSuite) TestTrustedPullWithExpiredSnapshot(c *check.C) { + c.Skip("Currently changes system time, causing instability") + repoName := fmt.Sprintf("%v/dockercliexpiredtimestamppull/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + // Push with default passphrases + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + if err != nil { + c.Fatalf("trusted push failed: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + dockerCmd(c, "rmi", repoName) + + // Snapshots last for three years, so this one should have expired + fourYearsLater := time.Now().Add(time.Hour * 24 * 365 * 4) + + // The pull should fail, since the expired snapshot cannot be transparently re-signed + runAtDifferentDate(fourYearsLater, func() { + // Try pull + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + if err == nil { + c.Fatalf("Missing expected error running trusted pull with expired snapshots") + } + + if !strings.Contains(string(out), "repository out-of-date") { + c.Fatalf("Missing expected output on trusted pull with expired snapshot:\n%s", out) + } + }) +} + +// Test that pull continues after the client has disconnected. #15589 +func (s *DockerTrustSuite) TestPullClientDisconnect(c *check.C) { + testRequires(c, Network) + + repoName := "hello-world:latest" + + dockerCmdWithError(c, "rmi", repoName) // clean just in case + + pullCmd := exec.Command(dockerBinary, "pull", repoName) + + stdout, err := pullCmd.StdoutPipe() + c.Assert(err, check.IsNil) + + err = pullCmd.Start() + c.Assert(err, check.IsNil) + + // cancel as soon as we get some output + buf := make([]byte, 10) + _, err = stdout.Read(buf) + c.Assert(err, check.IsNil) + + err = pullCmd.Process.Kill() + c.Assert(err, check.IsNil) + + maxAttempts := 20 + for i := 0; ; i++ { + if _, _, err := dockerCmdWithError(c, "inspect", repoName); err == nil { + break + } + if i >= maxAttempts { + c.Fatal("Timeout reached. 
Image was not pulled after client disconnected.") + } + time.Sleep(500 * time.Millisecond) + } + +} + +type idAndParent struct { + ID string + Parent string +} + +func inspectImage(c *check.C, imageRef string) idAndParent { + out, _ := dockerCmd(c, "inspect", imageRef) + var inspectOutput []idAndParent + err := json.Unmarshal([]byte(out), &inspectOutput) + if err != nil { + c.Fatal(err) + } + + return inspectOutput[0] +} + +func imageID(c *check.C, imageRef string) string { + return inspectImage(c, imageRef).ID +} + +func imageParent(c *check.C, imageRef string) string { + return inspectImage(c, imageRef).Parent +} + +// TestPullMigration verifies that pulling an image based on layers +// that already exist locally will reuse those existing layers. +func (s *DockerRegistrySuite) TestPullMigration(c *check.C) { + repoName := privateRegistryURL + "/dockercli/migration" + + baseImage := repoName + ":base" + _, err := buildImage(baseImage, fmt.Sprintf(` + FROM scratch + ENV IMAGE base + CMD echo %s + `, baseImage), true) + if err != nil { + c.Fatal(err) + } + + baseIDBeforePush := imageID(c, baseImage) + baseParentBeforePush := imageParent(c, baseImage) + + derivedImage := repoName + ":derived" + _, err = buildImage(derivedImage, fmt.Sprintf(` + FROM %s + CMD echo %s + `, baseImage, derivedImage), true) + if err != nil { + c.Fatal(err) + } + + derivedIDBeforePush := imageID(c, derivedImage) + + dockerCmd(c, "push", derivedImage) + + // Remove derived image from the local store + dockerCmd(c, "rmi", derivedImage) + + // Repull + dockerCmd(c, "pull", derivedImage) + + // Check that the parent of this pulled image is the original base + // image + derivedIDAfterPull1 := imageID(c, derivedImage) + derivedParentAfterPull1 := imageParent(c, derivedImage) + + if derivedIDAfterPull1 == derivedIDBeforePush { + c.Fatal("image's ID should have changed after deleting and pulling") + } + + if derivedParentAfterPull1 != baseIDBeforePush { + c.Fatalf("pulled image's parent ID (%s) does not match base image's ID (%s)", derivedParentAfterPull1, baseIDBeforePush) + } + + // Confirm that repushing and repulling does not change the computed ID + dockerCmd(c, "push", derivedImage) + dockerCmd(c, "rmi", derivedImage) + dockerCmd(c, "pull", derivedImage) + + derivedIDAfterPull2 := imageID(c, derivedImage) + derivedParentAfterPull2 := imageParent(c, derivedImage) + + if derivedIDAfterPull2 != derivedIDAfterPull1 { + c.Fatal("image's ID unexpectedly changed after a repush/repull") + } + + if derivedParentAfterPull2 != baseIDBeforePush { + c.Fatalf("pulled image's parent ID (%s) does not match base image's ID (%s)", derivedParentAfterPull2, baseIDBeforePush) + } + + // Remove everything, repull, and make sure everything uses computed IDs + dockerCmd(c, "rmi", baseImage, derivedImage) + dockerCmd(c, "pull", derivedImage) + + derivedIDAfterPull3 := imageID(c, derivedImage) + derivedParentAfterPull3 := imageParent(c, derivedImage) + derivedGrandparentAfterPull3 := imageParent(c, derivedParentAfterPull3) + + if derivedIDAfterPull3 != derivedIDAfterPull1 { + c.Fatal("image's ID unexpectedly changed after a second repull") + } + + if derivedParentAfterPull3 == baseIDBeforePush { + c.Fatalf("pulled image's parent ID (%s) should not match base image's original ID (%s)", derivedParentAfterPull3, baseIDBeforePush) + } + + if derivedGrandparentAfterPull3 == baseParentBeforePush { + c.Fatal("base image's parent ID should have been rewritten on pull") + } +} + +// TestPullMigrationRun verifies that pulling an image based on layers +// that already exist locally will result in an image that runs properly. +func (s *DockerRegistrySuite) TestPullMigrationRun(c *check.C) { + derivedImage := privateRegistryURL + "/dockercli/migration-run" + baseImage := "busybox" + + _, err := buildImage(derivedImage, fmt.Sprintf(` + FROM %s + RUN dd if=/dev/zero of=/file bs=1024 count=1024 + CMD echo %s + `, baseImage, derivedImage), true) + if err != nil { + c.Fatal(err) + } + + baseIDBeforePush := imageID(c, baseImage) + derivedIDBeforePush := imageID(c, derivedImage) + + dockerCmd(c, "push", derivedImage) + + // Remove derived image from the local store + dockerCmd(c, "rmi", derivedImage) + + // Repull + dockerCmd(c, "pull", derivedImage) + + // Check that this pulled image is based on the original base image + derivedIDAfterPull1 := imageID(c, derivedImage) + derivedParentAfterPull1 := imageParent(c, imageParent(c, derivedImage)) + + if derivedIDAfterPull1 == derivedIDBeforePush { + c.Fatal("image's ID should have changed after deleting and pulling") + } + + if derivedParentAfterPull1 != baseIDBeforePush { + c.Fatalf("pulled image's parent ID (%s) does not match base image's ID (%s)", derivedParentAfterPull1, baseIDBeforePush) + } + + // Make sure the image runs correctly + out, _ := dockerCmd(c, "run", "--rm", derivedImage) + if strings.TrimSpace(out) != derivedImage { + c.Fatalf("expected %s; got %s", derivedImage, out) + } + + // Confirm that repushing and repulling does not change the computed ID + dockerCmd(c, "push", derivedImage) + dockerCmd(c, "rmi", derivedImage) + dockerCmd(c, "pull", derivedImage) + + derivedIDAfterPull2 := imageID(c, derivedImage) + derivedParentAfterPull2 := imageParent(c, imageParent(c, derivedImage)) + + if derivedIDAfterPull2 != derivedIDAfterPull1 { + c.Fatal("image's ID unexpectedly changed after a repush/repull") + } + + if derivedParentAfterPull2 != baseIDBeforePush { + c.Fatalf("pulled image's parent ID (%s) does not match base image's ID (%s)", derivedParentAfterPull2, baseIDBeforePush) + } + + // Make sure the image still runs + out, _ = dockerCmd(c, "run", "--rm", derivedImage) + if strings.TrimSpace(out) != derivedImage { + c.Fatalf("expected %s; got %s", derivedImage, out) + } +} + +// TestPullConflict provides coverage of the situation where a computed +// strongID conflicts with some unverifiable data in the graph. +func (s *DockerRegistrySuite) TestPullConflict(c *check.C) { + repoName := privateRegistryURL + "/dockercli/conflict" + + _, err := buildImage(repoName, ` + FROM scratch + ENV IMAGE conflict + CMD echo conflict + `, true) + if err != nil { + c.Fatal(err) + } + + dockerCmd(c, "push", repoName) + + // Pull to make it content-addressable + dockerCmd(c, "rmi", repoName) + dockerCmd(c, "pull", repoName) + + IDBeforeLoad := imageID(c, repoName) + + // Load/save to turn this into an unverified image with the same ID + tmpDir, err := ioutil.TempDir("", "conflict-save-output") + if err != nil { + c.Fatalf("failed to create temporary directory: %s", err) + } + defer os.RemoveAll(tmpDir) + + tarFile := filepath.Join(tmpDir, "repo.tar") + + dockerCmd(c, "save", "-o", tarFile, repoName) + dockerCmd(c, "rmi", repoName) + dockerCmd(c, "load", "-i", tarFile) + + // Check that the ID is the same after save/load. 
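+ // save/load keeps the image ID but drops the verification data, producing exactly the unverifiable graph entry this test needs.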
+ IDAfterLoad := imageID(c, repoName) + + if IDAfterLoad != IDBeforeLoad { + c.Fatal("image's ID should be the same after save/load") + } + + // Repull + dockerCmd(c, "pull", repoName) + + // Check that the ID is now different because of the conflict. + IDAfterPull1 := imageID(c, repoName) + + // Expect the new ID to be SHA256(oldID) + expectedIDDigest, err := digest.FromBytes([]byte(IDBeforeLoad)) + if err != nil { + c.Fatalf("digest error: %v", err) + } + expectedID := expectedIDDigest.Hex() + if IDAfterPull1 != expectedID { + c.Fatalf("image's ID should have changed on pull to %s (got %s)", expectedID, IDAfterPull1) + } + + // A second pull should use the new ID again. + dockerCmd(c, "pull", repoName) + + IDAfterPull2 := imageID(c, repoName) + + if IDAfterPull2 != IDAfterPull1 { + c.Fatal("image's ID unexpectedly changed after a repull") + } +} diff --git a/integration-cli/docker_cli_push_test.go b/integration-cli/docker_cli_push_test.go new file mode 100644 index 00000000..111e9f33 --- /dev/null +++ b/integration-cli/docker_cli_push_test.go @@ -0,0 +1,367 @@ +package main + +import ( + "archive/tar" + "fmt" + "io/ioutil" + "os" + "os/exec" + "strings" + "time" + + "github.com/go-check/check" +) + +// Pushing an image to a private registry. +func (s *DockerRegistrySuite) TestPushBusyboxImage(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + // tag the image to upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + // push the image to the registry + dockerCmd(c, "push", repoName) +} + +// pushing an image without a prefix should throw an error +func (s *DockerSuite) TestPushUnprefixedRepo(c *check.C) { + if out, _, err := dockerCmdWithError(c, "push", "busybox"); err == nil { + c.Fatalf("pushing an unprefixed repo didn't result in a non-zero exit status: %s", out) + } +} + +func (s *DockerRegistrySuite) TestPushUntagged(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + + expected := "Repository does not exist" + if out, _, err := dockerCmdWithError(c, "push", repoName); err == nil { + c.Fatalf("pushing the image to the private registry should have failed: output %q", out) + } else if !strings.Contains(out, expected) { + c.Fatalf("pushing the image failed with an unexpected message: expected %q, got %q", expected, out) + } +} + +func (s *DockerRegistrySuite) TestPushBadTag(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox:latest", privateRegistryURL) + + expected := "does not exist" + + if out, _, err := dockerCmdWithError(c, "push", repoName); err == nil { + c.Fatalf("pushing the image to the private registry should have failed: output %q", out) + } else if !strings.Contains(out, expected) { + c.Fatalf("pushing the image failed with an unexpected message: expected %q, got %q", expected, out) + } +} + +func (s *DockerRegistrySuite) TestPushMultipleTags(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + repoTag1 := fmt.Sprintf("%v/dockercli/busybox:t1", privateRegistryURL) + repoTag2 := fmt.Sprintf("%v/dockercli/busybox:t2", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoTag1) + + dockerCmd(c, "tag", "busybox", repoTag2) + + dockerCmd(c, "push", repoName) + + // Ensure layer list is equivalent for repoTag1 and repoTag2 + out1, _ := dockerCmd(c, "pull", repoTag1) + if strings.Contains(out1, "Tag t1 not found") { + c.Fatalf("Unable to pull pushed image: %s", out1) + } + 
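// Collect the per-layer ": Image already exists" lines for each tag; both pulls should report the identical set of layers. +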
imageAlreadyExists := ": Image already exists" + var out1Lines []string + for _, outputLine := range strings.Split(out1, "\n") { + if strings.Contains(outputLine, imageAlreadyExists) { + out1Lines = append(out1Lines, outputLine) + } + } + + out2, _ := dockerCmd(c, "pull", repoTag2) + if strings.Contains(out2, "Tag t2 not found") { + c.Fatalf("Unable to pull pushed image: %s", out2) + } + var out2Lines []string + for _, outputLine := range strings.Split(out2, "\n") { + if strings.Contains(outputLine, imageAlreadyExists) { + out2Lines = append(out2Lines, outputLine) + } + } + + if len(out1Lines) != len(out2Lines) { + c.Fatalf("Mismatched output length:\n%s\n%s", out1, out2) + } + + for i := range out1Lines { + if out1Lines[i] != out2Lines[i] { + c.Fatalf("Mismatched output line:\n%s\n%s", out1Lines[i], out2Lines[i]) + } + } +} + +func (s *DockerRegistrySuite) TestPushInterrupt(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + if err := pushCmd.Start(); err != nil { + c.Fatalf("Failed to start pushing to private registry: %v", err) + } + + // Interrupt push (yes, we have no idea at what point it will get killed). + time.Sleep(200 * time.Millisecond) + if err := pushCmd.Process.Kill(); err != nil { + c.Fatalf("Failed to kill push process: %v", err) + } + if out, _, err := dockerCmdWithError(c, "push", repoName); err == nil { + if !strings.Contains(out, "already in progress") { + c.Fatalf("Push should still be in progress on the daemon side, but the second push succeeded: %v, %s", err, out) + } + } + // now wait until all of these pushes complete; + // if one failed with a timeout, the push below would return an error, + // so no extra handling is needed here + for exec.Command(dockerBinary, "push", repoName).Run() != nil { + } +} + +func (s *DockerRegistrySuite) TestPushEmptyLayer(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/emptylayer", privateRegistryURL) + emptyTarball, err := ioutil.TempFile("", "empty_tarball") + if err != nil { + c.Fatalf("Unable to create test file: %v", err) + } + tw := tar.NewWriter(emptyTarball) + err = tw.Close() + if err != nil { + c.Fatalf("Error creating empty tarball: %v", err) + } + freader, err := os.Open(emptyTarball.Name()) + if err != nil { + c.Fatalf("Could not open test tarball: %v", err) + } + + importCmd := exec.Command(dockerBinary, "import", "-", repoName) + importCmd.Stdin = freader + out, _, err := runCommandWithOutput(importCmd) + if err != nil { + c.Errorf("import failed with errors: %v, output: %q", err, out) + } + + // Now verify we can push it + if out, _, err := dockerCmdWithError(c, "push", repoName); err != nil { + c.Fatalf("pushing the image to the private registry has failed: %s, %v", out, err) + } +} + +func (s *DockerTrustSuite) TestTrustedPush(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + if err != nil { + c.Fatalf("Error running trusted push: %s\n%s", err, out) + } + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } +} + +func (s *DockerTrustSuite) TestTrustedPushWithFailingServer(c *check.C) { + repoName := 
fmt.Sprintf("%v/dockercli/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmdWithServer(pushCmd, "example/") + out, _, err := runCommandWithOutput(pushCmd) + if err == nil { + c.Fatalf("Missing error while running trusted push w/ no server") + } + + if !strings.Contains(string(out), "Error establishing connection to notary repository") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } +} + +func (s *DockerTrustSuite) TestTrustedPushWithoutServerAndUntrusted(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", "--disable-content-trust", repoName) + s.trustedCmdWithServer(pushCmd, "example/") + out, _, err := runCommandWithOutput(pushCmd) + if err != nil { + c.Fatalf("trusted push with no server and --disable-content-trust failed: %s\n%s", err, out) + } + + if strings.Contains(string(out), "Error establishing connection to notary repository") { + c.Fatalf("Missing expected output on trusted push with --disable-content-trust:\n%s", out) + } +} + +func (s *DockerTrustSuite) TestTrustedPushWithExistingTag(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + dockerCmd(c, "push", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + if err != nil { + c.Fatalf("trusted push failed: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push with existing tag:\n%s", out) + } +} + +func (s *DockerTrustSuite) TestTrustedPushWithExistingSignedTag(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclipushpush/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + // Do a trusted push + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + if err != nil { + c.Fatalf("trusted push failed: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push with existing tag:\n%s", out) + } + + // Do another trusted push + pushCmd = exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err = runCommandWithOutput(pushCmd) + if err != nil { + c.Fatalf("trusted push failed: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push with existing tag:\n%s", out) + } + + dockerCmd(c, "rmi", repoName) + + // Try pull to ensure the double push did not break our ability to pull + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + if err != nil { + c.Fatalf("Error running trusted pull: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Status: Downloaded") { + c.Fatalf("Missing expected output on trusted pull with --disable-content-trust:\n%s", out) + } +} + +func (s *DockerTrustSuite) 
TestTrustedPushWithIncorrectPassphraseForNonRoot(c *check.C) { + repoName := fmt.Sprintf("%v/dockercliincorrectpwd/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + // Push with default passphrases + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + if err != nil { + c.Fatalf("trusted push failed: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + // Push with wrong passphrases + pushCmd = exec.Command(dockerBinary, "push", repoName) + s.trustedCmdWithPassphrases(pushCmd, "12345678", "87654321") + out, _, err = runCommandWithOutput(pushCmd) + if err == nil { + c.Fatalf("Expected error on trusted push with incorrect passphrases, got none: \n%s", out) + } + + if !strings.Contains(string(out), "password invalid, operation has failed") { + c.Fatalf("Missing expected output on trusted push with incorrect targets/snapshot passphrase:\n%s", out) + } +} + +func (s *DockerTrustSuite) TestTrustedPushWithExpiredSnapshot(c *check.C) { + c.Skip("Currently changes system time, causing instability") + repoName := fmt.Sprintf("%v/dockercliexpiredsnapshot/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + // Push with default passphrases + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + if err != nil { + c.Fatalf("trusted push failed: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + // Snapshots last for three years, so this one should have expired + fourYearsLater := time.Now().Add(time.Hour * 24 * 365 * 4) + + runAtDifferentDate(fourYearsLater, func() { + // Push again after the snapshot has expired + pushCmd = exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err = runCommandWithOutput(pushCmd) + if err == nil { + c.Fatalf("Expected error on trusted push with expired snapshot, got none: \n%s", out) + } + + if !strings.Contains(string(out), "repository out-of-date") { + c.Fatalf("Missing expected output on trusted push with expired snapshot:\n%s", out) + } + }) +} + +func (s *DockerTrustSuite) TestTrustedPushWithExpiredTimestamp(c *check.C) { + c.Skip("Currently changes system time, causing instability") + repoName := fmt.Sprintf("%v/dockercliexpiredtimestamppush/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + // Push with default passphrases + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + if err != nil { + c.Fatalf("trusted push failed: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + // The timestamps expire in two weeks;
let's check three weeks later. + threeWeeksLater := time.Now().Add(time.Hour * 24 * 21) + + // Should succeed because the server transparently re-signs the timestamp + runAtDifferentDate(threeWeeksLater, func() { + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + if err != nil { + c.Fatalf("Error running trusted push: %s\n%s", err, out) + } + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push with expired timestamp:\n%s", out) + } + }) +} diff --git a/integration-cli/docker_cli_rename_test.go b/integration-cli/docker_cli_rename_test.go new file mode 100644 index 00000000..cac9f3aa --- /dev/null +++ b/integration-cli/docker_cli_rename_test.go @@ -0,0 +1,76 @@ +package main + +import ( + "strings" + + "github.com/docker/docker/pkg/stringid" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestRenameStoppedContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "--name", "first_name", "-d", "busybox", "sh") + + cleanedContainerID := strings.TrimSpace(out) + dockerCmd(c, "wait", cleanedContainerID) + + newName := "new_name" + stringid.GenerateRandomID() + dockerCmd(c, "rename", "first_name", newName) + + name, err := inspectField(cleanedContainerID, "Name") + if err != nil { + c.Fatal(err) + } + if name != "/"+newName { + c.Fatal("Failed to rename container: ", name) + } + +} + +func (s *DockerSuite) TestRenameRunningContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "--name", "first_name", "-d", "busybox", "sh") + + newName := "new_name" + stringid.GenerateRandomID() + cleanedContainerID := strings.TrimSpace(out) + dockerCmd(c, "rename", "first_name", newName) + + name, err := inspectField(cleanedContainerID, "Name") + if err != nil { + c.Fatal(err) + } + if name != "/"+newName { + c.Fatal("Failed to rename container") + } +} + +func (s *DockerSuite) TestRenameCheckNames(c *check.C) { + dockerCmd(c, "run", "--name", "first_name", "-d", "busybox", "sh") + + newName := "new_name" + stringid.GenerateRandomID() + dockerCmd(c, "rename", "first_name", newName) + + name, err := inspectField(newName, "Name") + if err != nil { + c.Fatal(err) + } + if name != "/"+newName { + c.Fatal("Failed to rename container") + } + + name, err = inspectField("first_name", "Name") + if err == nil || !strings.Contains(err.Error(), "No such image or container: first_name") { + c.Fatalf("expected 'No such image or container' error for the old name, got name=%q err=%v", name, err) + } +} + +func (s *DockerSuite) TestRenameInvalidName(c *check.C) { + dockerCmd(c, "run", "--name", "myname", "-d", "busybox", "top") + + if out, _, err := dockerCmdWithError(c, "rename", "myname", "new:invalid"); err == nil || !strings.Contains(out, "Invalid container name") { + c.Fatalf("Renaming container to invalid name should have failed: %s\n%v", out, err) + } + + if out, _, err := dockerCmdWithError(c, "ps", "-a"); err != nil || !strings.Contains(out, "myname") { + c.Fatalf("Output of docker ps should have included 'myname': %s\n%v", out, err) + } +} diff --git a/integration-cli/docker_cli_restart_test.go b/integration-cli/docker_cli_restart_test.go new file mode 100644 index 00000000..4cabeb9c --- /dev/null +++ b/integration-cli/docker_cli_restart_test.go @@ -0,0 +1,143 @@ +package main + +import ( + "strings" + "time" + + "github.com/go-check/check" +) + +func (s *DockerSuite) TestRestartStoppedContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "echo", "foobar") + + cleanedContainerID := 
strings.TrimSpace(out) + dockerCmd(c, "wait", cleanedContainerID) + + out, _ = dockerCmd(c, "logs", cleanedContainerID) + if out != "foobar\n" { + c.Errorf("container should've printed 'foobar'") + } + + dockerCmd(c, "restart", cleanedContainerID) + + out, _ = dockerCmd(c, "logs", cleanedContainerID) + if out != "foobar\nfoobar\n" { + c.Errorf("container should've printed 'foobar' twice, got %v", out) + } +} + +func (s *DockerSuite) TestRestartRunningContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "echo foobar && sleep 30 && echo 'should not print this'") + + cleanedContainerID := strings.TrimSpace(out) + + time.Sleep(1 * time.Second) + + out, _ = dockerCmd(c, "logs", cleanedContainerID) + if out != "foobar\n" { + c.Errorf("container should've printed 'foobar'") + } + + dockerCmd(c, "restart", "-t", "1", cleanedContainerID) + + // give the restarted container a moment to print again before reading the logs + time.Sleep(1 * time.Second) + + out, _ = dockerCmd(c, "logs", cleanedContainerID) + + if out != "foobar\nfoobar\n" { + c.Errorf("container should've printed 'foobar' twice") + } +} + +// Test that restarting a container with a volume does not create a new volume on restart. Regression test for #819. +func (s *DockerSuite) TestRestartWithVolumes(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "-v", "/test", "busybox", "top") + + cleanedContainerID := strings.TrimSpace(out) + out, _ = dockerCmd(c, "inspect", "--format", "{{ len .Mounts }}", cleanedContainerID) + + if out = strings.Trim(out, " \n\r"); out != "1" { + c.Errorf("expected 1 volume, received %s", out) + } + + source, err := inspectMountSourceField(cleanedContainerID, "/test") + c.Assert(err, check.IsNil) + + dockerCmd(c, "restart", cleanedContainerID) + + out, _ = dockerCmd(c, "inspect", "--format", "{{ len .Mounts }}", cleanedContainerID) + if out = strings.Trim(out, " \n\r"); out != "1" { + c.Errorf("expected 1 volume after restart, received %s", out) + } + + sourceAfterRestart, err := inspectMountSourceField(cleanedContainerID, "/test") + c.Assert(err, check.IsNil) + + if source != sourceAfterRestart { + c.Errorf("expected volume path %s, actual path %s", source, sourceAfterRestart) + } +} + +func (s *DockerSuite) TestRestartPolicyNO(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "--restart=no", "busybox", "false") + + id := strings.TrimSpace(string(out)) + name, err := inspectField(id, "HostConfig.RestartPolicy.Name") + c.Assert(err, check.IsNil) + if name != "no" { + c.Fatalf("Container restart policy name is %s, expected %s", name, "no") + } +} + +func (s *DockerSuite) TestRestartPolicyAlways(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "--restart=always", "busybox", "false") + + id := strings.TrimSpace(string(out)) + name, err := inspectField(id, "HostConfig.RestartPolicy.Name") + c.Assert(err, check.IsNil) + if name != "always" { + c.Fatalf("Container restart policy name is %s, expected %s", name, "always") + } + + MaximumRetryCount, err := inspectField(id, "HostConfig.RestartPolicy.MaximumRetryCount") + c.Assert(err, check.IsNil) + + // MaximumRetryCount=0 if the restart policy is always + if MaximumRetryCount != "0" { + c.Fatalf("Container Maximum Retry Count is %s, expected %s", MaximumRetryCount, "0") + } +} + +func (s *DockerSuite) TestRestartPolicyOnFailure(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:1", "busybox", "false") + + id := strings.TrimSpace(string(out)) + name, err := inspectField(id, "HostConfig.RestartPolicy.Name") + c.Assert(err, check.IsNil) + if name != "on-failure" { + c.Fatalf("Container restart 
policy name is %s, expected %s", name, "on-failure") + } + +} + +// a good container with --restart=on-failure:3 +// MaximumRetryCount!=0; RestartCount=0 +func (s *DockerSuite) TestContainerRestartWithGoodContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "true") + + id := strings.TrimSpace(string(out)) + if err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false false", 5); err != nil { + c.Fatal(err) + } + count, err := inspectField(id, "RestartCount") + c.Assert(err, check.IsNil) + if count != "0" { + c.Fatalf("Container was restarted %s times, expected %d", count, 0) + } + MaximumRetryCount, err := inspectField(id, "HostConfig.RestartPolicy.MaximumRetryCount") + c.Assert(err, check.IsNil) + if MaximumRetryCount != "3" { + c.Fatalf("Container Maximum Retry Count is %s, expected %s", MaximumRetryCount, "3") + } + +} diff --git a/integration-cli/docker_cli_rm_test.go b/integration-cli/docker_cli_rm_test.go new file mode 100644 index 00000000..0e57551b --- /dev/null +++ b/integration-cli/docker_cli_rm_test.go @@ -0,0 +1,91 @@ +package main + +import ( + "os" + "strings" + + "github.com/go-check/check" +) + +func (s *DockerSuite) TestRmContainerWithRemovedVolume(c *check.C) { + testRequires(c, SameHostDaemon) + + dockerCmd(c, "run", "--name", "losemyvolumes", "-v", "/tmp/testing:/test", "busybox", "true") + + if err := os.Remove("/tmp/testing"); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "rm", "-v", "losemyvolumes") +} + +func (s *DockerSuite) TestRmContainerWithVolume(c *check.C) { + dockerCmd(c, "run", "--name", "foo", "-v", "/srv", "busybox", "true") + + dockerCmd(c, "rm", "-v", "foo") +} + +func (s *DockerSuite) TestRmRunningContainer(c *check.C) { + createRunningContainer(c, "foo") + + if _, _, err := dockerCmdWithError(c, "rm", "foo"); err == nil { + c.Fatalf("Expected error: cannot rm a running container") + } +} + +func (s *DockerSuite) TestRmForceRemoveRunningContainer(c *check.C) { + createRunningContainer(c, "foo") + + // Stop, then remove with -f + dockerCmd(c, "rm", "-f", "foo") +} + +func (s *DockerSuite) TestRmContainerOrphaning(c *check.C) { + + dockerfile1 := `FROM busybox:latest + ENTRYPOINT ["/bin/true"]` + img := "test-container-orphaning" + dockerfile2 := `FROM busybox:latest + ENTRYPOINT ["/bin/true"] + MAINTAINER Integration Tests` + + // build first dockerfile + img1, err := buildImage(img, dockerfile1, true) + if err != nil { + c.Fatalf("Could not build image %s: %v", img, err) + } + // run container on first image + if out, _, err := dockerCmdWithError(c, "run", img); err != nil { + c.Fatalf("Could not run image %s: %v: %s", img, err, out) + } + + // rebuild dockerfile with a small addition at the end + if _, err := buildImage(img, dockerfile2, true); err != nil { + c.Fatalf("Could not rebuild image %s: %v", img, err) + } + // try to remove the image; it should error out. 
+ if out, _, err := dockerCmdWithError(c, "rmi", img); err == nil { + c.Fatalf("Expected to error out removing the image, but succeeded: %s", out) + } + + // check if we deleted the first image + out, _, err := dockerCmdWithError(c, "images", "-q", "--no-trunc") + if err != nil { + c.Fatalf("%v: %s", err, out) + } + if !strings.Contains(out, img1) { + c.Fatalf("Orphaned container (could not find %q in docker images): %s", img1, out) + } +} + +func (s *DockerSuite) TestRmInvalidContainer(c *check.C) { + if out, _, err := dockerCmdWithError(c, "rm", "unknown"); err == nil { + c.Fatal("Expected error on rm unknown container, got none") + } else if !strings.Contains(out, "failed to remove containers") { + c.Fatalf("Expected output to contain 'failed to remove containers', got %q", out) + } +} + +func createRunningContainer(c *check.C, name string) { + dockerCmd(c, "run", "-dt", "--name", name, "busybox", "top") +} diff --git a/integration-cli/docker_cli_rmi_test.go b/integration-cli/docker_cli_rmi_test.go new file mode 100644 index 00000000..8d9f94f2 --- /dev/null +++ b/integration-cli/docker_cli_rmi_test.go @@ -0,0 +1,277 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + + "github.com/go-check/check" +) + +func (s *DockerSuite) TestRmiWithContainerFails(c *check.C) { + errSubstr := "is using it" + + // create a container + out, _, err := dockerCmdWithError(c, "run", "-d", "busybox", "true") + if err != nil { + c.Fatalf("failed to create a container: %s, %v", out, err) + } + + cleanedContainerID := strings.TrimSpace(out) + + // try to delete the image + out, _, err = dockerCmdWithError(c, "rmi", "busybox") + if err == nil { + c.Fatalf("Container %q is using image, should not be able to rmi: %q", cleanedContainerID, out) + } + if !strings.Contains(out, errSubstr) { + c.Fatalf("Container %q is using image, error message should contain %q: %v", cleanedContainerID, errSubstr, out) + } + + // make sure it didn't delete the busybox name + images, _ := dockerCmd(c, "images") + if !strings.Contains(images, "busybox") { + c.Fatalf("The name 'busybox' should not have been removed from images: %q", images) + } +} + +func (s *DockerSuite) TestRmiTag(c *check.C) { + imagesBefore, _ := dockerCmd(c, "images", "-a") + dockerCmd(c, "tag", "busybox", "utest:tag1") + dockerCmd(c, "tag", "busybox", "utest/docker:tag2") + dockerCmd(c, "tag", "busybox", "utest:5000/docker:tag3") + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+3 { + c.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) + } + } + dockerCmd(c, "rmi", "utest/docker:tag2") + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+2 { + c.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) + } + + } + dockerCmd(c, "rmi", "utest:5000/docker:tag3") + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+1 { + c.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) + } + + } + dockerCmd(c, "rmi", "utest:tag1") + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+0 { + c.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) + } + + } +} + +func (s *DockerSuite) TestRmiImgIDMultipleTag(c *check.C) { + out, _, err := dockerCmdWithError(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir '/busybox-one'") + if err != 
nil { + c.Fatalf("failed to create a container: %s, %v", out, err) + } + + containerID := strings.TrimSpace(out) + out, _, err = dockerCmdWithError(c, "commit", containerID, "busybox-one") + if err != nil { + c.Fatalf("failed to commit a new busybox-one: %s, %v", out, err) + } + + imagesBefore, _ := dockerCmd(c, "images", "-a") + dockerCmd(c, "tag", "busybox-one", "busybox-one:tag1") + dockerCmd(c, "tag", "busybox-one", "busybox-one:tag2") + + imagesAfter, _ := dockerCmd(c, "images", "-a") + if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+2 { + c.Fatalf("tagging busybox-one should have created 2 more images with the same image ID; docker images shows: %q\n", imagesAfter) + } + + imgID, err := inspectField("busybox-one:tag1", "Id") + c.Assert(err, check.IsNil) + + // run a container with the image + out, _, err = dockerCmdWithError(c, "run", "-d", "busybox-one", "top") + if err != nil { + c.Fatalf("failed to create a container: %s, %v", out, err) + } + + containerID = strings.TrimSpace(out) + + // the first removal, without force, should fail + out, _, err = dockerCmdWithError(c, "rmi", imgID) + expected := fmt.Sprintf("Conflict, cannot delete %s because the running container %s is using it, stop it and use -f to force", imgID[:12], containerID[:12]) + if err == nil || !strings.Contains(out, expected) { + c.Fatalf("rmi of an image tagged in multiple repos should have failed without force: %s, %v, expected: %s", out, err, expected) + } + + dockerCmd(c, "stop", containerID) + dockerCmd(c, "rmi", "-f", imgID) + + imagesAfter, _ = dockerCmd(c, "images", "-a") + if strings.Contains(imagesAfter, imgID[:12]) { + c.Fatalf("rmi -f %s failed, image still exists: %q\n\n", imgID, imagesAfter) + } +} + +func (s *DockerSuite) TestRmiImgIDForce(c *check.C) { + out, _, err := dockerCmdWithError(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir '/busybox-test'") + if err != nil { + c.Fatalf("failed to create a container: %s, %v", out, err) + } + + containerID := strings.TrimSpace(out) + out, _, err = dockerCmdWithError(c, "commit", containerID, "busybox-test") + if err != nil { + c.Fatalf("failed to commit a new busybox-test: %s, %v", out, err) + } + + imagesBefore, _ := dockerCmd(c, "images", "-a") + dockerCmd(c, "tag", "busybox-test", "utest:tag1") + dockerCmd(c, "tag", "busybox-test", "utest:tag2") + dockerCmd(c, "tag", "busybox-test", "utest/docker:tag3") + dockerCmd(c, "tag", "busybox-test", "utest:5000/docker:tag4") + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+4 { + c.Fatalf("tagging busybox-test should have created 4 more images with the same image ID; docker images shows: %q\n", imagesAfter) + } + } + imgID, err := inspectField("busybox-test", "Id") + c.Assert(err, check.IsNil) + + // the first removal, without force, should fail + out, _, err = dockerCmdWithError(c, "rmi", imgID) + if err == nil || !strings.Contains(out, fmt.Sprintf("Conflict, cannot delete image %s because it is tagged in multiple repositories, use -f to force", imgID)) { + c.Fatalf("rmi of an image tagged in multiple repos should have failed without force: %s, %v", out, err) + } + + dockerCmd(c, "rmi", "-f", imgID) + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + if strings.Contains(imagesAfter, imgID[:12]) { + c.Fatalf("rmi -f %s failed, image still exists: %q\n\n", imgID, imagesAfter) + } + } +} + +func (s *DockerSuite) TestRmiTagWithExistingContainers(c *check.C) { + container := "test-delete-tag" + newtag := "busybox:newtag" + bb := "busybox:latest" + if out, _, err := dockerCmdWithError(c, "tag", bb, 
newtag); err != nil { + c.Fatalf("Could not tag busybox: %v: %s", err, out) + } + if out, _, err := dockerCmdWithError(c, "run", "--name", container, bb, "/bin/true"); err != nil { + c.Fatalf("Could not run busybox: %v: %s", err, out) + } + out, _, err := dockerCmdWithError(c, "rmi", newtag) + if err != nil { + c.Fatalf("Could not remove tag %s: %v: %s", newtag, err, out) + } + if d := strings.Count(out, "Untagged: "); d != 1 { + c.Fatalf("Expected 1 untagged entry got %d: %q", d, out) + } +} + +func (s *DockerSuite) TestRmiForceWithExistingContainers(c *check.C) { + image := "busybox-clone" + + cmd := exec.Command(dockerBinary, "build", "--no-cache", "-t", image, "-") + cmd.Stdin = strings.NewReader(`FROM busybox +MAINTAINER foo`) + + if out, _, err := runCommandWithOutput(cmd); err != nil { + c.Fatalf("Could not build %s: %s, %v", image, out, err) + } + + if out, _, err := dockerCmdWithError(c, "run", "--name", "test-force-rmi", image, "/bin/true"); err != nil { + c.Fatalf("Could not run container: %s, %v", out, err) + } + + if out, _, err := dockerCmdWithError(c, "rmi", "-f", image); err != nil { + c.Fatalf("Could not remove image %s: %s, %v", image, out, err) + } +} + +func (s *DockerSuite) TestRmiWithMultipleRepositories(c *check.C) { + newRepo := "127.0.0.1:5000/busybox" + oldRepo := "busybox" + newTag := "busybox:test" + out, _, err := dockerCmdWithError(c, "tag", oldRepo, newRepo) + if err != nil { + c.Fatalf("Could not tag busybox: %v: %s", err, out) + } + + out, _, err = dockerCmdWithError(c, "run", "--name", "test", oldRepo, "touch", "/home/abcd") + if err != nil { + c.Fatalf("failed to run container: %v, output: %s", err, out) + } + + out, _, err = dockerCmdWithError(c, "commit", "test", newTag) + if err != nil { + c.Fatalf("failed to commit container: %v, output: %s", err, out) + } + + out, _, err = dockerCmdWithError(c, "rmi", newTag) + if err != nil { + c.Fatalf("failed to remove image: %v, output: %s", err, out) + } + if !strings.Contains(out, "Untagged: "+newTag) { + c.Fatalf("Could not remove image %s: %s, %v", newTag, out, err) + } +} + +func (s *DockerSuite) TestRmiBlank(c *check.C) { + // try to delete a blank image name + out, _, err := dockerCmdWithError(c, "rmi", "") + if err == nil { + c.Fatal("Should have failed to delete '' image") + } + if strings.Contains(out, "No such image") { + c.Fatalf("Wrong error message generated: %s", out) + } + if !strings.Contains(out, "Image name can not be blank") { + c.Fatalf("Expected error message not generated: %s", out) + } + + out, _, err = dockerCmdWithError(c, "rmi", " ") + if err == nil { + c.Fatal("Should have failed to delete '' image") + } + if !strings.Contains(out, "No such image") { + c.Fatalf("Expected error message not generated: %s", out) + } +} + +func (s *DockerSuite) TestRmiContainerImageNotFound(c *check.C) { + // Build 2 images for testing. + imageNames := []string{"test1", "test2"} + imageIds := make([]string, 2) + for i, name := range imageNames { + dockerfile := fmt.Sprintf("FROM busybox\nMAINTAINER %s\nRUN echo %s\n", name, name) + id, err := buildImage(name, dockerfile, false) + c.Assert(err, check.IsNil) + imageIds[i] = id + } + + // Create a long-running container. + dockerCmd(c, "run", "-d", imageNames[0], "top") + + // Create a stopped container, and then force remove its image. + dockerCmd(c, "run", imageNames[1], "true") + dockerCmd(c, "rmi", "-f", imageIds[1]) + + // Try to remove the image of the running container and see if it fails as expected. 
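+ // Even with -f, the daemon refuses to remove an image that a running container depends on.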
+ out, _, err := dockerCmdWithError(c, "rmi", "-f", imageIds[0]) + if err == nil || !strings.Contains(out, "is using it") { + c.Log(out) + c.Fatal("The image of the running container should not be removed.") + } +} diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go new file mode 100644 index 00000000..92889574 --- /dev/null +++ b/integration-cli/docker_cli_run_test.go @@ -0,0 +1,2778 @@ +package main + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "net" + "os" + "os/exec" + "path" + "path/filepath" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/docker/docker/pkg/nat" + "github.com/docker/libnetwork/resolvconf" + "github.com/go-check/check" +) + +// "test123" should be printed by docker run +func (s *DockerSuite) TestRunEchoStdout(c *check.C) { + out, _ := dockerCmd(c, "run", "busybox", "echo", "test123") + if out != "test123\n" { + c.Fatalf("container should've printed 'test123'") + } +} + +// "test" should be printed +func (s *DockerSuite) TestRunEchoStdoutWithMemoryLimit(c *check.C) { + out, _, _ := dockerCmdWithStdoutStderr(c, "run", "-m", "16m", "busybox", "echo", "test") + out = strings.Trim(out, "\r\n") + + if expected := "test"; out != expected { + c.Fatalf("container should've printed %q but printed %q", expected, out) + } +} + +// should run without memory swap +func (s *DockerSuite) TestRunWithoutMemoryswapLimit(c *check.C) { + testRequires(c, NativeExecDriver) + dockerCmd(c, "run", "-m", "16m", "--memory-swap", "-1", "busybox", "true") +} + +func (s *DockerSuite) TestRunWithSwappiness(c *check.C) { + dockerCmd(c, "run", "--memory-swappiness", "0", "busybox", "true") +} + +func (s *DockerSuite) TestRunWithSwappinessInvalid(c *check.C) { + out, _, err := dockerCmdWithError(c, "run", "--memory-swappiness", "101", "busybox", "true") + if err == nil { + c.Fatalf("failed. 
test was able to set an invalid swappiness value, output: %q", out) + } +} + +// "test" should be printed +func (s *DockerSuite) TestRunEchoStdoutWithCPULimit(c *check.C) { + out, _ := dockerCmd(c, "run", "-c", "1000", "busybox", "echo", "test") + if out != "test\n" { + c.Errorf("container should've printed 'test'") + } +} + +// "test" should be printed +func (s *DockerSuite) TestRunEchoStdoutWithCPUAndMemoryLimit(c *check.C) { + out, _, _ := dockerCmdWithStdoutStderr(c, "run", "-c", "1000", "-m", "16m", "busybox", "echo", "test") + if out != "test\n" { + c.Errorf("container should've printed 'test', got %q instead", out) + } +} + +// "test" should be printed +func (s *DockerSuite) TestRunEchoNamedContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "--name", "testfoonamedcontainer", "busybox", "echo", "test") + if out != "test\n" { + c.Errorf("container should've printed 'test'") + } +} + +// docker run should not leak file descriptors +func (s *DockerSuite) TestRunLeakyFileDescriptors(c *check.C) { + out, _ := dockerCmd(c, "run", "busybox", "ls", "-C", "/proc/self/fd") + + // normally, we should only get 0, 1, and 2, but 3 gets created by "ls" when it does "opendir" on the "fd" directory + if out != "0 1 2 3\n" { + c.Errorf("container should've printed '0 1 2 3', not: %s", out) + } +} + +// it should be possible to look up Google DNS +// this will fail when Internet access is unavailable +func (s *DockerSuite) TestRunLookupGoogleDns(c *check.C) { + testRequires(c, Network) + dockerCmd(c, "run", "busybox", "nslookup", "google.com") +} + +// the exit code should be 0 +// some versions of lxc might make this test fail +func (s *DockerSuite) TestRunExitCodeZero(c *check.C) { + dockerCmd(c, "run", "busybox", "true") +} + +// the exit code should be 1 +// some versions of lxc might make this test fail +func (s *DockerSuite) TestRunExitCodeOne(c *check.C) { + _, exitCode, err := dockerCmdWithError(c, "run", "busybox", "false") + if err != nil && !strings.Contains(fmt.Sprintf("%s", err), "exit status 1") { + c.Fatal(err) + } + if exitCode != 1 { + c.Errorf("container should've exited with exit code 1") + } +} + +// it should be possible to pipe in data via stdin to a process running in a container +// some versions of lxc might make this test fail +func (s *DockerSuite) TestRunStdinPipe(c *check.C) { + runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "cat") + runCmd.Stdin = strings.NewReader("blahblah") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + c.Fatalf("failed to run container: %v, output: %q", err, out) + } + + out = strings.TrimSpace(out) + dockerCmd(c, "wait", out) + + logsOut, _ := dockerCmd(c, "logs", out) + + containerLogs := strings.TrimSpace(logsOut) + if containerLogs != "blahblah" { + c.Errorf("logs didn't print the container's logs: %s", containerLogs) + } + + dockerCmd(c, "rm", out) +} + +// the container's ID should be printed when starting a container in detached mode +func (s *DockerSuite) TestRunDetachedContainerIDPrinting(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + + out = strings.TrimSpace(out) + dockerCmd(c, "wait", out) + + rmOut, _ := dockerCmd(c, "rm", out) + + rmOut = strings.TrimSpace(rmOut) + if rmOut != out { + c.Errorf("rm didn't print the container ID %s %s", out, rmOut) + } +} + +// the working directory should be set correctly +func (s *DockerSuite) TestRunWorkingDirectory(c *check.C) { + out, _ := dockerCmd(c, "run", "-w", "/root", "busybox", "pwd") + + out = strings.TrimSpace(out) + if out
!= "/root" { + c.Errorf("-w failed to set working directory") + } + + out, _ = dockerCmd(c, "run", "--workdir", "/root", "busybox", "pwd") + out = strings.TrimSpace(out) + if out != "/root" { + c.Errorf("--workdir failed to set working directory") + } +} + +// pinging Google's DNS resolver should fail when we disable the networking +func (s *DockerSuite) TestRunWithoutNetworking(c *check.C) { + out, exitCode, err := dockerCmdWithError(c, "run", "--net=none", "busybox", "ping", "-c", "1", "8.8.8.8") + if err != nil && exitCode != 1 { + c.Fatal(out, err) + } + if exitCode != 1 { + c.Errorf("--net=none should've disabled the network; the container shouldn't have been able to ping 8.8.8.8") + } + + out, exitCode, err = dockerCmdWithError(c, "run", "-n=false", "busybox", "ping", "-c", "1", "8.8.8.8") + if err != nil && exitCode != 1 { + c.Fatal(out, err) + } + if exitCode != 1 { + c.Errorf("-n=false should've disabled the network; the container shouldn't have been able to ping 8.8.8.8") + } +} + +//test --link use container name to link target +func (s *DockerSuite) TestRunLinksContainerWithContainerName(c *check.C) { + dockerCmd(c, "run", "-i", "-t", "-d", "--name", "parent", "busybox") + + ip, err := inspectField("parent", "NetworkSettings.IPAddress") + c.Assert(err, check.IsNil) + + out, _ := dockerCmd(c, "run", "--link", "parent:test", "busybox", "/bin/cat", "/etc/hosts") + if !strings.Contains(out, ip+" test") { + c.Fatalf("use a container name to link target failed") + } +} + +//test --link use container id to link target +func (s *DockerSuite) TestRunLinksContainerWithContainerId(c *check.C) { + cID, _ := dockerCmd(c, "run", "-i", "-t", "-d", "busybox") + + cID = strings.TrimSpace(cID) + ip, err := inspectField(cID, "NetworkSettings.IPAddress") + c.Assert(err, check.IsNil) + + out, _ := dockerCmd(c, "run", "--link", cID+":test", "busybox", "/bin/cat", "/etc/hosts") + if !strings.Contains(out, ip+" test") { + c.Fatalf("use a container id to link target failed") + } +} + +// Issue 9677. 
+func (s *DockerSuite) TestRunWithDaemonFlags(c *check.C) { + out, _, err := dockerCmdWithError(c, "--selinux-enabled", "run", "-i", "-t", "busybox", "true") + if err != nil { + if !strings.Contains(out, "must follow the 'docker daemon' command") && // daemon + !strings.Contains(out, "flag provided but not defined: --selinux-enabled") { // no daemon (client-only) + c.Fatal(err, out) + } + } +} + +// Regression test for #4979 +func (s *DockerSuite) TestRunWithVolumesFromExited(c *check.C) { + out, exitCode := dockerCmd(c, "run", "--name", "test-data", "--volume", "/some/dir", "busybox", "touch", "/some/dir/file") + if exitCode != 0 { + c.Fatal("1", out, exitCode) + } + + out, exitCode = dockerCmd(c, "run", "--volumes-from", "test-data", "busybox", "cat", "/some/dir/file") + if exitCode != 0 { + c.Fatal("2", out, exitCode) + } +} + +// Volume path is a symlink which also exists on the host, and the host side is a file, not a dir +// But the volume call is just a normal volume, not a bind mount +func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir(c *check.C) { + testRequires(c, SameHostDaemon) + testRequires(c, NativeExecDriver) + name := "test-volume-symlink" + + dir, err := ioutil.TempDir("", name) + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(dir) + + f, err := os.OpenFile(filepath.Join(dir, "test"), os.O_CREATE, 0700) + if err != nil { + c.Fatal(err) + } + f.Close() + + dockerFile := fmt.Sprintf("FROM busybox\nRUN mkdir -p %s\nRUN ln -s %s /test", dir, dir) + if _, err := buildImage(name, dockerFile, false); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "-v", "/test/test", name) +} + +func (s *DockerSuite) TestRunVolumesMountedAsReadonly(c *check.C) { + if _, code, err := dockerCmdWithError(c, "run", "-v", "/test:/test:ro", "busybox", "touch", "/test/somefile"); err == nil || code == 0 { + c.Fatalf("run should fail because volume is ro: exit code %d", code) + } +} + +func (s *DockerSuite) TestRunVolumesFromInReadonlyMode(c *check.C) { + dockerCmd(c, "run", "--name", "parent", "-v", "/test", "busybox", "true") + + if _, code, err := dockerCmdWithError(c, "run", "--volumes-from", "parent:ro", "busybox", "touch", "/test/file"); err == nil || code == 0 { + c.Fatalf("run should fail because volume is ro: exit code %d", code) + } +} + +// Regression test for #1201 +func (s *DockerSuite) TestRunVolumesFromInReadWriteMode(c *check.C) { + dockerCmd(c, "run", "--name", "parent", "-v", "/test", "busybox", "true") + dockerCmd(c, "run", "--volumes-from", "parent:rw", "busybox", "touch", "/test/file") + + if out, _, err := dockerCmdWithError(c, "run", "--volumes-from", "parent:bar", "busybox", "touch", "/test/file"); err == nil || !strings.Contains(out, "invalid mode for volumes-from: bar") { + c.Fatalf("running --volumes-from foo:bar should have failed with invalid mount mode: %q", out) + } + + dockerCmd(c, "run", "--volumes-from", "parent", "busybox", "touch", "/test/file") +} + +func (s *DockerSuite) TestVolumesFromGetsProperMode(c *check.C) { + dockerCmd(c, "run", "--name", "parent", "-v", "/test:/test:ro", "busybox", "true") + + // Expect this "rw" mode to be ignored since the inherited volume is "ro" + if _, _, err := dockerCmdWithError(c, "run", "--volumes-from", "parent:rw", "busybox", "touch", "/test/file"); err == nil { + c.Fatal("Expected volumes-from to inherit read-only volume even when passing in `rw`") + } + + dockerCmd(c, "run", "--name", "parent2", "-v", "/test:/test:ro", "busybox", "true") + + // Expect this to be read-only since both are "ro" + if _, _, err 
:= dockerCmdWithError(c, "run", "--volumes-from", "parent2:ro", "busybox", "touch", "/test/file"); err == nil { + c.Fatal("Expected volumes-from to inherit read-only volume even when passing in `ro`") + } +} + +// Test for GH#10618 +func (s *DockerSuite) TestRunNoDupVolumes(c *check.C) { + mountstr1 := randomUnixTmpDirPath("test1") + ":/someplace" + mountstr2 := randomUnixTmpDirPath("test2") + ":/someplace" + + if out, _, err := dockerCmdWithError(c, "run", "-v", mountstr1, "-v", mountstr2, "busybox", "true"); err == nil { + c.Fatal("Expected error about duplicate volume definitions") + } else { + if !strings.Contains(out, "Duplicate bind mount") { + c.Fatalf("Expected 'duplicate volume' error, got %v", err) + } + } +} + +// Test for #1351 +func (s *DockerSuite) TestRunApplyVolumesFromBeforeVolumes(c *check.C) { + dockerCmd(c, "run", "--name", "parent", "-v", "/test", "busybox", "touch", "/test/foo") + dockerCmd(c, "run", "--volumes-from", "parent", "-v", "/test", "busybox", "cat", "/test/foo") +} + +func (s *DockerSuite) TestRunMultipleVolumesFrom(c *check.C) { + dockerCmd(c, "run", "--name", "parent1", "-v", "/test", "busybox", "touch", "/test/foo") + dockerCmd(c, "run", "--name", "parent2", "-v", "/other", "busybox", "touch", "/other/bar") + dockerCmd(c, "run", "--volumes-from", "parent1", "--volumes-from", "parent2", "busybox", "sh", "-c", "cat /test/foo && cat /other/bar") +} + +// this tests verifies the ID format for the container +func (s *DockerSuite) TestRunVerifyContainerID(c *check.C) { + out, exit, err := dockerCmdWithError(c, "run", "-d", "busybox", "true") + if err != nil { + c.Fatal(err) + } + if exit != 0 { + c.Fatalf("expected exit code 0 received %d", exit) + } + + match, err := regexp.MatchString("^[0-9a-f]{64}$", strings.TrimSuffix(out, "\n")) + if err != nil { + c.Fatal(err) + } + if !match { + c.Fatalf("Invalid container ID: %s", out) + } +} + +// Test that creating a container with a volume doesn't crash. Regression test for #995. +func (s *DockerSuite) TestRunCreateVolume(c *check.C) { + dockerCmd(c, "run", "-v", "/var/lib/data", "busybox", "true") +} + +// Test that creating a volume with a symlink in its path works correctly. Test for #5152. +// Note that this bug happens only with symlinks with a target that starts with '/'. +func (s *DockerSuite) TestRunCreateVolumeWithSymlink(c *check.C) { + image := "docker-test-createvolumewithsymlink" + + buildCmd := exec.Command(dockerBinary, "build", "-t", image, "-") + buildCmd.Stdin = strings.NewReader(`FROM busybox + RUN ln -s home /bar`) + buildCmd.Dir = workingDirectory + err := buildCmd.Run() + if err != nil { + c.Fatalf("could not build '%s': %v", image, err) + } + + _, exitCode, err := dockerCmdWithError(c, "run", "-v", "/bar/foo", "--name", "test-createvolumewithsymlink", image, "sh", "-c", "mount | grep -q /home/foo") + if err != nil || exitCode != 0 { + c.Fatalf("[run] err: %v, exitcode: %d", err, exitCode) + } + + volPath, err := inspectMountSourceField("test-createvolumewithsymlink", "/bar/foo") + if err != nil { + c.Fatalf("[inspect] err: %v", err) + } + + _, exitCode, err = dockerCmdWithError(c, "rm", "-v", "test-createvolumewithsymlink") + if err != nil || exitCode != 0 { + c.Fatalf("[rm] err: %v, exitcode: %d", err, exitCode) + } + + _, err = os.Stat(volPath) + if !os.IsNotExist(err) { + c.Fatalf("[open] (expecting 'file does not exist' error) err: %v, volPath: %s", err, volPath) + } +} + +// Tests that a volume path that has a symlink exists in a container mounting it with `--volumes-from`. 
+func (s *DockerSuite) TestRunVolumesFromSymlinkPath(c *check.C) { + name := "docker-test-volumesfromsymlinkpath" + + buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-") + buildCmd.Stdin = strings.NewReader(`FROM busybox + RUN ln -s home /foo + VOLUME ["/foo/bar"]`) + buildCmd.Dir = workingDirectory + err := buildCmd.Run() + if err != nil { + c.Fatalf("could not build 'docker-test-volumesfromsymlinkpath': %v", err) + } + + _, exitCode, err := dockerCmdWithError(c, "run", "--name", "test-volumesfromsymlinkpath", name) + if err != nil || exitCode != 0 { + c.Fatalf("[run] (volume) err: %v, exitcode: %d", err, exitCode) + } + + _, exitCode, err = dockerCmdWithError(c, "run", "--volumes-from", "test-volumesfromsymlinkpath", "busybox", "sh", "-c", "ls /foo | grep -q bar") + if err != nil || exitCode != 0 { + c.Fatalf("[run] err: %v, exitcode: %d", err, exitCode) + } +} + +func (s *DockerSuite) TestRunExitCode(c *check.C) { + _, exit, err := dockerCmdWithError(c, "run", "busybox", "/bin/sh", "-c", "exit 72") + if err == nil { + c.Fatal("expected a non-nil error, but got none") + } + if exit != 72 { + c.Fatalf("expected exit code 72 received %d", exit) + } +} + +func (s *DockerSuite) TestRunUserDefaultsToRoot(c *check.C) { + out, _ := dockerCmd(c, "run", "busybox", "id") + if !strings.Contains(out, "uid=0(root) gid=0(root)") { + c.Fatalf("expected root user, got %s", out) + } +} + +func (s *DockerSuite) TestRunUserByName(c *check.C) { + out, _ := dockerCmd(c, "run", "-u", "root", "busybox", "id") + if !strings.Contains(out, "uid=0(root) gid=0(root)") { + c.Fatalf("expected root user, got %s", out) + } +} + +func (s *DockerSuite) TestRunUserByID(c *check.C) { + out, _ := dockerCmd(c, "run", "-u", "1", "busybox", "id") + if !strings.Contains(out, "uid=1(daemon) gid=1(daemon)") { + c.Fatalf("expected daemon user, got %s", out) + } +} + +func (s *DockerSuite) TestRunUserByIDBig(c *check.C) { + out, _, err := dockerCmdWithError(c, "run", "-u", "2147483648", "busybox", "id") + if err == nil { + c.Fatal("Expected an error, but got none.", out) + } + if !strings.Contains(out, "Uids and gids must be in range") { + c.Fatalf("expected error about uids range, got %s", out) + } +} + +func (s *DockerSuite) TestRunUserByIDNegative(c *check.C) { + out, _, err := dockerCmdWithError(c, "run", "-u", "-1", "busybox", "id") + if err == nil { + c.Fatal("Expected an error, but got none.", out) + } + if !strings.Contains(out, "Uids and gids must be in range") { + c.Fatalf("expected error about uids range, got %s", out) + } +} + +func (s *DockerSuite) TestRunUserByIDZero(c *check.C) { + out, _, err := dockerCmdWithError(c, "run", "-u", "0", "busybox", "id") + if err != nil { + c.Fatal(err, out) + } + if !strings.Contains(out, "uid=0(root) gid=0(root) groups=10(wheel)") { + c.Fatalf("expected root user, got %s", out) + } +} + +func (s *DockerSuite) TestRunUserNotFound(c *check.C) { + _, _, err := dockerCmdWithError(c, "run", "-u", "notme", "busybox", "id") + if err == nil { + c.Fatal("unknown user should cause container to fail") + } +} + +func (s *DockerSuite) TestRunTwoConcurrentContainers(c *check.C) { + group := sync.WaitGroup{} + group.Add(2) + + errChan := make(chan error, 2) + for i := 0; i < 2; i++ { + go func() { + defer group.Done() + _, _, err := dockerCmdWithError(c, "run", "busybox", "sleep", "2") + errChan <- err + }() + } + + group.Wait() + close(errChan) + + for err := range errChan { + c.Assert(err, check.IsNil) + } +} + +func (s *DockerSuite) TestRunEnvironment(c *check.C) { + cmd := exec.Command(dockerBinary, "run", 
"-h", "testing", "-e=FALSE=true", "-e=TRUE", "-e=TRICKY", "-e=HOME=", "busybox", "env") + cmd.Env = append(os.Environ(), + "TRUE=false", + "TRICKY=tri\ncky\n", + ) + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + c.Fatal(err, out) + } + + actualEnvLxc := strings.Split(strings.TrimSpace(out), "\n") + actualEnv := []string{} + for i := range actualEnvLxc { + if actualEnvLxc[i] != "container=lxc" { + actualEnv = append(actualEnv, actualEnvLxc[i]) + } + } + sort.Strings(actualEnv) + + goodEnv := []string{ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOSTNAME=testing", + "FALSE=true", + "TRUE=false", + "TRICKY=tri", + "cky", + "", + "HOME=/root", + } + sort.Strings(goodEnv) + if len(goodEnv) != len(actualEnv) { + c.Fatalf("Wrong environment: should be %d variables, not: %q\n", len(goodEnv), strings.Join(actualEnv, ", ")) + } + for i := range goodEnv { + if actualEnv[i] != goodEnv[i] { + c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) + } + } +} + +func (s *DockerSuite) TestRunEnvironmentErase(c *check.C) { + // Test to make sure that when we use -e on env vars that are + // not set in our local env that they're removed (if present) in + // the container + + cmd := exec.Command(dockerBinary, "run", "-e", "FOO", "-e", "HOSTNAME", "busybox", "env") + cmd.Env = appendBaseEnv([]string{}) + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + c.Fatal(err, out) + } + + actualEnvLxc := strings.Split(strings.TrimSpace(out), "\n") + actualEnv := []string{} + for i := range actualEnvLxc { + if actualEnvLxc[i] != "container=lxc" { + actualEnv = append(actualEnv, actualEnvLxc[i]) + } + } + sort.Strings(actualEnv) + + goodEnv := []string{ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOME=/root", + } + sort.Strings(goodEnv) + if len(goodEnv) != len(actualEnv) { + c.Fatalf("Wrong environment: should be %d variables, not: %q\n", len(goodEnv), strings.Join(actualEnv, ", ")) + } + for i := range goodEnv { + if actualEnv[i] != goodEnv[i] { + c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) + } + } +} + +func (s *DockerSuite) TestRunEnvironmentOverride(c *check.C) { + // Test to make sure that when we use -e on env vars that are + // already in the env that we're overriding them + + cmd := exec.Command(dockerBinary, "run", "-e", "HOSTNAME", "-e", "HOME=/root2", "busybox", "env") + cmd.Env = appendBaseEnv([]string{"HOSTNAME=bar"}) + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + c.Fatal(err, out) + } + + actualEnvLxc := strings.Split(strings.TrimSpace(out), "\n") + actualEnv := []string{} + for i := range actualEnvLxc { + if actualEnvLxc[i] != "container=lxc" { + actualEnv = append(actualEnv, actualEnvLxc[i]) + } + } + sort.Strings(actualEnv) + + goodEnv := []string{ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOME=/root2", + "HOSTNAME=bar", + } + sort.Strings(goodEnv) + if len(goodEnv) != len(actualEnv) { + c.Fatalf("Wrong environment: should be %d variables, not: %q\n", len(goodEnv), strings.Join(actualEnv, ", ")) + } + for i := range goodEnv { + if actualEnv[i] != goodEnv[i] { + c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) + } + } +} + +func (s *DockerSuite) TestRunContainerNetwork(c *check.C) { + dockerCmd(c, "run", "busybox", "ping", "-c", "1", "127.0.0.1") +} + +func (s *DockerSuite) TestRunNetHostNotAllowedWithLinks(c *check.C) { + dockerCmd(c, "run", "--name", 
"linked", "busybox", "true") + + _, _, err := dockerCmdWithError(c, "run", "--net=host", "--link", "linked:linked", "busybox", "true") + if err == nil { + c.Fatal("Expected error") + } +} + +// #7851 hostname outside container shows FQDN, inside only shortname +// For testing purposes it is not required to set host's hostname directly +// and use "--net=host" (as the original issue submitter did), as the same +// codepath is executed with "docker run -h ". Both were manually +// tested, but this testcase takes the simpler path of using "run -h .." +func (s *DockerSuite) TestRunFullHostnameSet(c *check.C) { + out, _ := dockerCmd(c, "run", "-h", "foo.bar.baz", "busybox", "hostname") + if actual := strings.Trim(out, "\r\n"); actual != "foo.bar.baz" { + c.Fatalf("expected hostname 'foo.bar.baz', received %s", actual) + } +} + +func (s *DockerSuite) TestRunPrivilegedCanMknod(c *check.C) { + out, _ := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunUnprivilegedCanMknod(c *check.C) { + out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapDropInvalid(c *check.C) { + out, _, err := dockerCmdWithError(c, "run", "--cap-drop=CHPASS", "busybox", "ls") + if err == nil { + c.Fatal(err, out) + } +} + +func (s *DockerSuite) TestRunCapDropCannotMknod(c *check.C) { + out, _, err := dockerCmdWithError(c, "run", "--cap-drop=MKNOD", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapDropCannotMknodLowerCase(c *check.C) { + out, _, err := dockerCmdWithError(c, "run", "--cap-drop=mknod", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapDropALLCannotMknod(c *check.C) { + out, _, err := dockerCmdWithError(c, "run", "--cap-drop=ALL", "--cap-add=SETGID", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapDropALLAddMknodCanMknod(c *check.C) { + out, _ := dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=MKNOD", "--cap-add=SETGID", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapAddInvalid(c *check.C) { + out, _, err := dockerCmdWithError(c, "run", "--cap-add=CHPASS", "busybox", "ls") + if err == nil { + c.Fatal(err, out) + } +} + +func (s *DockerSuite) TestRunCapAddCanDownInterface(c *check.C) { + out, _ := dockerCmd(c, "run", "--cap-add=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) 
TestRunCapAddALLCanDownInterface(c *check.C) { + out, _ := dockerCmd(c, "run", "--cap-add=ALL", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapAddALLDropNetAdminCanDownInterface(c *check.C) { + out, _, err := dockerCmdWithError(c, "run", "--cap-add=ALL", "--cap-drop=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunGroupAdd(c *check.C) { + testRequires(c, NativeExecDriver) + out, _ := dockerCmd(c, "run", "--group-add=audio", "--group-add=dbus", "--group-add=777", "busybox", "sh", "-c", "id") + + groupsList := "uid=0(root) gid=0(root) groups=10(wheel),29(audio),81(dbus),777" + if actual := strings.Trim(out, "\r\n"); actual != groupsList { + c.Fatalf("expected output %s received %s", groupsList, actual) + } +} + +func (s *DockerSuite) TestRunPrivilegedCanMount(c *check.C) { + out, _ := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunUnprivilegedCannotMount(c *check.C) { + out, _, err := dockerCmdWithError(c, "run", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok") + + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunSysNotWritableInNonPrivilegedContainers(c *check.C) { + if _, code, err := dockerCmdWithError(c, "run", "busybox", "touch", "/sys/kernel/profiling"); err == nil || code == 0 { + c.Fatal("sys should not be writable in a non privileged container") + } +} + +func (s *DockerSuite) TestRunSysWritableInPrivilegedContainers(c *check.C) { + if _, code, err := dockerCmdWithError(c, "run", "--privileged", "busybox", "touch", "/sys/kernel/profiling"); err != nil || code != 0 { + c.Fatalf("sys should be writable in privileged container") + } +} + +func (s *DockerSuite) TestRunProcNotWritableInNonPrivilegedContainers(c *check.C) { + if _, code, err := dockerCmdWithError(c, "run", "busybox", "touch", "/proc/sysrq-trigger"); err == nil || code == 0 { + c.Fatal("proc should not be writable in a non privileged container") + } +} + +func (s *DockerSuite) TestRunProcWritableInPrivilegedContainers(c *check.C) { + if _, code := dockerCmd(c, "run", "--privileged", "busybox", "touch", "/proc/sysrq-trigger"); code != 0 { + c.Fatalf("proc should be writable in privileged container") + } +} + +func (s *DockerSuite) TestRunWithCpuset(c *check.C) { + if _, code := dockerCmd(c, "run", "--cpuset", "0", "busybox", "true"); code != 0 { + c.Fatalf("container should run successfully with cpuset of 0") + } +} + +func (s *DockerSuite) TestRunWithCpusetCpus(c *check.C) { + if _, code := dockerCmd(c, "run", "--cpuset-cpus", "0", "busybox", "true"); code != 0 { + c.Fatalf("container should run successfully with cpuset-cpus of 0") + } +} + +func (s *DockerSuite) TestRunWithCpusetMems(c *check.C) { + if _, code := dockerCmd(c, "run", "--cpuset-mems", "0", "busybox", "true"); code != 0 { + c.Fatalf("container should run successfully with cpuset-mems of 0") + } +} + +func (s *DockerSuite) 
TestRunWithBlkioWeight(c *check.C) { + if _, code := dockerCmd(c, "run", "--blkio-weight", "300", "busybox", "true"); code != 0 { + c.Fatalf("container should run successfully with blkio-weight of 300") + } +} + +func (s *DockerSuite) TestRunWithBlkioInvalidWeight(c *check.C) { + if _, _, err := dockerCmdWithError(c, "run", "--blkio-weight", "5", "busybox", "true"); err == nil { + c.Fatalf("run with invalid blkio-weight should fail") + } +} + +func (s *DockerSuite) TestRunDeviceNumbers(c *check.C) { + out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "ls -l /dev/null") + deviceLineFields := strings.Fields(out) + deviceLineFields[6] = "" + deviceLineFields[7] = "" + deviceLineFields[8] = "" + expected := []string{"crw-rw-rw-", "1", "root", "root", "1,", "3", "", "", "", "/dev/null"} + + if !(reflect.DeepEqual(deviceLineFields, expected)) { + c.Fatalf("expected output\ncrw-rw-rw- 1 root root 1, 3 May 24 13:29 /dev/null\n received\n %s\n", out) + } +} + +func (s *DockerSuite) TestRunThatCharacterDevicesActLikeCharacterDevices(c *check.C) { + out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "dd if=/dev/zero of=/zero bs=1k count=5 2> /dev/null ; du -h /zero") + if actual := strings.Trim(out, "\r\n"); actual[0] == '0' { + c.Fatalf("expected a new file called /zero to be created that is greater than 0 bytes long, but du says: %s", actual) + } +} + +func (s *DockerSuite) TestRunUnprivilegedWithChroot(c *check.C) { + dockerCmd(c, "run", "busybox", "chroot", "/", "true") +} + +func (s *DockerSuite) TestRunAddingOptionalDevices(c *check.C) { + out, _ := dockerCmd(c, "run", "--device", "/dev/zero:/dev/nulo", "busybox", "sh", "-c", "ls /dev/nulo") + if actual := strings.Trim(out, "\r\n"); actual != "/dev/nulo" { + c.Fatalf("expected output /dev/nulo, received %s", actual) + } +} + +func (s *DockerSuite) TestRunModeHostname(c *check.C) { + testRequires(c, SameHostDaemon) + + out, _ := dockerCmd(c, "run", "-h=testhostname", "busybox", "cat", "/etc/hostname") + + if actual := strings.Trim(out, "\r\n"); actual != "testhostname" { + c.Fatalf("expected 'testhostname', but says: %q", actual) + } + + out, _ = dockerCmd(c, "run", "--net=host", "busybox", "cat", "/etc/hostname") + + hostname, err := os.Hostname() + if err != nil { + c.Fatal(err) + } + if actual := strings.Trim(out, "\r\n"); actual != hostname { + c.Fatalf("expected %q, but says: %q", hostname, actual) + } +} + +func (s *DockerSuite) TestRunRootWorkdir(c *check.C) { + out, _ := dockerCmd(c, "run", "--workdir", "/", "busybox", "pwd") + if out != "/\n" { + c.Fatalf("pwd returned %q (expected /\\n)", out) + } +} + +func (s *DockerSuite) TestRunAllowBindMountingRoot(c *check.C) { + dockerCmd(c, "run", "-v", "/:/host", "busybox", "ls", "/host") +} + +func (s *DockerSuite) TestRunDisallowBindMountingRootToRoot(c *check.C) { + out, _, err := dockerCmdWithError(c, "run", "-v", "/:/", "busybox", "ls", "/host") + if err == nil { + c.Fatal(out, err) + } +} + +// Verify that a container gets default DNS when only localhost resolvers exist +func (s *DockerSuite) TestRunDnsDefaultOptions(c *check.C) { + testRequires(c, SameHostDaemon) + + // preserve original resolv.conf for restoring after test + origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf") + if os.IsNotExist(err) { + c.Fatalf("/etc/resolv.conf does not exist") + } + // defer restoring the original conf + defer func() { + if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil { + c.Fatal(err) + } + }() + + // test 3 cases: standard IPv4 localhost, commented out 
localhost, and IPv6 localhost + // 2 are removed from the file at container start, and the 3rd (commented out) one is ignored by + // GetNameservers(), leading to a replacement of nameservers with the default set + tmpResolvConf := []byte("nameserver 127.0.0.1\n#nameserver 127.0.2.1\nnameserver ::1") + if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil { + c.Fatal(err) + } + + actual, _ := dockerCmd(c, "run", "busybox", "cat", "/etc/resolv.conf") + // check that the actual defaults are appended to the commented out + // localhost resolver (which should be preserved) + // NOTE: if we ever change the defaults from google dns, this will break + expected := "#nameserver 127.0.2.1\n\nnameserver 8.8.8.8\nnameserver 8.8.4.4" + if actual != expected { + c.Fatalf("expected resolv.conf to be: %q, but was: %q", expected, actual) + } +} + +func (s *DockerSuite) TestRunDnsOptions(c *check.C) { + out, stderr, _ := dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--dns-search=mydomain", "busybox", "cat", "/etc/resolv.conf") + + // The client will get a warning on stderr when setting DNS to a localhost address; verify this: + if !strings.Contains(stderr, "Localhost DNS setting") { + c.Fatalf("Expected warning on stderr about localhost resolver, but got %q", stderr) + } + + actual := strings.Replace(strings.Trim(out, "\r\n"), "\n", " ", -1) + if actual != "nameserver 127.0.0.1 search mydomain" { + c.Fatalf("expected 'nameserver 127.0.0.1 search mydomain', but says: %q", actual) + } + + out, stderr, _ = dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--dns-search=.", "busybox", "cat", "/etc/resolv.conf") + + actual = strings.Replace(strings.Trim(strings.Trim(out, "\r\n"), " "), "\n", " ", -1) + if actual != "nameserver 127.0.0.1" { + c.Fatalf("expected 'nameserver 127.0.0.1', but says: %q", actual) + } +} + +func (s *DockerSuite) TestRunDnsOptionsBasedOnHostResolvConf(c *check.C) { + testRequires(c, SameHostDaemon) + + origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf") + if os.IsNotExist(err) { + c.Fatalf("/etc/resolv.conf does not exist") + } + + hostNameservers := resolvconf.GetNameservers(origResolvConf) + hostSearch := resolvconf.GetSearchDomains(origResolvConf) + + var out string + out, _ = dockerCmd(c, "run", "--dns=127.0.0.1", "busybox", "cat", "/etc/resolv.conf") + + if actualNameservers := resolvconf.GetNameservers([]byte(out)); string(actualNameservers[0]) != "127.0.0.1" { + c.Fatalf("expected '127.0.0.1', but says: %q", string(actualNameservers[0])) + } + + actualSearch := resolvconf.GetSearchDomains([]byte(out)) + if len(actualSearch) != len(hostSearch) { + c.Fatalf("expected %d search domain(s), but it has: %d", len(hostSearch), len(actualSearch)) + } + for i := range actualSearch { + if actualSearch[i] != hostSearch[i] { + c.Fatalf("expected %q domain, but says: %q", hostSearch[i], actualSearch[i]) + } + } + + out, _ = dockerCmd(c, "run", "--dns-search=mydomain", "busybox", "cat", "/etc/resolv.conf") + + actualNameservers := resolvconf.GetNameservers([]byte(out)) + if len(actualNameservers) != len(hostNameservers) { + c.Fatalf("expected %d nameserver(s), but it has: %d", len(hostNameservers), len(actualNameservers)) + } + for i := range actualNameservers { + if actualNameservers[i] != hostNameservers[i] { + c.Fatalf("expected %q nameserver, but says: %q", hostNameservers[i], actualNameservers[i]) + } + } + + if actualSearch = resolvconf.GetSearchDomains([]byte(out)); string(actualSearch[0]) != "mydomain" { + c.Fatalf("expected 'mydomain', but says: 
%q", string(actualSearch[0])) + } + + // test with file + tmpResolvConf := []byte("search example.com\nnameserver 12.34.56.78\nnameserver 127.0.0.1") + if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil { + c.Fatal(err) + } + // put the old resolvconf back + defer func() { + if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil { + c.Fatal(err) + } + }() + + resolvConf, err := ioutil.ReadFile("/etc/resolv.conf") + if os.IsNotExist(err) { + c.Fatalf("/etc/resolv.conf does not exist") + } + + hostNamservers = resolvconf.GetNameservers(resolvConf) + hostSearch = resolvconf.GetSearchDomains(resolvConf) + + out, _ = dockerCmd(c, "run", "busybox", "cat", "/etc/resolv.conf") + if actualNameservers = resolvconf.GetNameservers([]byte(out)); string(actualNameservers[0]) != "12.34.56.78" || len(actualNameservers) != 1 { + c.Fatalf("expected '12.34.56.78', but has: %v", actualNameservers) + } + + actualSearch = resolvconf.GetSearchDomains([]byte(out)) + if len(actualSearch) != len(hostSearch) { + c.Fatalf("expected %q search domain(s), but it has: %q", len(hostSearch), len(actualSearch)) + } + for i := range actualSearch { + if actualSearch[i] != hostSearch[i] { + c.Fatalf("expected %q domain, but says: %q", actualSearch[i], hostSearch[i]) + } + } +} + +// Test to see if a non-root user can resolve a DNS name and reach out to it. Also +// check if the container resolv.conf file has atleast 0644 perm. +func (s *DockerSuite) TestRunNonRootUserResolvName(c *check.C) { + testRequires(c, SameHostDaemon, Network) + + dockerCmd(c, "run", "--name=testperm", "--user=default", "busybox", "ping", "-c", "1", "apt.dockerproject.org") + + cID, err := getIDByName("testperm") + if err != nil { + c.Fatal(err) + } + + fmode := (os.FileMode)(0644) + finfo, err := os.Stat(containerStorageFile(cID, "resolv.conf")) + if err != nil { + c.Fatal(err) + } + + if (finfo.Mode() & fmode) != fmode { + c.Fatalf("Expected container resolv.conf mode to be atleast %s, instead got %s", fmode.String(), finfo.Mode().String()) + } +} + +// Test if container resolv.conf gets updated the next time it restarts +// if host /etc/resolv.conf has changed. This only applies if the container +// uses the host's /etc/resolv.conf and does not have any dns options provided. +func (s *DockerSuite) TestRunResolvconfUpdate(c *check.C) { + testRequires(c, SameHostDaemon) + + tmpResolvConf := []byte("search pommesfrites.fr\nnameserver 12.34.56.78") + tmpLocalhostResolvConf := []byte("nameserver 127.0.0.1") + + //take a copy of resolv.conf for restoring after test completes + resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf") + if err != nil { + c.Fatal(err) + } + + // This test case is meant to test monitoring resolv.conf when it is + // a regular file not a bind mounc. So we unmount resolv.conf and replace + // it with a file containing the original settings. + cmd := exec.Command("umount", "/etc/resolv.conf") + if _, err = runCommand(cmd); err != nil { + c.Fatal(err) + } + + //cleanup + defer func() { + if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { + c.Fatal(err) + } + }() + + //1. 
test that a restarting container gets an updated resolv.conf + dockerCmd(c, "run", "--name='first'", "busybox", "true") + containerID1, err := getIDByName("first") + if err != nil { + c.Fatal(err) + } + + // replace resolv.conf with our temporary copy + bytesResolvConf := []byte(tmpResolvConf) + if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil { + c.Fatal(err) + } + + // start the container again to pick up changes + dockerCmd(c, "start", "first") + + // check for update in container + containerResolv, err := readContainerFile(containerID1, "resolv.conf") + if err != nil { + c.Fatal(err) + } + if !bytes.Equal(containerResolv, bytesResolvConf) { + c.Fatalf("Restarted container does not have updated resolv.conf; expected %q, got %q", tmpResolvConf, string(containerResolv)) + } + + /* //make a change to resolv.conf (in this case replacing our tmp copy with orig copy) + if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { + c.Fatal(err) + } */ + //2. test that a restarting container does not receive resolv.conf updates + // if it modified the container copy of the starting point resolv.conf + dockerCmd(c, "run", "--name='second'", "busybox", "sh", "-c", "echo 'search mylittlepony.com' >>/etc/resolv.conf") + containerID2, err := getIDByName("second") + if err != nil { + c.Fatal(err) + } + + //make a change to resolv.conf (in this case replacing our tmp copy with orig copy) + if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { + c.Fatal(err) + } + + // start the container again + dockerCmd(c, "start", "second") + + // check for update in container + containerResolv, err = readContainerFile(containerID2, "resolv.conf") + if err != nil { + c.Fatal(err) + } + + if bytes.Equal(containerResolv, resolvConfSystem) { + c.Fatalf("Restarting a container after container updated resolv.conf should not pick up host changes; expected %q, got %q", string(containerResolv), string(resolvConfSystem)) + } + + //3. test that a running container's resolv.conf is not modified while running + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + runningContainerID := strings.TrimSpace(out) + + // replace resolv.conf + if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil { + c.Fatal(err) + } + + // check for update in container + containerResolv, err = readContainerFile(runningContainerID, "resolv.conf") + if err != nil { + c.Fatal(err) + } + + if bytes.Equal(containerResolv, bytesResolvConf) { + c.Fatalf("Running container should not have updated resolv.conf; expected %q, got %q", string(resolvConfSystem), string(containerResolv)) + } + + //4. test that a running container's resolv.conf is updated upon restart + // (the above container is still running..) + dockerCmd(c, "restart", runningContainerID) + + // check for update in container + containerResolv, err = readContainerFile(runningContainerID, "resolv.conf") + if err != nil { + c.Fatal(err) + } + if !bytes.Equal(containerResolv, bytesResolvConf) { + c.Fatalf("Restarted container should have updated resolv.conf; expected %q, got %q", string(bytesResolvConf), string(containerResolv)) + } + + //5. 
test that additions of a localhost resolver are cleaned from + // host resolv.conf before updating container's resolv.conf copies + + // replace resolv.conf with a localhost-only nameserver copy + bytesResolvConf = []byte(tmpLocalhostResolvConf) + if err = ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil { + c.Fatal(err) + } + + // start the container again to pick up changes + dockerCmd(c, "start", "first") + + // our first exited container ID should have been updated, but with default DNS + // after the cleanup of resolv.conf found only a localhost nameserver: + containerResolv, err = readContainerFile(containerID1, "resolv.conf") + if err != nil { + c.Fatal(err) + } + + expected := "\nnameserver 8.8.8.8\nnameserver 8.8.4.4" + if !bytes.Equal(containerResolv, []byte(expected)) { + c.Fatalf("Container does not have cleaned/replaced DNS in resolv.conf; expected %q, got %q", expected, string(containerResolv)) + } + + //6. Test that replacing (as opposed to modifying) resolv.conf triggers an update + // of containers' resolv.conf. + + // Restore the original resolv.conf + if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { + c.Fatal(err) + } + + // Run the container so it picks up the old settings + dockerCmd(c, "run", "--name='third'", "busybox", "true") + containerID3, err := getIDByName("third") + if err != nil { + c.Fatal(err) + } + + // Create a modified resolv.conf.aside and override resolv.conf with it + bytesResolvConf = []byte(tmpResolvConf) + if err := ioutil.WriteFile("/etc/resolv.conf.aside", bytesResolvConf, 0644); err != nil { + c.Fatal(err) + } + + err = os.Rename("/etc/resolv.conf.aside", "/etc/resolv.conf") + if err != nil { + c.Fatal(err) + } + + // start the container again to pick up changes + dockerCmd(c, "start", "third") + + // check for update in container + containerResolv, err = readContainerFile(containerID3, "resolv.conf") + if err != nil { + c.Fatal(err) + } + if !bytes.Equal(containerResolv, bytesResolvConf) { + c.Fatalf("Stopped container does not have updated resolv.conf; expected\n%q\n got\n%q", tmpResolvConf, string(containerResolv)) + } + + // cleanup: restoring the original resolv.conf happens in the defer func() +} + +func (s *DockerSuite) TestRunAddHost(c *check.C) { + out, _ := dockerCmd(c, "run", "--add-host=extra:86.75.30.9", "busybox", "grep", "extra", "/etc/hosts") + + actual := strings.Trim(out, "\r\n") + if actual != "86.75.30.9\textra" { + c.Fatalf("expected '86.75.30.9\textra', but says: %q", actual) + } +} + +// Regression test for #6983 +func (s *DockerSuite) TestRunAttachStdErrOnlyTTYMode(c *check.C) { + _, exitCode := dockerCmd(c, "run", "-t", "-a", "stderr", "busybox", "true") + if exitCode != 0 { + c.Fatalf("Container should have exited with error code 0") + } +} + +// Regression test for #6983 +func (s *DockerSuite) TestRunAttachStdOutOnlyTTYMode(c *check.C) { + _, exitCode := dockerCmd(c, "run", "-t", "-a", "stdout", "busybox", "true") + if exitCode != 0 { + c.Fatalf("Container should have exited with error code 0") + } +} + +// Regression test for #6983 +func (s *DockerSuite) TestRunAttachStdOutAndErrTTYMode(c *check.C) { + _, exitCode := dockerCmd(c, "run", "-t", "-a", "stdout", "-a", "stderr", "busybox", "true") + if exitCode != 0 { + c.Fatalf("Container should have exited with error code 0") + } +} + +// Test for #10388 - this will run the same test as TestRunAttachStdOutAndErrTTYMode +// but using --attach instead of -a to make sure we read the flag correctly +func (s *DockerSuite) 
TestRunAttachWithDetach(c *check.C) { + cmd := exec.Command(dockerBinary, "run", "-d", "--attach", "stdout", "busybox", "true") + _, stderr, _, err := runCommandWithStdoutStderr(cmd) + if err == nil { + c.Fatal("Container should have exited with error code different than 0") + } else if !strings.Contains(stderr, "Conflicting options: -a and -d") { + c.Fatal("Should have returned an error about conflicting options -a and -d") + } +} + +func (s *DockerSuite) TestRunState(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + + id := strings.TrimSpace(out) + state, err := inspectField(id, "State.Running") + c.Assert(err, check.IsNil) + if state != "true" { + c.Fatal("Container state is 'not running'") + } + pid1, err := inspectField(id, "State.Pid") + c.Assert(err, check.IsNil) + if pid1 == "0" { + c.Fatal("Container state Pid 0") + } + + dockerCmd(c, "stop", id) + state, err = inspectField(id, "State.Running") + c.Assert(err, check.IsNil) + if state != "false" { + c.Fatal("Container state is 'running'") + } + pid2, err := inspectField(id, "State.Pid") + c.Assert(err, check.IsNil) + if pid2 == pid1 { + c.Fatalf("Container state Pid %s should have changed after stop (was %s)", pid2, pid1) + } + + dockerCmd(c, "start", id) + state, err = inspectField(id, "State.Running") + c.Assert(err, check.IsNil) + if state != "true" { + c.Fatal("Container state is 'not running'") + } + pid3, err := inspectField(id, "State.Pid") + c.Assert(err, check.IsNil) + if pid3 == pid1 { + c.Fatalf("Container state Pid %s should have changed after restart (was %s)", pid3, pid1) + } +} + +// Test for #1737 +func (s *DockerSuite) TestRunCopyVolumeUidGid(c *check.C) { + name := "testrunvolumesuidgid" + _, err := buildImage(name, + `FROM busybox + RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd + RUN echo 'dockerio:x:1001:' >> /etc/group + RUN mkdir -p /hello && touch /hello/test && chown dockerio.dockerio /hello`, + true) + if err != nil { + c.Fatal(err) + } + + // Test that the uid and gid is copied from the image to the volume + out, _ := dockerCmd(c, "run", "--rm", "-v", "/hello", name, "sh", "-c", "ls -l / | grep hello | awk '{print $3\":\"$4}'") + out = strings.TrimSpace(out) + if out != "dockerio:dockerio" { + c.Fatalf("Wrong /hello ownership: %s, expected dockerio:dockerio", out) + } +} + +// Test for #1582 +func (s *DockerSuite) TestRunCopyVolumeContent(c *check.C) { + name := "testruncopyvolumecontent" + _, err := buildImage(name, + `FROM busybox + RUN mkdir -p /hello/local && echo hello > /hello/local/world`, + true) + if err != nil { + c.Fatal(err) + } + + // Test that the content is copied from the image to the volume + out, _ := dockerCmd(c, "run", "--rm", "-v", "/hello", name, "find", "/hello") + if !(strings.Contains(out, "/hello/local/world") && strings.Contains(out, "/hello/local")) { + c.Fatal("Container failed to transfer content to volume") + } +} + +func (s *DockerSuite) TestRunCleanupCmdOnEntrypoint(c *check.C) { + name := "testruncmdcleanuponentrypoint" + if _, err := buildImage(name, + `FROM busybox + ENTRYPOINT ["echo"] + CMD ["testingpoint"]`, + true); err != nil { + c.Fatal(err) + } + + out, exit := dockerCmd(c, "run", "--entrypoint", "whoami", name) + if exit != 0 { + c.Fatalf("expected exit code 0 received %d, out: %q", exit, out) + } + out = strings.TrimSpace(out) + if out != "root" { + c.Fatalf("Expected output root, got %q", out) + } +} + +// TestRunWorkdirExistsAndIsFile checks that 'docker run -w' with an existing file as the working directory is detected and rejected +func (s *DockerSuite) TestRunWorkdirExistsAndIsFile(c *check.C) { 
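+ // /bin/cat exists in the busybox image as a regular file, so the daemon
+ // cannot create it as a directory; the run below is expected to fail with
+ // exit code 1 and a "not a directory" error instead of starting.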
+ out, exit, err := dockerCmdWithError(c, "run", "-w", "/bin/cat", "busybox") + if !(err != nil && exit == 1 && strings.Contains(out, "Cannot mkdir: /bin/cat is not a directory")) { + c.Fatalf("Docker must complain about making the dir, but we got out: %s, exit: %d, err: %s", out, exit, err) + } +} + +func (s *DockerSuite) TestRunExitOnStdinClose(c *check.C) { + name := "testrunexitonstdinclose" + runCmd := exec.Command(dockerBinary, "run", "--name", name, "-i", "busybox", "/bin/cat") + + stdin, err := runCmd.StdinPipe() + if err != nil { + c.Fatal(err) + } + stdout, err := runCmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + + if err := runCmd.Start(); err != nil { + c.Fatal(err) + } + if _, err := stdin.Write([]byte("hello\n")); err != nil { + c.Fatal(err) + } + + r := bufio.NewReader(stdout) + line, err := r.ReadString('\n') + if err != nil { + c.Fatal(err) + } + line = strings.TrimSpace(line) + if line != "hello" { + c.Fatalf("Output should be 'hello', got %q", line) + } + if err := stdin.Close(); err != nil { + c.Fatal(err) + } + finish := make(chan error) + go func() { + finish <- runCmd.Wait() + close(finish) + }() + select { + case err := <-finish: + c.Assert(err, check.IsNil) + case <-time.After(1 * time.Second): + c.Fatal("docker run failed to exit on stdin close") + } + state, err := inspectField(name, "State.Running") + c.Assert(err, check.IsNil) + + if state != "false" { + c.Fatal("Container must be stopped after stdin is closed") + } +} + +// Test for #2267 +func (s *DockerSuite) TestRunWriteHostsFileAndNotCommit(c *check.C) { + name := "writehosts" + out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hosts && cat /etc/hosts") + if !strings.Contains(out, "test2267") { + c.Fatal("/etc/hosts should contain 'test2267'") + } + + out, _ = dockerCmd(c, "diff", name) + if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) { + c.Fatal("diff should be empty") + } +} + +func eqToBaseDiff(out string, c *check.C) bool { + out1, _ := dockerCmd(c, "run", "-d", "busybox", "echo", "hello") + cID := strings.TrimSpace(out1) + + baseDiff, _ := dockerCmd(c, "diff", cID) + baseArr := strings.Split(baseDiff, "\n") + sort.Strings(baseArr) + outArr := strings.Split(out, "\n") + sort.Strings(outArr) + return sliceEq(baseArr, outArr) +} + +func sliceEq(a, b []string) bool { + if len(a) != len(b) { + return false + } + + for i := range a { + if a[i] != b[i] { + return false + } + } + + return true +} + +// Test for #2267 +func (s *DockerSuite) TestRunWriteHostnameFileAndNotCommit(c *check.C) { + name := "writehostname" + out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hostname && cat /etc/hostname") + if !strings.Contains(out, "test2267") { + c.Fatal("/etc/hostname should contain 'test2267'") + } + + out, _ = dockerCmd(c, "diff", name) + if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) { + c.Fatal("diff should be empty") + } +} + +// Test for #2267 +func (s *DockerSuite) TestRunWriteResolvFileAndNotCommit(c *check.C) { + name := "writeresolv" + out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/resolv.conf && cat /etc/resolv.conf") + if !strings.Contains(out, "test2267") { + c.Fatal("/etc/resolv.conf should contain 'test2267'") + } + + out, _ = dockerCmd(c, "diff", name) + if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) { + c.Fatal("diff should be empty") + } +} + +func (s *DockerSuite) TestRunWithBadDevice(c *check.C) { + name := 
"baddevice" + out, _, err := dockerCmdWithError(c, "run", "--name", name, "--device", "/etc", "busybox", "true") + + if err == nil { + c.Fatal("Run should fail with bad device") + } + expected := `"/etc": not a device node` + if !strings.Contains(out, expected) { + c.Fatalf("Output should contain %q, actual out: %q", expected, out) + } +} + +func (s *DockerSuite) TestRunEntrypoint(c *check.C) { + name := "entrypoint" + out, _ := dockerCmd(c, "run", "--name", name, "--entrypoint", "/bin/echo", "busybox", "-n", "foobar") + + expected := "foobar" + if out != expected { + c.Fatalf("Output should be %q, actual out: %q", expected, out) + } +} + +func (s *DockerSuite) TestRunBindMounts(c *check.C) { + testRequires(c, SameHostDaemon) + + tmpDir, err := ioutil.TempDir("", "docker-test-container") + if err != nil { + c.Fatal(err) + } + + defer os.RemoveAll(tmpDir) + writeFile(path.Join(tmpDir, "touch-me"), "", c) + + // Test reading from a read-only bind mount + out, _ := dockerCmd(c, "run", "-v", fmt.Sprintf("%s:/tmp:ro", tmpDir), "busybox", "ls", "/tmp") + if !strings.Contains(out, "touch-me") { + c.Fatal("Container failed to read from bind mount") + } + + // test writing to bind mount + dockerCmd(c, "run", "-v", fmt.Sprintf("%s:/tmp:rw", tmpDir), "busybox", "touch", "/tmp/holla") + + readFile(path.Join(tmpDir, "holla"), c) // Will fail if the file doesn't exist + + // test mounting to an illegal destination directory + _, _, err = dockerCmdWithError(c, "run", "-v", fmt.Sprintf("%s:.", tmpDir), "busybox", "ls", ".") + if err == nil { + c.Fatal("Container bind mounted illegal directory") + } + + // test mount a file + dockerCmd(c, "run", "-v", fmt.Sprintf("%s/holla:/tmp/holla:rw", tmpDir), "busybox", "sh", "-c", "echo -n 'yotta' > /tmp/holla") + content := readFile(path.Join(tmpDir, "holla"), c) // Will fail if the file doesn't exist + expected := "yotta" + if content != expected { + c.Fatalf("Output should be %q, actual out: %q", expected, content) + } +} + +// Ensure that CIDFile gets deleted if it's empty +// Perform this test by making `docker run` fail +func (s *DockerSuite) TestRunCidFileCleanupIfEmpty(c *check.C) { + tmpDir, err := ioutil.TempDir("", "TestRunCidFile") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + tmpCidFile := path.Join(tmpDir, "cid") + + out, _, err := dockerCmdWithError(c, "run", "--cidfile", tmpCidFile, "emptyfs") + if err == nil { + c.Fatalf("Run without command must fail. out=%s", out) + } else if !strings.Contains(out, "No command specified") { + c.Fatalf("Run without command failed with wrong output. 
out=%s\nerr=%v", out, err) + } + + if _, err := os.Stat(tmpCidFile); err == nil { + c.Fatalf("empty CIDFile %q should've been deleted", tmpCidFile) + } +} + +// #2098 - Docker cidFiles only contain short version of the containerId +//sudo docker run --cidfile /tmp/docker_tesc.cid ubuntu echo "test" +// TestRunCidFile tests that run --cidfile returns the longid +func (s *DockerSuite) TestRunCidFileCheckIDLength(c *check.C) { + tmpDir, err := ioutil.TempDir("", "TestRunCidFile") + if err != nil { + c.Fatal(err) + } + tmpCidFile := path.Join(tmpDir, "cid") + defer os.RemoveAll(tmpDir) + + out, _ := dockerCmd(c, "run", "-d", "--cidfile", tmpCidFile, "busybox", "true") + + id := strings.TrimSpace(out) + buffer, err := ioutil.ReadFile(tmpCidFile) + if err != nil { + c.Fatal(err) + } + cid := string(buffer) + if len(cid) != 64 { + c.Fatalf("--cidfile should be a long id, not %q", id) + } + if cid != id { + c.Fatalf("cid must be equal to %s, got %s", id, cid) + } +} + +func (s *DockerSuite) TestRunSetMacAddress(c *check.C) { + mac := "12:34:56:78:9a:bc" + + out, _ := dockerCmd(c, "run", "-i", "--rm", fmt.Sprintf("--mac-address=%s", mac), "busybox", "/bin/sh", "-c", "ip link show eth0 | tail -1 | awk '{print $2}'") + + actualMac := strings.TrimSpace(out) + if actualMac != mac { + c.Fatalf("Set MAC address with --mac-address failed. The container has an incorrect MAC address: %q, expected: %q", actualMac, mac) + } +} + +func (s *DockerSuite) TestRunInspectMacAddress(c *check.C) { + mac := "12:34:56:78:9a:bc" + out, _ := dockerCmd(c, "run", "-d", "--mac-address="+mac, "busybox", "top") + + id := strings.TrimSpace(out) + inspectedMac, err := inspectField(id, "NetworkSettings.MacAddress") + c.Assert(err, check.IsNil) + if inspectedMac != mac { + c.Fatalf("docker inspect outputs wrong MAC address: %q, should be: %q", inspectedMac, mac) + } +} + +// test docker run use a invalid mac address +func (s *DockerSuite) TestRunWithInvalidMacAddress(c *check.C) { + out, _, err := dockerCmdWithError(c, "run", "--mac-address", "92:d0:c6:0a:29", "busybox") + //use a invalid mac address should with a error out + if err == nil || !strings.Contains(out, "is not a valid mac address") { + c.Fatalf("run with an invalid --mac-address should with error out") + } +} + +func (s *DockerSuite) TestRunDeallocatePortOnMissingIptablesRule(c *check.C) { + testRequires(c, SameHostDaemon) + + out, _ := dockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top") + + id := strings.TrimSpace(out) + ip, err := inspectField(id, "NetworkSettings.IPAddress") + c.Assert(err, check.IsNil) + iptCmd := exec.Command("iptables", "-D", "DOCKER", "-d", fmt.Sprintf("%s/32", ip), + "!", "-i", "docker0", "-o", "docker0", "-p", "tcp", "-m", "tcp", "--dport", "23", "-j", "ACCEPT") + out, _, err = runCommandWithOutput(iptCmd) + if err != nil { + c.Fatal(err, out) + } + if err := deleteContainer(id); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top") +} + +func (s *DockerSuite) TestRunPortInUse(c *check.C) { + testRequires(c, SameHostDaemon) + + port := "1234" + dockerCmd(c, "run", "-d", "-p", port+":80", "busybox", "top") + + out, _, err := dockerCmdWithError(c, "run", "-d", "-p", port+":80", "busybox", "top") + if err == nil { + c.Fatalf("Binding on used port must fail") + } + if !strings.Contains(out, "port is already allocated") { + c.Fatalf("Out must be about \"port is already allocated\", got %s", out) + } +} + +// https://github.com/docker/docker/issues/12148 +func (s *DockerSuite) 
TestRunAllocatePortInReservedRange(c *check.C) { + // allocate a dynamic port to find the most recently allocated port + out, _ := dockerCmd(c, "run", "-d", "-P", "-p", "80", "busybox", "top") + + id := strings.TrimSpace(out) + out, _ = dockerCmd(c, "port", id, "80") + + strPort := strings.Split(strings.TrimSpace(out), ":")[1] + port, err := strconv.ParseInt(strPort, 10, 64) + if err != nil { + c.Fatalf("invalid port, got: %s, error: %s", strPort, err) + } + + // allocate a static port and a dynamic port together, with the static port + // taking the next port in the dynamic port range. + dockerCmd(c, "run", "-d", "-P", "-p", "80", "-p", fmt.Sprintf("%d:8080", port+1), "busybox", "top") +} + +// Regression test for #7792 +func (s *DockerSuite) TestRunMountOrdering(c *check.C) { + testRequires(c, SameHostDaemon) + + tmpDir, err := ioutil.TempDir("", "docker_nested_mount_test") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + tmpDir2, err := ioutil.TempDir("", "docker_nested_mount_test2") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir2) + + // Create a temporary tmpfs mount. + fooDir := filepath.Join(tmpDir, "foo") + if err := os.MkdirAll(filepath.Join(tmpDir, "foo"), 0755); err != nil { + c.Fatalf("failed to mkdir at %s - %s", fooDir, err) + } + + if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", fooDir), []byte{}, 0644); err != nil { + c.Fatal(err) + } + + if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir), []byte{}, 0644); err != nil { + c.Fatal(err) + } + + if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir2), []byte{}, 0644); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", + "-v", fmt.Sprintf("%s:/tmp", tmpDir), + "-v", fmt.Sprintf("%s:/tmp/foo", fooDir), + "-v", fmt.Sprintf("%s:/tmp/tmp2", tmpDir2), + "-v", fmt.Sprintf("%s:/tmp/tmp2/foo", fooDir), + "busybox:latest", "sh", "-c", + "ls /tmp/touch-me && ls /tmp/foo/touch-me && ls /tmp/tmp2/touch-me && ls /tmp/tmp2/foo/touch-me") +} + +// Regression test for https://github.com/docker/docker/issues/8259 +func (s *DockerSuite) TestRunReuseBindVolumeThatIsSymlink(c *check.C) { + testRequires(c, SameHostDaemon) + + tmpDir, err := ioutil.TempDir(os.TempDir(), "testlink") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + linkPath := os.TempDir() + "/testlink2" + if err := os.Symlink(tmpDir, linkPath); err != nil { + c.Fatal(err) + } + defer os.RemoveAll(linkPath) + + // Create first container + dockerCmd(c, "run", "-v", fmt.Sprintf("%s:/tmp/test", linkPath), "busybox", "ls", "-lh", "/tmp/test") + + // Create second container with same symlinked path + // This will fail if the referenced issue is hit with a "Volume exists" error + dockerCmd(c, "run", "-v", fmt.Sprintf("%s:/tmp/test", linkPath), "busybox", "ls", "-lh", "/tmp/test") +} + +//GH#10604: Test an "/etc" volume doesn't overlay special bind mounts in container +func (s *DockerSuite) TestRunCreateVolumeEtc(c *check.C) { + out, _ := dockerCmd(c, "run", "--dns=127.0.0.1", "-v", "/etc", "busybox", "cat", "/etc/resolv.conf") + if !strings.Contains(out, "nameserver 127.0.0.1") { + c.Fatal("/etc volume mount hides /etc/resolv.conf") + } + + out, _ = dockerCmd(c, "run", "-h=test123", "-v", "/etc", "busybox", "cat", "/etc/hostname") + if !strings.Contains(out, "test123") { + c.Fatal("/etc volume mount hides /etc/hostname") + } + + out, _ = dockerCmd(c, "run", "--add-host=test:192.168.0.1", "-v", "/etc", "busybox", "cat", "/etc/hosts") + out = strings.Replace(out, "\n", " ", -1) + if !strings.Contains(out, "192.168.0.1\ttest") 
|| !strings.Contains(out, "127.0.0.1\tlocalhost") { + c.Fatal("/etc volume mount hides /etc/hosts") + } +} + +func (s *DockerSuite) TestVolumesNoCopyData(c *check.C) { + if _, err := buildImage("dataimage", + `FROM busybox + RUN mkdir -p /foo + RUN touch /foo/bar`, + true); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "--name", "test", "-v", "/foo", "busybox") + + if out, _, err := dockerCmdWithError(c, "run", "--volumes-from", "test", "dataimage", "ls", "-lh", "/foo/bar"); err == nil || !strings.Contains(out, "No such file or directory") { + c.Fatalf("Data was copied on volumes-from but shouldn't be:\n%q", out) + } + + tmpDir := randomUnixTmpDirPath("docker_test_bind_mount_copy_data") + if out, _, err := dockerCmdWithError(c, "run", "-v", tmpDir+":/foo", "dataimage", "ls", "-lh", "/foo/bar"); err == nil || !strings.Contains(out, "No such file or directory") { + c.Fatalf("Data was copied on bind-mount but shouldn't be:\n%q", out) + } +} + +func (s *DockerSuite) TestRunNoOutputFromPullInStdout(c *check.C) { + // just run with unknown image + cmd := exec.Command(dockerBinary, "run", "asdfsg") + stdout := bytes.NewBuffer(nil) + cmd.Stdout = stdout + if err := cmd.Run(); err == nil { + c.Fatal("Run with unknown image should fail") + } + if stdout.Len() != 0 { + c.Fatalf("Stdout contains output from pull: %s", stdout) + } +} + +func (s *DockerSuite) TestRunVolumesCleanPaths(c *check.C) { + if _, err := buildImage("run_volumes_clean_paths", + `FROM busybox + VOLUME /foo/`, + true); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "-v", "/foo", "-v", "/bar/", "--name", "dark_helmet", "run_volumes_clean_paths") + + out, err := inspectMountSourceField("dark_helmet", "/foo/") + if err != errMountNotFound { + c.Fatalf("Found unexpected volume entry for '/foo/' in volumes\n%q", out) + } + + out, err = inspectMountSourceField("dark_helmet", "/foo") + c.Assert(err, check.IsNil) + if !strings.Contains(out, volumesConfigPath) { + c.Fatalf("Volume was not defined for /foo\n%q", out) + } + + out, err = inspectMountSourceField("dark_helmet", "/bar/") + if err != errMountNotFound { + c.Fatalf("Found unexpected volume entry for '/bar/' in volumes\n%q", out) + } + + out, err = inspectMountSourceField("dark_helmet", "/bar") + c.Assert(err, check.IsNil) + if !strings.Contains(out, volumesConfigPath) { + c.Fatalf("Volume was not defined for /bar\n%q", out) + } +} + +// Regression test for #3631 +func (s *DockerSuite) TestRunSlowStdoutConsumer(c *check.C) { + cont := exec.Command(dockerBinary, "run", "--rm", "busybox", "/bin/sh", "-c", "dd if=/dev/zero of=/dev/stdout bs=1024 count=2000 | catv") + + stdout, err := cont.StdoutPipe() + if err != nil { + c.Fatal(err) + } + + if err := cont.Start(); err != nil { + c.Fatal(err) + } + n, err := consumeWithSpeed(stdout, 10000, 5*time.Millisecond, nil) + if err != nil { + c.Fatal(err) + } + + expected := 2 * 1024 * 2000 + if n != expected { + c.Fatalf("Expected %d, got %d", expected, n) + } +} + +func (s *DockerSuite) TestRunAllowPortRangeThroughExpose(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "--expose", "3000-3003", "-P", "busybox", "top") + + id := strings.TrimSpace(out) + portstr, err := inspectFieldJSON(id, "NetworkSettings.Ports") + c.Assert(err, check.IsNil) + var ports nat.PortMap + if err = unmarshalJSON([]byte(portstr), &ports); err != nil { + c.Fatal(err) + } + for port, binding := range ports { + portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0]) + if portnum < 3000 || portnum > 3003 { + c.Fatalf("Port %d is out of range 
", portnum) + } + if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 { + c.Fatalf("Port is not mapped for the port %d", port) + } + } +} + +// test docker run expose a invalid port +func (s *DockerSuite) TestRunExposePort(c *check.C) { + out, _, err := dockerCmdWithError(c, "run", "--expose", "80000", "busybox") + //expose a invalid port should with a error out + if err == nil || !strings.Contains(out, "Invalid range format for --expose") { + c.Fatalf("run --expose a invalid port should with error out") + } +} + +func (s *DockerSuite) TestRunUnknownCommand(c *check.C) { + testRequires(c, NativeExecDriver) + out, _, _ := dockerCmdWithStdoutStderr(c, "create", "busybox", "/bin/nada") + + cID := strings.TrimSpace(out) + _, _, err := dockerCmdWithError(c, "start", cID) + c.Assert(err, check.NotNil) + + rc, err := inspectField(cID, "State.ExitCode") + c.Assert(err, check.IsNil) + if rc == "0" { + c.Fatalf("ExitCode(%v) cannot be 0", rc) + } +} + +func (s *DockerSuite) TestRunModeIpcHost(c *check.C) { + testRequires(c, SameHostDaemon) + + hostIpc, err := os.Readlink("/proc/1/ns/ipc") + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--ipc=host", "busybox", "readlink", "/proc/self/ns/ipc") + out = strings.Trim(out, "\n") + if hostIpc != out { + c.Fatalf("IPC different with --ipc=host %s != %s\n", hostIpc, out) + } + + out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/ipc") + out = strings.Trim(out, "\n") + if hostIpc == out { + c.Fatalf("IPC should be different without --ipc=host %s == %s\n", hostIpc, out) + } +} + +func (s *DockerSuite) TestRunModeIpcContainer(c *check.C) { + testRequires(c, SameHostDaemon) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + + id := strings.TrimSpace(out) + state, err := inspectField(id, "State.Running") + c.Assert(err, check.IsNil) + if state != "true" { + c.Fatal("Container state is 'not running'") + } + pid1, err := inspectField(id, "State.Pid") + c.Assert(err, check.IsNil) + + parentContainerIpc, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/ipc", pid1)) + if err != nil { + c.Fatal(err) + } + + out, _ = dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "readlink", "/proc/self/ns/ipc") + out = strings.Trim(out, "\n") + if parentContainerIpc != out { + c.Fatalf("IPC different with --ipc=container:%s %s != %s\n", id, parentContainerIpc, out) + } +} + +func (s *DockerSuite) TestRunModeIpcContainerNotExists(c *check.C) { + out, _, err := dockerCmdWithError(c, "run", "-d", "--ipc", "container:abcd1234", "busybox", "top") + if !strings.Contains(out, "abcd1234") || err == nil { + c.Fatalf("run IPC from a non exists container should with correct error out") + } +} + +func (s *DockerSuite) TestRunModeIpcContainerNotRunning(c *check.C) { + testRequires(c, SameHostDaemon) + + out, _ := dockerCmd(c, "create", "busybox") + + id := strings.TrimSpace(out) + out, _, err := dockerCmdWithError(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox") + if err == nil { + c.Fatalf("Run container with ipc mode container should fail with non running container: %s\n%s", out, err) + } +} + +func (s *DockerSuite) TestContainerNetworkMode(c *check.C) { + testRequires(c, SameHostDaemon) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id := strings.TrimSpace(out) + if err := waitRun(id); err != nil { + c.Fatal(err) + } + pid1, err := inspectField(id, "State.Pid") + c.Assert(err, check.IsNil) + + parentContainerNet, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/net", pid1)) + if err != nil { 
+ c.Fatal(err) + } + + out, _ = dockerCmd(c, "run", fmt.Sprintf("--net=container:%s", id), "busybox", "readlink", "/proc/self/ns/net") + out = strings.Trim(out, "\n") + if parentContainerNet != out { + c.Fatalf("NET different with --net=container:%s %s != %s\n", id, parentContainerNet, out) + } +} + +func (s *DockerSuite) TestRunModePidHost(c *check.C) { + testRequires(c, NativeExecDriver, SameHostDaemon) + + hostPid, err := os.Readlink("/proc/1/ns/pid") + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--pid=host", "busybox", "readlink", "/proc/self/ns/pid") + out = strings.Trim(out, "\n") + if hostPid != out { + c.Fatalf("PID different with --pid=host %s != %s\n", hostPid, out) + } + + out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/pid") + out = strings.Trim(out, "\n") + if hostPid == out { + c.Fatalf("PID should be different without --pid=host %s == %s\n", hostPid, out) + } +} + +func (s *DockerSuite) TestRunModeUTSHost(c *check.C) { + testRequires(c, NativeExecDriver, SameHostDaemon) + + hostUTS, err := os.Readlink("/proc/1/ns/uts") + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--uts=host", "busybox", "readlink", "/proc/self/ns/uts") + out = strings.Trim(out, "\n") + if hostUTS != out { + c.Fatalf("UTS different with --uts=host %s != %s\n", hostUTS, out) + } + + out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/uts") + out = strings.Trim(out, "\n") + if hostUTS == out { + c.Fatalf("UTS should be different without --uts=host %s == %s\n", hostUTS, out) + } +} + +func (s *DockerSuite) TestRunTLSverify(c *check.C) { + if out, code, err := dockerCmdWithError(c, "ps"); err != nil || code != 0 { + c.Fatalf("Should have worked: %v:\n%v", err, out) + } + + // Regardless of whether we specify true or false, we need to + // test to make sure tls is turned on if --tlsverify is specified at all + out, code, err := dockerCmdWithError(c, "--tlsverify=false", "ps") + if err == nil || code == 0 || !strings.Contains(out, "trying to connect") { + c.Fatalf("Should have failed: \nexit code:%v\nout:%v\nerr:%v", code, out, err) + } + + out, code, err = dockerCmdWithError(c, "--tlsverify=true", "ps") + if err == nil || code == 0 || !strings.Contains(out, "cert") { + c.Fatalf("Should have failed: \nexit code:%v\nout:%v\nerr:%v", code, out, err) + } +} + +func (s *DockerSuite) TestRunPortFromDockerRangeInUse(c *check.C) { + // first find the allocator's current position + out, _ := dockerCmd(c, "run", "-d", "-p", ":80", "busybox", "top") + + id := strings.TrimSpace(out) + out, _ = dockerCmd(c, "port", id) + + out = strings.TrimSpace(out) + if out == "" { + c.Fatal("docker port command output is empty") + } + out = strings.Split(out, ":")[1] + lastPort, err := strconv.Atoi(out) + if err != nil { + c.Fatal(err) + } + port := lastPort + 1 + l, err := net.Listen("tcp", ":"+strconv.Itoa(port)) + if err != nil { + c.Fatal(err) + } + defer l.Close() + + out, _ = dockerCmd(c, "run", "-d", "-p", ":80", "busybox", "top") + + id = strings.TrimSpace(out) + dockerCmd(c, "port", id) +} + +func (s *DockerSuite) TestRunTtyWithPipe(c *check.C) { + errChan := make(chan error) + go func() { + defer close(errChan) + + cmd := exec.Command(dockerBinary, "run", "-ti", "busybox", "true") + if _, err := cmd.StdinPipe(); err != nil { + errChan <- err + return + } + + expected := "cannot enable tty mode" + if out, _, err := runCommandWithOutput(cmd); err == nil { + errChan <- fmt.Errorf("run should have failed") + return + } else if !strings.Contains(out, expected) { +
errChan <- fmt.Errorf("run failed with error %q: expected %q", out, expected) + return + } + }() + + select { + case err := <-errChan: + c.Assert(err, check.IsNil) + case <-time.After(3 * time.Second): + c.Fatal("container is running but should have failed") + } +} + +func (s *DockerSuite) TestRunNonLocalMacAddress(c *check.C) { + addr := "00:16:3E:08:00:50" + + if out, _ := dockerCmd(c, "run", "--mac-address", addr, "busybox", "ifconfig"); !strings.Contains(out, addr) { + c.Fatalf("Output should have contained %q: %s", addr, out) + } +} + +func (s *DockerSuite) TestRunNetHost(c *check.C) { + testRequires(c, SameHostDaemon) + + hostNet, err := os.Readlink("/proc/1/ns/net") + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--net=host", "busybox", "readlink", "/proc/self/ns/net") + out = strings.Trim(out, "\n") + if hostNet != out { + c.Fatalf("Net namespace different with --net=host %s != %s\n", hostNet, out) + } + + out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/net") + out = strings.Trim(out, "\n") + if hostNet == out { + c.Fatalf("Net namespace should be different without --net=host %s == %s\n", hostNet, out) + } +} + +func (s *DockerSuite) TestRunNetHostTwiceSameName(c *check.C) { + testRequires(c, SameHostDaemon) + + dockerCmd(c, "run", "--rm", "--name=thost", "--net=host", "busybox", "true") + dockerCmd(c, "run", "--rm", "--name=thost", "--net=host", "busybox", "true") +} + +func (s *DockerSuite) TestRunNetContainerWhichHost(c *check.C) { + testRequires(c, SameHostDaemon) + + hostNet, err := os.Readlink("/proc/1/ns/net") + if err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "-d", "--net=host", "--name=test", "busybox", "top") + + out, _ := dockerCmd(c, "run", "--net=container:test", "busybox", "readlink", "/proc/self/ns/net") + out = strings.Trim(out, "\n") + if hostNet != out { + c.Fatalf("Container should have host network namespace") + } +} + +func (s *DockerSuite) TestRunAllowPortRangeThroughPublish(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "--expose", "3000-3003", "-p", "3000-3003", "busybox", "top") + + id := strings.TrimSpace(out) + portstr, err := inspectFieldJSON(id, "NetworkSettings.Ports") + c.Assert(err, check.IsNil) + + var ports nat.PortMap + err = unmarshalJSON([]byte(portstr), &ports) + c.Assert(err, check.IsNil) + for port, binding := range ports { + portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0]) + if portnum < 3000 || portnum > 3003 { + c.Fatalf("Port %d is out of range ", portnum) + } + if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 { + c.Fatal("Port is not mapped for the port "+port, out) + } + } +} + +func (s *DockerSuite) TestRunSetDefaultRestartPolicy(c *check.C) { + dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") + + out, err := inspectField("test", "HostConfig.RestartPolicy.Name") + c.Assert(err, check.IsNil) + if out != "no" { + c.Fatalf("Set default restart policy failed") + } +} + +func (s *DockerSuite) TestRunRestartMaxRetries(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "false") + + id := strings.TrimSpace(string(out)) + if err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false false", 10); err != nil { + c.Fatal(err) + } + + count, err := inspectField(id, "RestartCount") + c.Assert(err, check.IsNil) + if count != "3" { + c.Fatalf("Container was restarted %s times, expected %d", count, 3) + } + + MaximumRetryCount, err := inspectField(id, "HostConfig.RestartPolicy.MaximumRetryCount") + c.Assert(err,
check.IsNil) + if MaximumRetryCount != "3" { + c.Fatalf("Container Maximum Retry Count is %s, expected %s", MaximumRetryCount, "3") + } +} + +func (s *DockerSuite) TestRunContainerWithWritableRootfs(c *check.C) { + dockerCmd(c, "run", "--rm", "busybox", "touch", "/file") +} + +func (s *DockerSuite) TestRunContainerWithReadonlyRootfs(c *check.C) { + testRequires(c, NativeExecDriver) + + for _, f := range []string{"/file", "/etc/hosts", "/etc/resolv.conf", "/etc/hostname", "/sys/kernel", "/dev/.dont.touch.me"} { + testReadOnlyFile(f, c) + } +} + +func (s *DockerSuite) TestPermissionsPtsReadonlyRootfs(c *check.C) { + testRequires(c, NativeExecDriver) + + // Ensure we have not broken writing /dev/pts + out, status := dockerCmd(c, "run", "--read-only", "--rm", "busybox", "mount") + if status != 0 { + c.Fatal("Could not obtain mounts when checking the /dev/pts mount point.") + } + expected := "type devpts (rw," + if !strings.Contains(string(out), expected) { + c.Fatalf("expected output to contain %s but contains %s", expected, out) + } +} + +func testReadOnlyFile(filename string, c *check.C) { + testRequires(c, NativeExecDriver) + + out, _, err := dockerCmdWithError(c, "run", "--read-only", "--rm", "busybox", "touch", filename) + if err == nil { + c.Fatal("expected container to error on run with read only error") + } + expected := "Read-only file system" + if !strings.Contains(string(out), expected) { + c.Fatalf("expected output from failure to contain %s but contains %s", expected, out) + } + + out, _, err = dockerCmdWithError(c, "run", "--read-only", "--privileged", "--rm", "busybox", "touch", filename) + if err == nil { + c.Fatal("expected container to error on run with read only error") + } + expected = "Read-only file system" + if !strings.Contains(string(out), expected) { + c.Fatalf("expected output from failure to contain %s but contains %s", expected, out) + } +} + +func (s *DockerSuite) TestRunContainerWithReadonlyEtcHostsAndLinkedContainer(c *check.C) { + testRequires(c, NativeExecDriver) + + dockerCmd(c, "run", "-d", "--name", "test-etc-hosts-ro-linked", "busybox", "top") + + out, _ := dockerCmd(c, "run", "--read-only", "--link", "test-etc-hosts-ro-linked:testlinked", "busybox", "cat", "/etc/hosts") + if !strings.Contains(string(out), "testlinked") { + c.Fatal("Expected /etc/hosts to be updated even if --read-only enabled") + } +} + +func (s *DockerSuite) TestRunContainerWithReadonlyRootfsWithDnsFlag(c *check.C) { + testRequires(c, NativeExecDriver) + + out, _ := dockerCmd(c, "run", "--read-only", "--dns", "1.1.1.1", "busybox", "/bin/cat", "/etc/resolv.conf") + if !strings.Contains(string(out), "1.1.1.1") { + c.Fatal("Expected /etc/resolv.conf to be updated even if --read-only enabled and --dns flag used") + } +} + +func (s *DockerSuite) TestRunContainerWithReadonlyRootfsWithAddHostFlag(c *check.C) { + testRequires(c, NativeExecDriver) + + out, _ := dockerCmd(c, "run", "--read-only", "--add-host", "testreadonly:127.0.0.1", "busybox", "/bin/cat", "/etc/hosts") + if !strings.Contains(string(out), "testreadonly") { + c.Fatal("Expected /etc/hosts to be updated even if --read-only enabled and --add-host flag used") + } +} + +func (s *DockerSuite) TestRunVolumesFromRestartAfterRemoved(c *check.C) { + dockerCmd(c, "run", "-d", "--name", "voltest", "-v", "/foo", "busybox") + dockerCmd(c, "run", "-d", "--name", "restarter", "--volumes-from", "voltest", "busybox", "top") + + // Remove the main volume container and restart the consuming container + dockerCmd(c, "rm", "-f", "voltest") + + // This should
not fail since the volumes-from were already applied + dockerCmd(c, "restart", "restarter") +} + +// run container with --rm should remove container if exit code != 0 +func (s *DockerSuite) TestRunContainerWithRmFlagExitCodeNotEqualToZero(c *check.C) { + name := "flowers" + out, _, err := dockerCmdWithError(c, "run", "--name", name, "--rm", "busybox", "ls", "/notexists") + if err == nil { + c.Fatal("Expected docker run to fail", out, err) + } + + out, err = getAllContainers() + if err != nil { + c.Fatal(out, err) + } + + if out != "" { + c.Fatal("Expected not to have containers", out) + } +} + +func (s *DockerSuite) TestRunContainerWithRmFlagCannotStartContainer(c *check.C) { + name := "sparkles" + out, _, err := dockerCmdWithError(c, "run", "--name", name, "--rm", "busybox", "commandNotFound") + if err == nil { + c.Fatal("Expected docker run to fail", out, err) + } + + out, err = getAllContainers() + if err != nil { + c.Fatal(out, err) + } + + if out != "" { + c.Fatal("Expected not to have containers", out) + } +} + +func (s *DockerSuite) TestRunPidHostWithChildIsKillable(c *check.C) { + name := "ibuildthecloud" + dockerCmd(c, "run", "-d", "--pid=host", "--name", name, "busybox", "sh", "-c", "sleep 30; echo hi") + + time.Sleep(1 * time.Second) + errchan := make(chan error) + go func() { + if out, _, err := dockerCmdWithError(c, "kill", name); err != nil { + errchan <- fmt.Errorf("%v:\n%s", err, out) + } + close(errchan) + }() + select { + case err := <-errchan: + c.Assert(err, check.IsNil) + case <-time.After(5 * time.Second): + c.Fatal("Kill container timed out") + } +} + +func (s *DockerSuite) TestRunWithTooSmallMemoryLimit(c *check.C) { + // this memory limit is 1 byte less than the min, which is 4MB + // https://github.com/docker/docker/blob/v1.5.0/daemon/create.go#L22 + out, _, err := dockerCmdWithError(c, "run", "-m", "4194303", "busybox") + if err == nil || !strings.Contains(out, "Minimum memory limit allowed is 4MB") { + c.Fatalf("expected run to fail when using too low a memory limit: %q", out) + } +} + +func (s *DockerSuite) TestRunWriteToProcAsound(c *check.C) { + _, code, err := dockerCmdWithError(c, "run", "busybox", "sh", "-c", "echo 111 >> /proc/asound/version") + if err == nil || code == 0 { + c.Fatal("standard container should not be able to write to /proc/asound") + } +} + +func (s *DockerSuite) TestRunReadProcTimer(c *check.C) { + testRequires(c, NativeExecDriver) + out, code, err := dockerCmdWithError(c, "run", "busybox", "cat", "/proc/timer_stats") + if code != 0 { + return + } + if err != nil { + c.Fatal(err) + } + if strings.Trim(out, "\n ") != "" { + c.Fatalf("expected to receive no output from /proc/timer_stats but received %q", out) + } +} + +func (s *DockerSuite) TestRunReadProcLatency(c *check.C) { + testRequires(c, NativeExecDriver) + // some kernels don't have this configured so skip the test if this file is not found + // on the host running the tests. 
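+	// (Note: /proc/latency_stats is typically only exposed by kernels built with CONFIG_LATENCYTOP, hence the existence probe below.)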
+ if _, err := os.Stat("/proc/latency_stats"); err != nil { + c.Skip("kernel doesn't have latency_stats configured") + return + } + out, code, err := dockerCmdWithError(c, "run", "busybox", "cat", "/proc/latency_stats") + if code != 0 { + return + } + if err != nil { + c.Fatal(err) + } + if strings.Trim(out, "\n ") != "" { + c.Fatalf("expected to receive no output from /proc/latency_stats but received %q", out) + } +} + +func (s *DockerSuite) TestRunReadFilteredProc(c *check.C) { + testRequires(c, Apparmor) + + testReadPaths := []string{ + "/proc/latency_stats", + "/proc/timer_stats", + "/proc/kcore", + } + for i, filePath := range testReadPaths { + name := fmt.Sprintf("procsieve-%d", i) + shellCmd := fmt.Sprintf("exec 3<%s", filePath) + + out, exitCode, err := dockerCmdWithError(c, "run", "--privileged", "--security-opt", "apparmor:docker-default", "--name", name, "busybox", "sh", "-c", shellCmd) + if err == nil || exitCode == 0 { + c.Fatalf("Open FD for read should have failed with permission denied, got: %s, %v", out, err) + } + } +} + +func (s *DockerSuite) TestMountIntoProc(c *check.C) { + testRequires(c, NativeExecDriver) + _, code, err := dockerCmdWithError(c, "run", "-v", "/proc//sys", "busybox", "true") + if err == nil || code == 0 { + c.Fatal("container should not be able to mount into /proc") + } +} + +func (s *DockerSuite) TestMountIntoSys(c *check.C) { + testRequires(c, NativeExecDriver) + dockerCmd(c, "run", "-v", "/sys/fs/cgroup", "busybox", "true") +} + +func (s *DockerSuite) TestRunUnshareProc(c *check.C) { + testRequires(c, Apparmor, NativeExecDriver) + + name := "acidburn" + if out, _, err := dockerCmdWithError(c, "run", "--name", name, "jess/unshare", "unshare", "-p", "-m", "-f", "-r", "--mount-proc=/proc", "mount"); err == nil || !strings.Contains(out, "Permission denied") { + c.Fatalf("unshare should have failed with permission denied, got: %s, %v", out, err) + } + + name = "cereal" + if out, _, err := dockerCmdWithError(c, "run", "--name", name, "jess/unshare", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc"); err == nil || !strings.Contains(out, "Permission denied") { + c.Fatalf("unshare should have failed with permission denied, got: %s, %v", out, err) + } + + /* Ensure still fails if running privileged with the default policy */ + name = "crashoverride" + if out, _, err := dockerCmdWithError(c, "run", "--privileged", "--security-opt", "apparmor:docker-default", "--name", name, "jess/unshare", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc"); err == nil || !strings.Contains(out, "Permission denied") { + c.Fatalf("unshare should have failed with permission denied, got: %s, %v", out, err) + } +} + +func (s *DockerSuite) TestRunPublishPort(c *check.C) { + dockerCmd(c, "run", "-d", "--name", "test", "--expose", "8080", "busybox", "top") + out, _ := dockerCmd(c, "port", "test") + out = strings.Trim(out, "\r\n") + if out != "" { + c.Fatalf("run without --publish-all should not publish port, output should be empty, but got: %s", out) + } +} + +// Issue #10184.
+func (s *DockerSuite) TestDevicePermissions(c *check.C) { + testRequires(c, NativeExecDriver) + const permissions = "crw-rw-rw-" + out, status := dockerCmd(c, "run", "--device", "/dev/fuse:/dev/fuse:mrw", "busybox:latest", "ls", "-l", "/dev/fuse") + if status != 0 { + c.Fatalf("expected status 0, got %d", status) + } + if !strings.HasPrefix(out, permissions) { + c.Fatalf("output should begin with %q, got %q", permissions, out) + } +} + +func (s *DockerSuite) TestRunCapAddCHOWN(c *check.C) { + testRequires(c, NativeExecDriver) + out, _ := dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=CHOWN", "busybox", "sh", "-c", "adduser -D -H newuser && chown newuser /home && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output 'ok', received %s", actual) + } +} + +// https://github.com/docker/docker/pull/14498 +func (s *DockerSuite) TestVolumeFromMixedRWOptions(c *check.C) { + dockerCmd(c, "run", "--name", "parent", "-v", "/test", "busybox", "true") + dockerCmd(c, "run", "--volumes-from", "parent:ro", "--name", "test-volumes-1", "busybox", "true") + dockerCmd(c, "run", "--volumes-from", "parent:rw", "--name", "test-volumes-2", "busybox", "true") + + mRO, err := inspectMountPoint("test-volumes-1", "/test") + c.Assert(err, check.IsNil) + if mRO.RW { + c.Fatalf("Expected RO volume but it was mounted RW") + } + + mRW, err := inspectMountPoint("test-volumes-2", "/test") + c.Assert(err, check.IsNil) + if !mRW.RW { + c.Fatalf("Expected RW volume but it was mounted RO") + } +} + +func (s *DockerSuite) TestRunWriteFilteredProc(c *check.C) { + testRequires(c, Apparmor, NativeExecDriver) + + testWritePaths := []string{ + /* modprobe and core_pattern should both be denied by generic + * policy of denials for /proc/sys/kernel. These files have been + * picked to be checked as they are particularly sensitive to writes */ + "/proc/sys/kernel/modprobe", + "/proc/sys/kernel/core_pattern", + "/proc/sysrq-trigger", + "/proc/kcore", + } + for i, filePath := range testWritePaths { + name := fmt.Sprintf("writeprocsieve-%d", i) + + shellCmd := fmt.Sprintf("exec 3>%s", filePath) + out, code, err := dockerCmdWithError(c, "run", "--privileged", "--security-opt", "apparmor:docker-default", "--name", name, "busybox", "sh", "-c", shellCmd) + if err == nil || code == 0 { + c.Fatalf("Open FD for write should have failed with permission denied, got: %s, %v", out, err) + } + } +} + +func (s *DockerSuite) TestRunNetworkFilesBindMount(c *check.C) { + testRequires(c, SameHostDaemon) + name := "test-nwfiles-mount" + + f, err := ioutil.TempFile("", name) + c.Assert(err, check.IsNil) + + filename := f.Name() + defer os.Remove(filename) + + expected := "test123" + + err = ioutil.WriteFile(filename, []byte(expected), 0644) + c.Assert(err, check.IsNil) + + var actual string + actual, _ = dockerCmd(c, "run", "-v", filename+":/etc/resolv.conf", "busybox", "cat", "/etc/resolv.conf") + if actual != expected { + c.Fatalf("expected resolv.conf to be: %q, but was: %q", expected, actual) + } +} + +func (s *DockerTrustSuite) TestTrustedRun(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-run") + + // Try run + runCmd := exec.Command(dockerBinary, "run", repoName) + s.trustedCmd(runCmd) + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + c.Fatalf("Error running trusted run: %s\n%s\n", err, out) + } + + if !strings.Contains(string(out), "Tagging") { + c.Fatalf("Missing expected output on trusted run:\n%s", out) + } + + dockerCmd(c, "rmi", repoName) + + // Try untrusted run to ensure we pushed the
tag to the registry + runCmd = exec.Command(dockerBinary, "run", "--disable-content-trust=true", repoName) + s.trustedCmd(runCmd) + out, _, err = runCommandWithOutput(runCmd) + if err != nil { + c.Fatalf("Error running untrusted run: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Status: Downloaded") { + c.Fatalf("Missing expected output on trusted run with --disable-content-trust:\n%s", out) + } +} + +func (s *DockerTrustSuite) TestUntrustedRun(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + dockerCmd(c, "push", repoName) + dockerCmd(c, "rmi", repoName) + + // Try trusted run on untrusted tag + runCmd := exec.Command(dockerBinary, "run", repoName) + s.trustedCmd(runCmd) + out, _, err := runCommandWithOutput(runCmd) + if err == nil { + c.Fatalf("Expected trusted run to fail on an untrusted tag:\n%s", out) + } + + if !strings.Contains(string(out), "no trust data available") { + c.Fatalf("Missing expected output on trusted run:\n%s", out) + } +} + +func (s *DockerTrustSuite) TestRunWhenCertExpired(c *check.C) { + c.Skip("Currently changes system time, causing instability") + repoName := s.setupTrustedImage(c, "trusted-run-expired") + + // The certificates are valid for 10 years + elevenYearsFromNow := time.Now().Add(time.Hour * 24 * 365 * 11) + + runAtDifferentDate(elevenYearsFromNow, func() { + // Try run + runCmd := exec.Command(dockerBinary, "run", repoName) + s.trustedCmd(runCmd) + out, _, err := runCommandWithOutput(runCmd) + if err == nil { + c.Fatalf("Expected trusted run in the distant future to fail: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "could not validate the path to a trusted root") { + c.Fatalf("Missing expected output on trusted run in the distant future:\n%s", out) + } + }) + + runAtDifferentDate(elevenYearsFromNow, func() { + // Try run + runCmd := exec.Command(dockerBinary, "run", "--disable-content-trust", repoName) + s.trustedCmd(runCmd) + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + c.Fatalf("Error running untrusted run in the distant future: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Status: Downloaded") { + c.Fatalf("Missing expected output on untrusted run in the distant future:\n%s", out) + } + }) +} + +func (s *DockerTrustSuite) TestTrustedRunFromBadTrustServer(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclievilrun/trusted:latest", privateRegistryURL) + evilLocalConfigDir, err := ioutil.TempDir("", "evil-local-config-dir") + if err != nil { + c.Fatalf("Failed to create local temp dir") + } + + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + if err != nil { + c.Fatalf("Error running trusted push: %s\n%s", err, out) + } + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + dockerCmd(c, "rmi", repoName) + + // Try run + runCmd := exec.Command(dockerBinary, "run", repoName) + s.trustedCmd(runCmd) + out, _, err = runCommandWithOutput(runCmd) + if err != nil { + c.Fatalf("Error running trusted run: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Tagging") { + c.Fatalf("Missing expected output on trusted run:\n%s", out) + } + + dockerCmd(c, "rmi", repoName) + + // Kill the
notary server, start a new "evil" one. + s.not.Close() + s.not, err = newTestNotary(c) + if err != nil { + c.Fatalf("Restarting notary server failed.") + } + + // In order to make an evil server, let's re-initialize a client (with a different trust dir) and push new data. + // tag an image and upload it to the private registry + dockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) + + // Push up to the new server + pushCmd = exec.Command(dockerBinary, "--config", evilLocalConfigDir, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err = runCommandWithOutput(pushCmd) + if err != nil { + c.Fatalf("Error running trusted push: %s\n%s", err, out) + } + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + // Now, try running with the original client from this new trust server. This should fail. + runCmd = exec.Command(dockerBinary, "run", repoName) + s.trustedCmd(runCmd) + out, _, err = runCommandWithOutput(runCmd) + if err == nil { + c.Fatalf("Expected this run to fail due to different remote data: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "failed to validate data with current trusted certificates") { + c.Fatalf("Missing expected output on trusted run:\n%s", out) + } +} + +func (s *DockerSuite) TestPtraceContainerProcsFromHost(c *check.C) { + testRequires(c, SameHostDaemon) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id := strings.TrimSpace(out) + if err := waitRun(id); err != nil { + c.Fatal(err) + } + pid1, err := inspectField(id, "State.Pid") + c.Assert(err, check.IsNil) + + _, err = os.Readlink(fmt.Sprintf("/proc/%s/ns/net", pid1)) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestAppArmorDeniesPtrace(c *check.C) { + testRequires(c, SameHostDaemon) + testRequires(c, Apparmor) + + // Run through 'sh' so we are NOT pid 1. Pid 1 may be able to trace + // itself, but pid>1 should not be able to trace pid1.
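+	// (Illustrative note: reading a /proc/1/ns/* symlink requires ptrace read access to pid 1, which the docker-default AppArmor profile is expected to deny for processes other than pid 1 itself.)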
+ _, exitCode, _ := dockerCmdWithError(c, "run", "busybox", "sh", "-c", "readlink /proc/1/ns/net") + if exitCode == 0 { + c.Fatal("ptrace was not successfully restricted by AppArmor") + } +} + +func (s *DockerSuite) TestAppArmorTraceSelf(c *check.C) { + testRequires(c, SameHostDaemon) + testRequires(c, Apparmor) + + _, exitCode, _ := dockerCmdWithError(c, "run", "busybox", "readlink", "/proc/1/ns/net") + if exitCode != 0 { + c.Fatal("ptrace of self failed.") + } +} diff --git a/integration-cli/docker_cli_run_unix_test.go b/integration-cli/docker_cli_run_unix_test.go new file mode 100644 index 00000000..295bace8 --- /dev/null +++ b/integration-cli/docker_cli_run_unix_test.go @@ -0,0 +1,416 @@ +// +build !windows + +package main + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "time" + + "github.com/docker/docker/pkg/mount" + "github.com/go-check/check" + "github.com/kr/pty" +) + +// #6509 +func (s *DockerSuite) TestRunRedirectStdout(c *check.C) { + checkRedirect := func(command string) { + _, tty, err := pty.Open() + if err != nil { + c.Fatalf("Could not open pty: %v", err) + } + cmd := exec.Command("sh", "-c", command) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + if err := cmd.Start(); err != nil { + c.Fatalf("start err: %v", err) + } + ch := make(chan error) + go func() { + ch <- cmd.Wait() + close(ch) + }() + + select { + case <-time.After(10 * time.Second): + c.Fatal("command timeout") + case err := <-ch: + if err != nil { + c.Fatalf("wait err=%v", err) + } + } + } + + checkRedirect(dockerBinary + " run -i busybox cat /etc/passwd | grep -q root") + checkRedirect(dockerBinary + " run busybox cat /etc/passwd | grep -q root") +} + +// Test recursive bind mount works by default +func (s *DockerSuite) TestRunWithVolumesIsRecursive(c *check.C) { + tmpDir, err := ioutil.TempDir("", "docker_recursive_mount_test") + if err != nil { + c.Fatal(err) + } + + defer os.RemoveAll(tmpDir) + + // Create a temporary tmpfs mount. + tmpfsDir := filepath.Join(tmpDir, "tmpfs") + if err := os.MkdirAll(tmpfsDir, 0777); err != nil { + c.Fatalf("failed to mkdir at %s - %s", tmpfsDir, err) + } + if err := mount.Mount("tmpfs", tmpfsDir, "tmpfs", ""); err != nil { + c.Fatalf("failed to create a tmpfs mount at %s - %s", tmpfsDir, err) + } + + f, err := ioutil.TempFile(tmpfsDir, "touch-me") + if err != nil { + c.Fatal(err) + } + defer f.Close() + + runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", fmt.Sprintf("%s:/tmp:ro", tmpDir), "busybox:latest", "ls", "/tmp/tmpfs") + out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd) + if err != nil && exitCode != 0 { + c.Fatal(out, stderr, err) + } + if !strings.Contains(out, filepath.Base(f.Name())) { + c.Fatal("Recursive bind mount test failed. 
Expected file not found") + } +} + +func (s *DockerSuite) TestRunWithUlimits(c *check.C) { + testRequires(c, NativeExecDriver) + + out, _ := dockerCmd(c, "run", "--name=testulimits", "--ulimit", "nofile=42", "busybox", "/bin/sh", "-c", "ulimit -n") + ul := strings.TrimSpace(out) + if ul != "42" { + c.Fatalf("expected `ulimit -n` to be 42, got %s", ul) + } +} + +func (s *DockerSuite) TestRunContainerWithCgroupParent(c *check.C) { + testRequires(c, NativeExecDriver) + + cgroupParent := "test" + name := "cgroup-test" + + out, _, err := dockerCmdWithError(c, "run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") + if err != nil { + c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) + } + cgroupPaths := parseCgroupPaths(string(out)) + if len(cgroupPaths) == 0 { + c.Fatalf("unexpected output - %q", string(out)) + } + id, err := getIDByName(name) + c.Assert(err, check.IsNil) + expectedCgroup := path.Join(cgroupParent, id) + found := false + for _, path := range cgroupPaths { + if strings.HasSuffix(path, expectedCgroup) { + found = true + break + } + } + if !found { + c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths) + } +} + +func (s *DockerSuite) TestRunContainerWithCgroupParentAbsPath(c *check.C) { + testRequires(c, NativeExecDriver) + + cgroupParent := "/cgroup-parent/test" + name := "cgroup-test" + out, _, err := dockerCmdWithError(c, "run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") + if err != nil { + c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) + } + cgroupPaths := parseCgroupPaths(string(out)) + if len(cgroupPaths) == 0 { + c.Fatalf("unexpected output - %q", string(out)) + } + id, err := getIDByName(name) + c.Assert(err, check.IsNil) + expectedCgroup := path.Join(cgroupParent, id) + found := false + for _, path := range cgroupPaths { + if strings.HasSuffix(path, expectedCgroup) { + found = true + break + } + } + if !found { + c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths) + } +} + +func (s *DockerSuite) TestRunContainerWithCgroupMountRO(c *check.C) { + testRequires(c, NativeExecDriver) + + filename := "/sys/fs/cgroup/devices/test123" + out, _, err := dockerCmdWithError(c, "run", "busybox", "touch", filename) + if err == nil { + c.Fatal("expected cgroup mount point to be read-only, touch file should fail") + } + expected := "Read-only file system" + if !strings.Contains(out, expected) { + c.Fatalf("expected output from failure to contain %s but contains %s", expected, out) + } +} + +func (s *DockerSuite) TestRunDeviceDirectory(c *check.C) { + testRequires(c, NativeExecDriver) + + out, _ := dockerCmd(c, "run", "--device", "/dev/snd:/dev/snd", "busybox", "sh", "-c", "ls /dev/snd/") + if actual := strings.Trim(out, "\r\n"); !strings.Contains(out, "timer") { + c.Fatalf("expected output /dev/snd/timer, received %s", actual) + } + + out, _ = dockerCmd(c, "run", "--device", "/dev/snd:/dev/othersnd", "busybox", "sh", "-c", "ls /dev/othersnd/") + if actual := strings.Trim(out, "\r\n"); !strings.Contains(out, "seq") { + c.Fatalf("expected output /dev/othersnd/seq, received %s", actual) + } +} + +// TestRunAttachDetach checks attaching and detaching with the escape sequence.
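+// The default detach sequence is ctrl-p followed by ctrl-q, which is why the test writes the raw bytes 16 (ctrl-p) and 17 (ctrl-q) to the pty below.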
+func (s *DockerSuite) TestRunAttachDetach(c *check.C) { + name := "attach-detach" + cmd := exec.Command(dockerBinary, "run", "--name", name, "-it", "busybox", "cat") + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + cpty, tty, err := pty.Open() + if err != nil { + c.Fatal(err) + } + defer cpty.Close() + cmd.Stdin = tty + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + if err := waitRun(name); err != nil { + c.Fatal(err) + } + + if _, err := cpty.Write([]byte("hello\n")); err != nil { + c.Fatal(err) + } + + out, err := bufio.NewReader(stdout).ReadString('\n') + if err != nil { + c.Fatal(err) + } + if strings.TrimSpace(out) != "hello" { + c.Fatalf("expected 'hello', got %q", out) + } + + // escape sequence + if _, err := cpty.Write([]byte{16}); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write([]byte{17}); err != nil { + c.Fatal(err) + } + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + running, err := inspectField(name, "State.Running") + if err != nil { + c.Fatal(err) + } + if running != "true" { + c.Fatal("expected container to still be running") + } + + go func() { + exec.Command(dockerBinary, "kill", name).Run() + }() + + select { + case <-ch: + case <-time.After(10 * time.Millisecond): + c.Fatal("timed out waiting for container to exit") + } +} + +// "test" should be printed +func (s *DockerSuite) TestRunEchoStdoutWithCPUQuota(c *check.C) { + testRequires(c, cpuCfsQuota) + + out, _, err := dockerCmdWithError(c, "run", "--cpu-quota", "8000", "--name", "test", "busybox", "echo", "test") + if err != nil { + c.Fatalf("failed to run container: %v, output: %q", err, out) + } + out = strings.TrimSpace(out) + if out != "test" { + c.Errorf("container should've printed 'test'") + } + + out, err = inspectField("test", "HostConfig.CpuQuota") + c.Assert(err, check.IsNil) + + if out != "8000" { + c.Fatalf("setting the CPU CFS quota failed") + } +} + +func (s *DockerSuite) TestRunWithCpuPeriod(c *check.C) { + testRequires(c, cpuCfsPeriod) + + if _, _, err := dockerCmdWithError(c, "run", "--cpu-period", "50000", "--name", "test", "busybox", "true"); err != nil { + c.Fatalf("failed to run container: %v", err) + } + + out, err := inspectField("test", "HostConfig.CpuPeriod") + c.Assert(err, check.IsNil) + if out != "50000" { + c.Fatalf("setting the CPU CFS period failed") + } +} + +func (s *DockerSuite) TestRunOOMExitCode(c *check.C) { + testRequires(c, oomControl) + errChan := make(chan error) + go func() { + defer close(errChan) + out, exitCode, _ := dockerCmdWithError(c, "run", "-m", "4MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") + if expected := 137; exitCode != expected { + errChan <- fmt.Errorf("wrong exit code for OOM container: expected %d, got %d (output: %q)", expected, exitCode, out) + } + }() + + select { + case err := <-errChan: + c.Assert(err, check.IsNil) + case <-time.After(30 * time.Second): + c.Fatal("Timeout waiting for container to die on OOM") + } +} + +func (s *DockerSuite) TestContainerNetworkModeToSelf(c *check.C) { + out, _, err := dockerCmdWithError(c, "run", "--name=me", "--net=container:me", "busybox", "true") + if err == nil || !strings.Contains(out, "cannot join own network") { + c.Fatalf("using container net mode to self should result in an error") + } +} + +func (s *DockerSuite) TestRunContainerNetModeWithDnsMacHosts(c *check.C) { + out, _, err := dockerCmdWithError(c, "run", "-d", "--name", "parent", "busybox", "top") + if err != nil { + 
c.Fatalf("failed to run container: %v, output: %q", err, out) + } + + out, _, err = dockerCmdWithError(c, "run", "--dns", "1.2.3.4", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, "Conflicting options: --dns and the network mode") { + c.Fatalf("run --net=container with --dns should error out") + } + + out, _, err = dockerCmdWithError(c, "run", "--mac-address", "92:d0:c6:0a:29:33", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, "--mac-address and the network mode") { + c.Fatalf("run --net=container with --mac-address should error out") + } + + out, _, err = dockerCmdWithError(c, "run", "--add-host", "test:192.168.2.109", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, "--add-host and the network mode") { + c.Fatalf("run --net=container with --add-host should error out") + } +} + +func (s *DockerSuite) TestRunContainerNetModeWithExposePort(c *check.C) { + dockerCmd(c, "run", "-d", "--name", "parent", "busybox", "top") + + out, _, err := dockerCmdWithError(c, "run", "-p", "5000:5000", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, "Conflicting options: -p, -P, --publish-all, --publish and the network mode (--net)") { + c.Fatalf("run --net=container with -p should error out") + } + + out, _, err = dockerCmdWithError(c, "run", "-P", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, "Conflicting options: -p, -P, --publish-all, --publish and the network mode (--net)") { + c.Fatalf("run --net=container with -P should error out") + } + + out, _, err = dockerCmdWithError(c, "run", "--expose", "5000", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, "Conflicting options: --expose and the network mode (--expose)") { + c.Fatalf("run --net=container with --expose should error out") + } +} + +func (s *DockerSuite) TestRunLinkToContainerNetMode(c *check.C) { + dockerCmd(c, "run", "--name", "test", "-d", "busybox", "top") + dockerCmd(c, "run", "--name", "parent", "-d", "--net=container:test", "busybox", "top") + dockerCmd(c, "run", "-d", "--link=parent:parent", "busybox", "top") + dockerCmd(c, "run", "--name", "child", "-d", "--net=container:parent", "busybox", "top") + dockerCmd(c, "run", "-d", "--link=child:child", "busybox", "top") +} + +func (s *DockerSuite) TestRunLoopbackOnlyExistsWhenNetworkingDisabled(c *check.C) { + out, _ := dockerCmd(c, "run", "--net=none", "busybox", "ip", "-o", "-4", "a", "show", "up") + + var ( + count = 0 + parts = strings.Split(out, "\n") + ) + + for _, l := range parts { + if l != "" { + count++ + } + } + + if count != 1 { + c.Fatalf("Wrong interface count in container %d", count) + } + + if !strings.HasPrefix(out, "1: lo") { + c.Fatalf("Wrong interface in test container: expected [1: lo], got %s", out) + } +} + +// Issue #4681 +func (s *DockerSuite) TestRunLoopbackWhenNetworkDisabled(c *check.C) { + dockerCmd(c, "run", "--net=none", "busybox", "ping", "-c", "1", "127.0.0.1") +} + +func (s *DockerSuite) TestRunModeNetContainerHostname(c *check.C) { + testRequires(c, ExecSupport) + + dockerCmd(c, "run", "-i", "-d", "--name", "parent", "busybox", "top") + out, _ := dockerCmd(c, "exec", "parent", "cat", "/etc/hostname") + out1, _ := dockerCmd(c, "run", "--net=container:parent", "busybox", "cat", "/etc/hostname") + + if out1 != out { + c.Fatal("containers with shared net namespace should have same hostname") + } +} + +func (s *DockerSuite) TestRunNetworkNotInitializedNoneMode(c *check.C) { + 
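// With --net=none the container should get no network configuration at all, so inspect is expected to report an empty IP address. +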
out, _, err := dockerCmdWithError(c, "run", "-d", "--net=none", "busybox", "top") + c.Assert(err, check.IsNil) + id := strings.TrimSpace(out) + res, err := inspectField(id, "NetworkSettings.IPAddress") + c.Assert(err, check.IsNil) + if res != "" { + c.Fatalf("For 'none' mode network must not be initialized, but container got IP: %s", res) + } +} + +func (s *DockerSuite) TestTwoContainersInNetHost(c *check.C) { + dockerCmd(c, "run", "-d", "--net=host", "--name=first", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=host", "--name=second", "busybox", "top") + dockerCmd(c, "stop", "first") + dockerCmd(c, "stop", "second") +} diff --git a/integration-cli/docker_cli_save_load_test.go b/integration-cli/docker_cli_save_load_test.go new file mode 100644 index 00000000..5b4b64d7 --- /dev/null +++ b/integration-cli/docker_cli_save_load_test.go @@ -0,0 +1,293 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "reflect" + "sort" + "strings" + + "github.com/go-check/check" +) + +// save a repo using xz+gz compression and try to load it using stdout +func (s *DockerSuite) TestSaveXzAndLoadRepoStdout(c *check.C) { + name := "test-save-xz-and-load-repo-stdout" + dockerCmd(c, "run", "--name", name, "busybox", "true") + + repoName := "foobar-save-load-test-xz-gz" + out, _ := dockerCmd(c, "commit", name, repoName) + + dockerCmd(c, "inspect", repoName) + + repoTarball, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", repoName), + exec.Command("xz", "-c"), + exec.Command("gzip", "-c")) + if err != nil { + c.Fatalf("failed to save repo: %v %v", out, err) + } + deleteImages(repoName) + + loadCmd := exec.Command(dockerBinary, "load") + loadCmd.Stdin = strings.NewReader(repoTarball) + out, _, err = runCommandWithOutput(loadCmd) + if err == nil { + c.Fatalf("expected error, but succeeded with no error and output: %v", out) + } + + after, _, err := dockerCmdWithError(c, "inspect", repoName) + if err == nil { + c.Fatalf("the repo should not exist: %v", after) + } +} + +// save a repo using xz+gz compression and try to load it using stdout +func (s *DockerSuite) TestSaveXzGzAndLoadRepoStdout(c *check.C) { + name := "test-save-xz-gz-and-load-repo-stdout" + dockerCmd(c, "run", "--name", name, "busybox", "true") + + repoName := "foobar-save-load-test-xz-gz" + dockerCmd(c, "commit", name, repoName) + + dockerCmd(c, "inspect", repoName) + + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", repoName), + exec.Command("xz", "-c"), + exec.Command("gzip", "-c")) + if err != nil { + c.Fatalf("failed to save repo: %v %v", out, err) + } + + deleteImages(repoName) + + loadCmd := exec.Command(dockerBinary, "load") + loadCmd.Stdin = strings.NewReader(out) + out, _, err = runCommandWithOutput(loadCmd) + if err == nil { + c.Fatalf("expected error, but succeeded with no error and output: %v", out) + } + + after, _, err := dockerCmdWithError(c, "inspect", repoName) + if err == nil { + c.Fatalf("the repo should not exist: %v", after) + } +} + +func (s *DockerSuite) TestSaveSingleTag(c *check.C) { + repoName := "foobar-save-single-tag-test" + dockerCmd(c, "tag", "busybox:latest", fmt.Sprintf("%v:latest", repoName)) + + out, _ := dockerCmd(c, "images", "-q", "--no-trunc", repoName) + cleanedImageID := strings.TrimSpace(out) + + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", fmt.Sprintf("%v:latest", repoName)), + exec.Command("tar", "t"), + exec.Command("grep", "-E", fmt.Sprintf("(^repositories$|%v)", cleanedImageID))) + if err != nil {
+ c.Fatalf("failed to save repo with image ID and 'repositories' file: %s, %v", out, err) + } +} + +func (s *DockerSuite) TestSaveImageId(c *check.C) { + repoName := "foobar-save-image-id-test" + dockerCmd(c, "tag", "emptyfs:latest", fmt.Sprintf("%v:latest", repoName)) + + out, _ := dockerCmd(c, "images", "-q", "--no-trunc", repoName) + cleanedLongImageID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "images", "-q", repoName) + cleanedShortImageID := strings.TrimSpace(out) + + saveCmd := exec.Command(dockerBinary, "save", cleanedShortImageID) + tarCmd := exec.Command("tar", "t") + + var err error + tarCmd.Stdin, err = saveCmd.StdoutPipe() + if err != nil { + c.Fatalf("cannot set stdout pipe for tar: %v", err) + } + grepCmd := exec.Command("grep", cleanedLongImageID) + grepCmd.Stdin, err = tarCmd.StdoutPipe() + if err != nil { + c.Fatalf("cannot set stdout pipe for grep: %v", err) + } + + if err = tarCmd.Start(); err != nil { + c.Fatalf("tar failed with error: %v", err) + } + if err = saveCmd.Start(); err != nil { + c.Fatalf("docker save failed with error: %v", err) + } + defer saveCmd.Wait() + defer tarCmd.Wait() + + out, _, err = runCommandWithOutput(grepCmd) + + if err != nil { + c.Fatalf("failed to save repo with image ID: %s, %v", out, err) + } +} + +// save a repo and try to load it using flags +func (s *DockerSuite) TestSaveAndLoadRepoFlags(c *check.C) { + name := "test-save-and-load-repo-flags" + dockerCmd(c, "run", "--name", name, "busybox", "true") + + repoName := "foobar-save-load-test" + + deleteImages(repoName) + dockerCmd(c, "commit", name, repoName) + + before, _ := dockerCmd(c, "inspect", repoName) + + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", repoName), + exec.Command(dockerBinary, "load")) + if err != nil { + c.Fatalf("failed to save and load repo: %s, %v", out, err) + } + + after, _ := dockerCmd(c, "inspect", repoName) + if before != after { + c.Fatalf("inspect is not the same after a save / load") + } +} + +func (s *DockerSuite) TestSaveMultipleNames(c *check.C) { + repoName := "foobar-save-multi-name-test" + + // Make the first image + dockerCmd(c, "tag", "emptyfs:latest", fmt.Sprintf("%v-one:latest", repoName)) + + // Make a second image + dockerCmd(c, "tag", "emptyfs:latest", fmt.Sprintf("%v-two:latest", repoName)) + + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", fmt.Sprintf("%v-one", repoName), fmt.Sprintf("%v-two:latest", repoName)), + exec.Command("tar", "xO", "repositories"), + exec.Command("grep", "-q", "-E", "(-one|-two)"), + ) + if err != nil { + c.Fatalf("failed to save multiple repos: %s, %v", out, err) + } +} + +func (s *DockerSuite) TestSaveRepoWithMultipleImages(c *check.C) { + + makeImage := func(from string, tag string) string { + var ( + out string + ) + out, _ = dockerCmd(c, "run", "-d", from, "true") + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "commit", cleanedContainerID, tag) + imageID := strings.TrimSpace(out) + return imageID + } + + repoName := "foobar-save-multi-images-test" + tagFoo := repoName + ":foo" + tagBar := repoName + ":bar" + + idFoo := makeImage("busybox:latest", tagFoo) + idBar := makeImage("busybox:latest", tagBar) + + deleteImages(repoName) + + // create the archive + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", repoName), + exec.Command("tar", "t"), + exec.Command("grep", "VERSION"), + exec.Command("cut", "-d", "/", "-f1")) + if err != nil { + c.Fatalf("failed to save multiple images: %s, %v",
out, err) + } + actual := strings.Split(strings.TrimSpace(out), "\n") + + // make the list of expected layers + out, _ = dockerCmd(c, "history", "-q", "--no-trunc", "busybox:latest") + expected := append(strings.Split(strings.TrimSpace(out), "\n"), idFoo, idBar) + + sort.Strings(actual) + sort.Strings(expected) + if !reflect.DeepEqual(expected, actual) { + c.Fatalf("archive does not contain the right layers: got %v, expected %v", actual, expected) + } +} + +// Issue #6722 #5892 ensure directories are included in changes +func (s *DockerSuite) TestSaveDirectoryPermissions(c *check.C) { + layerEntries := []string{"opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"} + layerEntriesAUFS := []string{"./", ".wh..wh.aufs", ".wh..wh.orph/", ".wh..wh.plnk/", "opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"} + + name := "save-directory-permissions" + tmpDir, err := ioutil.TempDir("", "save-layers-with-directories") + if err != nil { + c.Errorf("failed to create temporary directory: %s", err) + } + extractionDirectory := filepath.Join(tmpDir, "image-extraction-dir") + os.Mkdir(extractionDirectory, 0777) + + defer os.RemoveAll(tmpDir) + _, err = buildImage(name, + `FROM busybox + RUN adduser -D user && mkdir -p /opt/a/b && chown -R user:user /opt/a + RUN touch /opt/a/b/c && chown user:user /opt/a/b/c`, + true) + if err != nil { + c.Fatal(err) + } + + if out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", name), + exec.Command("tar", "-xf", "-", "-C", extractionDirectory), + ); err != nil { + c.Errorf("failed to save and extract image: %s", out) + } + + dirs, err := ioutil.ReadDir(extractionDirectory) + if err != nil { + c.Errorf("failed to get a listing of the layer directories: %s", err) + } + + found := false + for _, entry := range dirs { + var entriesSansDev []string + if entry.IsDir() { + layerPath := filepath.Join(extractionDirectory, entry.Name(), "layer.tar") + + f, err := os.Open(layerPath) + if err != nil { + c.Fatalf("failed to open %s: %s", layerPath, err) + } + + entries, err := listTar(f) + if err != nil { + c.Fatalf("encountered error while listing tar entries: %s", err) + } + for _, e := range entries { + if !strings.Contains(e, "dev/") { + entriesSansDev = append(entriesSansDev, e) + } + } + + if reflect.DeepEqual(entriesSansDev, layerEntries) || reflect.DeepEqual(entriesSansDev, layerEntriesAUFS) { + found = true + break + } + } + } + + if !found { + c.Fatalf("failed to find the layer with the right content listing") + } + +} diff --git a/integration-cli/docker_cli_save_load_unix_test.go b/integration-cli/docker_cli_save_load_unix_test.go new file mode 100644 index 00000000..2bca3b85 --- /dev/null +++ b/integration-cli/docker_cli_save_load_unix_test.go @@ -0,0 +1,81 @@ +// +build !windows + +package main + +import ( + "bytes" + "io/ioutil" + "os" + "os/exec" + + "github.com/go-check/check" + "github.com/kr/pty" +) + +// save a repo and try to load it using stdout +func (s *DockerSuite) TestSaveAndLoadRepoStdout(c *check.C) { + name := "test-save-and-load-repo-stdout" + dockerCmd(c, "run", "--name", name, "busybox", "true") + + repoName := "foobar-save-load-test" + out, _ := dockerCmd(c, "commit", name, repoName) + + before, _ := dockerCmd(c, "inspect", repoName) + + tmpFile, err := ioutil.TempFile("", "foobar-save-load-test.tar") + c.Assert(err, check.IsNil) + defer os.Remove(tmpFile.Name()) + + saveCmd := exec.Command(dockerBinary, "save", repoName) + saveCmd.Stdout = tmpFile + + if _, err = runCommand(saveCmd); err != nil { + c.Fatalf("failed to save repo: %v", err) +
} + + tmpFile, err = os.Open(tmpFile.Name()) + c.Assert(err, check.IsNil) + + deleteImages(repoName) + + loadCmd := exec.Command(dockerBinary, "load") + loadCmd.Stdin = tmpFile + + if out, _, err = runCommandWithOutput(loadCmd); err != nil { + c.Fatalf("failed to load repo: %s, %v", out, err) + } + + after, _ := dockerCmd(c, "inspect", repoName) + + if before != after { + c.Fatalf("inspect output differs after a save / load round trip") + } + + deleteImages(repoName) + + pty, tty, err := pty.Open() + if err != nil { + c.Fatalf("Could not open pty: %v", err) + } + cmd := exec.Command(dockerBinary, "save", repoName) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + if err := cmd.Start(); err != nil { + c.Fatalf("start err: %v", err) + } + if err := cmd.Wait(); err == nil { + c.Fatal("expected docker save to refuse writing to a TTY") + } + + buf := make([]byte, 1024) + + n, err := pty.Read(buf) + if err != nil { + c.Fatal("could not read tty output") + } + + if !bytes.Contains(buf[:n], []byte("Cowardly refusing")) { + c.Fatalf("expected 'Cowardly refusing' in the tty output, got: %q", string(buf[:n])) + } +} diff --git a/integration-cli/docker_cli_search_test.go b/integration-cli/docker_cli_search_test.go new file mode 100644 index 00000000..d89c05cc --- /dev/null +++ b/integration-cli/docker_cli_search_test.go @@ -0,0 +1,91 @@ +package main + +import ( + "strings" + + "github.com/go-check/check" +) + +// search for repos named "busybox" on the central registry +func (s *DockerSuite) TestSearchOnCentralRegistry(c *check.C) { + testRequires(c, Network) + + out, exitCode := dockerCmd(c, "search", "busybox") + if exitCode != 0 { + c.Fatalf("failed to search on the central registry: %s", out) + } + + if !strings.Contains(out, "Busybox base image.") { + c.Fatal("couldn't find any repository whose description contains 'Busybox base image.'") + } +} + +func (s *DockerSuite) TestSearchStarsOptionWithWrongParameter(c *check.C) { + out, exitCode, err := dockerCmdWithError(c, "search", "--stars=a", "busybox") + if err == nil || exitCode == 0 { + c.Fatalf("expected an error for the invalid --stars value, got: %s, %v", out, err) + } + + if !strings.Contains(out, "invalid value") { + c.Fatal("couldn't find the invalid value warning") + } + + out, exitCode, err = dockerCmdWithError(c, "search", "-s=-1", "busybox") + if err == nil || exitCode == 0 { + c.Fatalf("expected an error for the negative -s value, got: %s, %v", out, err) + } + + if !strings.Contains(out, "invalid value") { + c.Fatal("couldn't find the invalid value warning") + } +} + +func (s *DockerSuite) TestSearchCmdOptions(c *check.C) { + testRequires(c, Network) + + out, exitCode := dockerCmd(c, "search", "--help") + if exitCode != 0 { + c.Fatalf("failed to get search help information: %s", out) + } + + if !strings.Contains(out, "Usage:\tdocker search [OPTIONS] TERM") { + c.Fatalf("failed to show docker search usage: %s", out) + } + + outSearchCmd, exitCode := dockerCmd(c, "search", "busybox") + if exitCode != 0 { + c.Fatalf("failed to search on the central registry: %s", outSearchCmd) + } + + outSearchCmdNotrunc, _ := dockerCmd(c, "search", "--no-trunc=true", "busybox") + + if len(outSearchCmd) > len(outSearchCmdNotrunc) { + c.Fatalf("--no-trunc output should be at least as long as the truncated output") + } + + outSearchCmdautomated, exitCode := dockerCmd(c, "search", "--automated=true", "busybox") // busybox is an official base image, not an AUTOMATED build
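+ // The checks below verify the exit code and then scan each result row; a row + // beginning with "busybox " would mean the official image was wrongly reported + // as an AUTOMATED build.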
+ if exitCode != 0 { + c.Fatalf("failed to search with automated=true on the central registry: %s", outSearchCmdautomated) + } + + outSearchCmdautomatedSlice := strings.Split(outSearchCmdautomated, "\n") + for i := range outSearchCmdautomatedSlice { + if strings.HasPrefix(outSearchCmdautomatedSlice[i], "busybox ") { + c.Fatalf("busybox should not be listed as an AUTOMATED image: %s", outSearchCmdautomated) + } + } + + outSearchCmdStars, exitCode := dockerCmd(c, "search", "-s=2", "busybox") + if exitCode != 0 { + c.Fatalf("failed to search with stars=2 on the central registry: %s", outSearchCmdStars) + } + + if strings.Count(outSearchCmdStars, "[OK]") > strings.Count(outSearchCmd, "[OK]") { + c.Fatalf("the number of results with at least two stars should not exceed the total number of results: %s", outSearchCmdStars) + } + + out, exitCode = dockerCmd(c, "search", "--stars=2", "--automated=true", "--no-trunc=true", "busybox") + if exitCode != 0 { + c.Fatalf("failed to search with stars&automated&no-trunc options on the central registry: %s", out) + } +} diff --git a/integration-cli/docker_cli_service_test.go b/integration-cli/docker_cli_service_test.go new file mode 100644 index 00000000..aaf5e819 --- /dev/null +++ b/integration-cli/docker_cli_service_test.go @@ -0,0 +1,69 @@ +// +build experimental + +package main + +import ( + "strings" + + "github.com/go-check/check" +) + +func assertSrvIsAvailable(c *check.C, sname, name string) { + if !isSrvPresent(c, sname, name) { + c.Fatalf("Service %s on network %s not found in `service ls` output", sname, name) + } +} + +func assertSrvNotAvailable(c *check.C, sname, name string) { + if isSrvPresent(c, sname, name) { + c.Fatalf("Found service %s on network %s in `service ls` output", sname, name) + } +} + +func isSrvPresent(c *check.C, sname, name string) bool { + out, _, _ := dockerCmdWithStdoutStderr(c, "service", "ls") + lines := strings.Split(out, "\n") + for i := 1; i < len(lines)-1; i++ { + if strings.Contains(lines[i], sname) && strings.Contains(lines[i], name) { + return true + } + } + return false +} + +func isCntPresent(c *check.C, cname, sname, name string) bool { + out, _, _ := dockerCmdWithStdoutStderr(c, "service", "ls", "--no-trunc") + lines := strings.Split(out, "\n") + for i := 1; i < len(lines)-1; i++ { + if strings.Contains(lines[i], name) && strings.Contains(lines[i], sname) && strings.Contains(lines[i], cname) { + return true + } + } + return false +} + +func (s *DockerSuite) TestDockerServiceCreateDelete(c *check.C) { + dockerCmdWithStdoutStderr(c, "network", "create", "test") + assertNwIsAvailable(c, "test") + + dockerCmdWithStdoutStderr(c, "service", "publish", "s1.test") + assertSrvIsAvailable(c, "s1", "test") + + dockerCmdWithStdoutStderr(c, "service", "unpublish", "s1.test") + assertSrvNotAvailable(c, "s1", "test") + + dockerCmdWithStdoutStderr(c, "network", "rm", "test") + assertNwNotAvailable(c, "test") +} + +func (s *DockerSuite) TestDockerPublishServiceFlag(c *check.C) { + // Run saying the container is the backend for the specified service on the specified network + out, _ := dockerCmd(c, "run", "-d", "--expose=23", "--publish-service", "telnet.production", "busybox", "top") + cid := strings.TrimSpace(out) + + // Verify the container is attached in the `service ls` output + assertSrvIsAvailable(c, "telnet", "production") + dockerCmd(c, "rm", "-f", cid) +} diff --git a/integration-cli/docker_cli_start_test.go b/integration-cli/docker_cli_start_test.go new file mode 100644 index 00000000..ce5c48e6 --- /dev/null +++ b/integration-cli/docker_cli_start_test.go @@
-0,0 +1,178 @@ +package main + +import ( + "fmt" + "strings" + "time" + + "github.com/go-check/check" +) + +// Regression test for https://github.com/docker/docker/issues/7843 +func (s *DockerSuite) TestStartAttachReturnsOnError(c *check.C) { + dockerCmd(c, "run", "-d", "--name", "test", "busybox") + dockerCmd(c, "wait", "test") + + // Expect this to fail because the above container is stopped, this is what we want + if _, _, err := dockerCmdWithError(c, "run", "-d", "--name", "test2", "--link", "test:test", "busybox"); err == nil { + c.Fatal("Expected error but got none") + } + + ch := make(chan error) + go func() { + // Attempt to start attached to the container that won't start + // This should return an error immediately since the container can't be started + if _, _, err := dockerCmdWithError(c, "start", "-a", "test2"); err == nil { + ch <- fmt.Errorf("Expected error but got none") + } + close(ch) + }() + + select { + case err := <-ch: + c.Assert(err, check.IsNil) + case <-time.After(time.Second): + c.Fatalf("Attach did not exit properly") + } +} + +// gh#8555: the exit code should be passed through when using start -a +func (s *DockerSuite) TestStartAttachCorrectExitCode(c *check.C) { + out, _, _ := dockerCmdWithStdoutStderr(c, "run", "-d", "busybox", "sh", "-c", "sleep 2; exit 1") + out = strings.TrimSpace(out) + + // make sure the container has exited before trying the "start -a" + dockerCmd(c, "wait", out) + + startOut, exitCode, err := dockerCmdWithError(c, "start", "-a", out) + if err != nil && !strings.Contains(err.Error(), "exit status 1") { + c.Fatalf("start command failed unexpectedly with error: %v, output: %q", err, startOut) + } + if exitCode != 1 { + c.Fatalf("start -a did not respond with proper exit code: expected 1, got %d", exitCode) + } +} + +func (s *DockerSuite) TestStartAttachSilent(c *check.C) { + name := "teststartattachsilent" + dockerCmd(c, "run", "--name", name, "busybox", "echo", "test") + + // make sure the container has exited before trying the "start -a" + dockerCmd(c, "wait", name) + + startOut, _ := dockerCmd(c, "start", "-a", name) + if expected := "test\n"; startOut != expected { + c.Fatalf("start -a produced unexpected output: expected %q, got %q", expected, startOut) + } +} + +func (s *DockerSuite) TestStartRecordError(c *check.C) { + // when a container runs successfully, we should not have state.Error + dockerCmd(c, "run", "-d", "-p", "9999:9999", "--name", "test", "busybox", "top") + stateErr, err := inspectField("test", "State.Error") + c.Assert(err, check.IsNil) + if stateErr != "" { + c.Fatalf("Expected to not have state error but got state.Error(%q)", stateErr) + } + + // Expect this to fail and record an error because of the port conflict + out, _, err := dockerCmdWithError(c, "run", "-d", "--name", "test2", "-p", "9999:9999", "busybox", "top") + if err == nil { + c.Fatalf("Expected error but got none, output %q", out) + } + + stateErr, err = inspectField("test2", "State.Error") + c.Assert(err, check.IsNil) + expected := "port is already allocated" + if stateErr == "" || !strings.Contains(stateErr, expected) { + c.Fatalf("State.Error(%q) does not include %q", stateErr, expected) + } + + // Expect the conflict to be resolved when we stop the initial container + dockerCmd(c, "stop", "test") + dockerCmd(c, "start", "test2") + stateErr, err = inspectField("test2", "State.Error") + c.Assert(err, check.IsNil) + if stateErr != "" { + c.Fatalf("Expected to not have state error but got state.Error(%q)", stateErr) + } +} + +func (s
*DockerSuite) TestStartPausedContainer(c *check.C) { + defer unpauseAllContainers() + + dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top") + + dockerCmd(c, "pause", "testing") + + if out, _, err := dockerCmdWithError(c, "start", "testing"); err == nil || !strings.Contains(out, "Cannot start a paused container, try unpause instead.") { + c.Fatalf("an error should have been shown that you cannot start a paused container: %s\n%v", out, err) + } +} + +func (s *DockerSuite) TestStartMultipleContainers(c *check.C) { + // run a container named 'parent' and create two containers linked to it + dockerCmd(c, "run", "-d", "--name", "parent", "busybox", "top") + + for _, container := range []string{"child_first", "child_second"} { + dockerCmd(c, "create", "--name", container, "--link", "parent:parent", "busybox", "top") + } + + // stop the 'parent' container + dockerCmd(c, "stop", "parent") + + out, err := inspectField("parent", "State.Running") + c.Assert(err, check.IsNil) + if out != "false" { + c.Fatal("Container should be stopped") + } + + // start all three containers: 'child_first' starts first and should fail, + // then 'parent' starts, and finally 'child_second' starts + out, _, err = dockerCmdWithError(c, "start", "child_first", "parent", "child_second") + if !strings.Contains(out, "Cannot start container child_first") || err == nil { + c.Fatal("Expected error but got none") + } + + for container, expected := range map[string]string{"parent": "true", "child_first": "false", "child_second": "true"} { + out, err := inspectField(container, "State.Running") + c.Assert(err, check.IsNil) + if out != expected { + c.Fatal("Container running state wrong") + } + } +} + +func (s *DockerSuite) TestStartAttachMultipleContainers(c *check.C) { + // run multiple containers to test + for _, container := range []string{"test1", "test2", "test3"} { + dockerCmd(c, "run", "-d", "--name", container, "busybox", "top") + } + + // stop all the containers + for _, container := range []string{"test1", "test2", "test3"} { + dockerCmd(c, "stop", container) + } + + // starting and attaching multiple containers at once should fail + for _, option := range []string{"-a", "-i", "-ai"} { + out, _, err := dockerCmdWithError(c, "start", option, "test1", "test2", "test3") + if !strings.Contains(out, "You cannot start and attach multiple containers at once.") || err == nil { + c.Fatal("Expected error but got none") + } + } + + // confirm that all the containers remain stopped + for container, expected := range map[string]string{"test1": "false", "test2": "false", "test3": "false"} { + out, err := inspectField(container, "State.Running") + if err != nil { + c.Fatal(out, err) + } + if out != expected { + c.Fatal("Container running state wrong") + } + } +} diff --git a/integration-cli/docker_cli_start_volume_driver_unix_test.go b/integration-cli/docker_cli_start_volume_driver_unix_test.go new file mode 100644 index 00000000..71f9f2d5 --- /dev/null +++ b/integration-cli/docker_cli_start_volume_driver_unix_test.go @@ -0,0 +1,246 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + + "github.com/go-check/check" +) + +func init() { + check.Suite(&DockerExternalVolumeSuite{ + ds: &DockerSuite{}, + }) +} + +type eventCounter struct { + activations int + creations int + removals int + mounts int + unmounts int + paths int +} + +type DockerExternalVolumeSuite struct { + server
*httptest.Server + ds *DockerSuite + d *Daemon + ec *eventCounter +} + +func (s *DockerExternalVolumeSuite) SetUpTest(c *check.C) { + s.d = NewDaemon(c) + s.ec = &eventCounter{} +} + +func (s *DockerExternalVolumeSuite) TearDownTest(c *check.C) { + s.d.Stop() + s.ds.TearDownTest(c) +} + +func (s *DockerExternalVolumeSuite) SetUpSuite(c *check.C) { + mux := http.NewServeMux() + s.server = httptest.NewServer(mux) + + type pluginRequest struct { + name string + } + + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + s.ec.activations++ + + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Implements": ["VolumeDriver"]}`) + }) + + mux.HandleFunc("/VolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) { + s.ec.creations++ + + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{}`) + }) + + mux.HandleFunc("/VolumeDriver.Remove", func(w http.ResponseWriter, r *http.Request) { + s.ec.removals++ + + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{}`) + }) + + mux.HandleFunc("/VolumeDriver.Path", func(w http.ResponseWriter, r *http.Request) { + s.ec.paths++ + + var pr pluginRequest + if err := json.NewDecoder(r.Body).Decode(&pr); err != nil { + http.Error(w, err.Error(), 500) + } + + p := hostVolumePath(pr.name) + + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, fmt.Sprintf("{\"Mountpoint\": \"%s\"}", p)) + }) + + mux.HandleFunc("/VolumeDriver.Mount", func(w http.ResponseWriter, r *http.Request) { + s.ec.mounts++ + + var pr pluginRequest + if err := json.NewDecoder(r.Body).Decode(&pr); err != nil { + http.Error(w, err.Error(), 500) + } + + p := hostVolumePath(pr.name) + if err := os.MkdirAll(p, 0755); err != nil { + http.Error(w, err.Error(), 500) + } + + if err := ioutil.WriteFile(filepath.Join(p, "test"), []byte(s.server.URL), 0644); err != nil { + http.Error(w, err.Error(), 500) + } + + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, fmt.Sprintf("{\"Mountpoint\": \"%s\"}", p)) + }) + + mux.HandleFunc("/VolumeDriver.Unmount", func(w http.ResponseWriter, r *http.Request) { + s.ec.unmounts++ + + var pr pluginRequest + if err := json.NewDecoder(r.Body).Decode(&pr); err != nil { + http.Error(w, err.Error(), 500) + } + + p := hostVolumePath(pr.name) + if err := os.RemoveAll(p); err != nil { + http.Error(w, err.Error(), 500) + } + + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{}`) + }) + + if err := os.MkdirAll("/etc/docker/plugins", 0755); err != nil { + c.Fatal(err) + } + + if err := ioutil.WriteFile("/etc/docker/plugins/test-external-volume-driver.spec", []byte(s.server.URL), 0644); err != nil { + c.Fatal(err) + } +} + +func (s *DockerExternalVolumeSuite) TearDownSuite(c *check.C) { + s.server.Close() + + if err := os.RemoveAll("/etc/docker/plugins"); err != nil { + c.Fatal(err) + } +} + +func (s *DockerExternalVolumeSuite) TestStartExternalNamedVolumeDriver(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatal(err) + } + + out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", "test-external-volume-driver", "busybox:latest", "cat", "/tmp/external-volume-test/test") + if err != nil { + c.Fatal(err) + } + + if !strings.Contains(out, s.server.URL) { + c.Fatalf("External volume mount failed. 
Output: %s\n", out) + } + + p := hostVolumePath("external-volume-test") + _, err = os.Lstat(p) + if err == nil { + c.Fatalf("Expected error checking volume path in host: %s\n", p) + } + + if !os.IsNotExist(err) { + c.Fatalf("Expected volume path in host to not exist: %s, %v\n", p, err) + } + + c.Assert(s.ec.activations, check.Equals, 1) + c.Assert(s.ec.creations, check.Equals, 1) + c.Assert(s.ec.removals, check.Equals, 1) + c.Assert(s.ec.mounts, check.Equals, 1) + c.Assert(s.ec.unmounts, check.Equals, 1) +} + +func (s *DockerExternalVolumeSuite) TestStartExternalVolumeUnnamedDriver(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatal(err) + } + + out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "/tmp/external-volume-test", "--volume-driver", "test-external-volume-driver", "busybox:latest", "cat", "/tmp/external-volume-test/test") + if err != nil { + c.Fatal(err) + } + + if !strings.Contains(out, s.server.URL) { + c.Fatalf("External volume mount failed. Output: %s\n", out) + } + + c.Assert(s.ec.activations, check.Equals, 1) + c.Assert(s.ec.creations, check.Equals, 1) + c.Assert(s.ec.removals, check.Equals, 1) + c.Assert(s.ec.mounts, check.Equals, 1) + c.Assert(s.ec.unmounts, check.Equals, 1) +} + +func (s DockerExternalVolumeSuite) TestStartExternalVolumeDriverVolumesFrom(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatal(err) + } + + if _, err := s.d.Cmd("run", "-d", "--name", "vol-test1", "-v", "/foo", "--volume-driver", "test-external-volume-driver", "busybox:latest"); err != nil { + c.Fatal(err) + } + + if _, err := s.d.Cmd("run", "--rm", "--volumes-from", "vol-test1", "--name", "vol-test2", "busybox", "ls", "/tmp"); err != nil { + c.Fatal(err) + } + + if _, err := s.d.Cmd("rm", "-f", "vol-test1"); err != nil { + c.Fatal(err) + } + + c.Assert(s.ec.activations, check.Equals, 1) + c.Assert(s.ec.creations, check.Equals, 2) + c.Assert(s.ec.removals, check.Equals, 1) + c.Assert(s.ec.mounts, check.Equals, 2) + c.Assert(s.ec.unmounts, check.Equals, 2) +} + +func (s DockerExternalVolumeSuite) TestStartExternalVolumeDriverDeleteContainer(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatal(err) + } + + if _, err := s.d.Cmd("run", "-d", "--name", "vol-test1", "-v", "/foo", "--volume-driver", "test-external-volume-driver", "busybox:latest"); err != nil { + c.Fatal(err) + } + + if _, err := s.d.Cmd("rm", "-fv", "vol-test1"); err != nil { + c.Fatal(err) + } + + c.Assert(s.ec.activations, check.Equals, 1) + c.Assert(s.ec.creations, check.Equals, 1) + c.Assert(s.ec.removals, check.Equals, 1) + c.Assert(s.ec.mounts, check.Equals, 1) + c.Assert(s.ec.unmounts, check.Equals, 1) +} + +func hostVolumePath(name string) string { + return fmt.Sprintf("/var/lib/docker/volumes/%s", name) +} diff --git a/integration-cli/docker_cli_stats_test.go b/integration-cli/docker_cli_stats_test.go new file mode 100644 index 00000000..25040289 --- /dev/null +++ b/integration-cli/docker_cli_stats_test.go @@ -0,0 +1,33 @@ +package main + +import ( + "os/exec" + "strings" + "time" + + "github.com/go-check/check" +) + +func (s *DockerSuite) TestCliStatsNoStream(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id := strings.TrimSpace(out) + if err := waitRun(id); err != nil { + c.Fatalf("error waiting for container to start: %v", err) + } + + statsCmd := exec.Command(dockerBinary, "stats", "--no-stream", id) + chErr := make(chan error) + go func() { + chErr <- statsCmd.Run() + }() + + select { + case err := <-chErr: + if err != nil 
{ + c.Fatalf("Error running stats: %v", err) + } + case <-time.After(3 * time.Second): + statsCmd.Process.Kill() + c.Fatalf("stats did not return immediately when not streaming") + } +} diff --git a/integration-cli/docker_cli_tag_test.go b/integration-cli/docker_cli_tag_test.go new file mode 100644 index 00000000..23f2aefa --- /dev/null +++ b/integration-cli/docker_cli_tag_test.go @@ -0,0 +1,151 @@ +package main + +import ( + "strings" + + "github.com/docker/docker/pkg/stringutils" + "github.com/go-check/check" +) + +// tagging a named image in a new unprefixed repo should work +func (s *DockerSuite) TestTagUnprefixedRepoByName(c *check.C) { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + + dockerCmd(c, "tag", "busybox:latest", "testfoobarbaz") +} + +// tagging an image by ID in a new unprefixed repo should work +func (s *DockerSuite) TestTagUnprefixedRepoByID(c *check.C) { + imageID, err := inspectField("busybox", "Id") + c.Assert(err, check.IsNil) + dockerCmd(c, "tag", imageID, "testfoobarbaz") +} + +// ensure we don't allow the use of invalid repository names; these tag operations should fail +func (s *DockerSuite) TestTagInvalidUnprefixedRepo(c *check.C) { + invalidRepos := []string{"fo$z$", "Foo@3cc", "Foo$3", "Foo*3", "Fo^3", "Foo!3", "F)xcz(", "fo%asd"} + + for _, repo := range invalidRepos { + _, _, err := dockerCmdWithError(c, "tag", "busybox", repo) + if err == nil { + c.Fatalf("tag busybox %v should have failed", repo) + } + } +} + +// ensure we don't allow the use of invalid tags; these tag operations should fail +func (s *DockerSuite) TestTagInvalidPrefixedRepo(c *check.C) { + longTag := stringutils.GenerateRandomAlphaOnlyString(121) + + invalidTags := []string{"repo:fo$z$", "repo:Foo@3cc", "repo:Foo$3", "repo:Foo*3", "repo:Fo^3", "repo:Foo!3", "repo:%goodbye", "repo:#hashtagit", "repo:F)xcz(", "repo:-foo", "repo:..", longTag} + + for _, repotag := range invalidTags { + _, _, err := dockerCmdWithError(c, "tag", "busybox", repotag) + if err == nil { + c.Fatalf("tag busybox %v should have failed", repotag) + } + } +} + +// ensure we allow the use of valid tags +func (s *DockerSuite) TestTagValidPrefixedRepo(c *check.C) { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + + validRepos := []string{"fooo/bar", "fooaa/test", "foooo:t"} + + for _, repo := range validRepos { + _, _, err := dockerCmdWithError(c, "tag", "busybox:latest", repo) + if err != nil { + c.Errorf("tag busybox %v should have worked: %s", repo, err) + continue + } + deleteImages(repo) + } +} + +// tagging an image with an existing tag name without the -f option should fail +func (s *DockerSuite) TestTagExistedNameWithoutForce(c *check.C) { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + + dockerCmd(c, "tag", "busybox:latest", "busybox:test") + out, _, err := dockerCmdWithError(c, "tag", "busybox:latest", "busybox:test") + if err == nil || !strings.Contains(out, "Conflict: Tag test is already set to image") { + c.Fatal("tag busybox busybox:test should have failed because busybox:test already exists") + } +} + +// tagging an image with an existing tag name with the -f option should work +func (s *DockerSuite) TestTagExistedNameWithForce(c *check.C) { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + + dockerCmd(c, "tag", "busybox:latest", "busybox:test") + dockerCmd(c, "tag", "-f", "busybox:latest", "busybox:test") +} + +func (s *DockerSuite) TestTagWithPrefixHyphen(c *check.C) { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + // test a repository name beginning with '-' + out, _, err := dockerCmdWithError(c, "tag", "busybox:latest", "-busybox:test") + if err == nil || !strings.Contains(out, "repository name component must match") { + c.Fatal("tagging a name that begins with '-' should have failed") + } + // test a namespace name beginning with '-' + out, _, err = dockerCmdWithError(c, "tag", "busybox:latest", "-test/busybox:test") + if err == nil || !strings.Contains(out, "repository name component must match") { + c.Fatal("tagging a name that begins with '-' should have failed") + } + // test an index name beginning with '-' + out, _, err = dockerCmdWithError(c, "tag", "busybox:latest", "-index:5000/busybox:test") + if err == nil || !strings.Contains(out, "Invalid index name (-index:5000). Cannot begin or end with a hyphen") { + c.Fatal("tagging a name that begins with '-' should have failed") + } +} + +// ensure tagging using official names works +// ensure all tags result in the same name +func (s *DockerSuite) TestTagOfficialNames(c *check.C) { + names := []string{ + "docker.io/busybox", + "index.docker.io/busybox", + "library/busybox", + "docker.io/library/busybox", + "index.docker.io/library/busybox", + } + + for _, name := range names { + out, exitCode, err := dockerCmdWithError(c, "tag", "-f", "busybox:latest", name+":latest") + if err != nil || exitCode != 0 { + c.Errorf("tag busybox %v should have worked: %s, %s", name, err, out) + continue + } + + // ensure we don't have multiple tag names.
+ out, _, err = dockerCmdWithError(c, "images") + if err != nil { + c.Errorf("listing images failed with errors: %v, %s", err, out) + } else if strings.Contains(out, name) { + c.Errorf("images should not have listed '%s'", name) + deleteImages(name + ":latest") + } + } + + for _, name := range names { + _, exitCode, err := dockerCmdWithError(c, "tag", "-f", name+":latest", "fooo/bar:latest") + if err != nil || exitCode != 0 { + c.Errorf("tag %v fooo/bar should have worked: %s", name, err) + continue + } + deleteImages("fooo/bar:latest") + } +} diff --git a/integration-cli/docker_cli_top_test.go b/integration-cli/docker_cli_top_test.go new file mode 100644 index 00000000..667a6c80 --- /dev/null +++ b/integration-cli/docker_cli_top_test.go @@ -0,0 +1,55 @@ +package main + +import ( + "strings" + + "github.com/go-check/check" +) + +func (s *DockerSuite) TestTopMultipleArgs(c *check.C) { + out, _ := dockerCmd(c, "run", "-i", "-d", "busybox", "top") + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "top", cleanedContainerID, "-o", "pid") + if !strings.Contains(out, "PID") { + c.Fatalf("did not see PID after top -o pid: %s", out) + } +} + +func (s *DockerSuite) TestTopNonPrivileged(c *check.C) { + out, _ := dockerCmd(c, "run", "-i", "-d", "busybox", "top") + cleanedContainerID := strings.TrimSpace(out) + + out1, _ := dockerCmd(c, "top", cleanedContainerID) + out2, _ := dockerCmd(c, "top", cleanedContainerID) + out, _ = dockerCmd(c, "kill", cleanedContainerID) + + if !strings.Contains(out1, "top") && !strings.Contains(out2, "top") { + c.Fatal("top should've listed `top` in the process list, but failed twice") + } else if !strings.Contains(out1, "top") { + c.Fatal("top should've listed `top` in the process list, but failed the first time") + } else if !strings.Contains(out2, "top") { + c.Fatal("top should've listed `top` in the process list, but failed the second time") + } +} + +func (s *DockerSuite) TestTopPrivileged(c *check.C) { + out, _ := dockerCmd(c, "run", "--privileged", "-i", "-d", "busybox", "top") + cleanedContainerID := strings.TrimSpace(out) + + out1, _ := dockerCmd(c, "top", cleanedContainerID) + out2, _ := dockerCmd(c, "top", cleanedContainerID) + out, _ = dockerCmd(c, "kill", cleanedContainerID) + + if !strings.Contains(out1, "top") && !strings.Contains(out2, "top") { + c.Fatal("top should've listed `top` in the process list, but failed twice") + } else if !strings.Contains(out1, "top") { + c.Fatal("top should've listed `top` in the process list, but failed the first time") + } else if !strings.Contains(out2, "top") { + c.Fatal("top should've listed `top` in the process list, but failed the second time") + } +} diff --git a/integration-cli/docker_cli_v2_only.go b/integration-cli/docker_cli_v2_only.go new file mode 100644 index 00000000..aa3d6a22 --- /dev/null +++ b/integration-cli/docker_cli_v2_only.go @@ -0,0 +1,148 @@ +package main + +import ( + "fmt" + "io/ioutil" + "net/http" + "os" + + "github.com/go-check/check" +) + +func makefile(contents string) (string, func(), error) { + cleanup := func() {} + + f, err := ioutil.TempFile(".", "tmp") + if err != nil { + return "", cleanup, err + } + err = ioutil.WriteFile(f.Name(), []byte(contents), os.ModePerm) + if err != nil { + return "", cleanup, err + } + + cleanup = func() { + err := os.Remove(f.Name()) + if err != nil { + fmt.Println("Error removing tmpfile") + } + } + return f.Name(), cleanup, nil +} + +// TestV2Only ensures that a daemon in v2-only mode does not +// attempt to contact any
v1 registry endpoints. +func (s *DockerRegistrySuite) TestV2Only(c *check.C) { + reg, err := newTestRegistry(c) + if err != nil { + c.Fatal(err.Error()) + } + + reg.registerHandler("/v2/", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(404) + }) + + reg.registerHandler("/v1/.*", func(w http.ResponseWriter, r *http.Request) { + c.Fatal("V1 registry contacted") + }) + + repoName := fmt.Sprintf("%s/busybox", reg.hostport) + + err = s.d.Start("--insecure-registry", reg.hostport, "--disable-legacy-registry=true") + if err != nil { + c.Fatalf("Error starting daemon: %s", err.Error()) + } + + dockerfileName, cleanup, err := makefile(fmt.Sprintf("FROM %s/busybox", reg.hostport)) + if err != nil { + c.Fatalf("Unable to create test dockerfile") + } + defer cleanup() + + s.d.Cmd("build", "--file", dockerfileName, ".") + + s.d.Cmd("run", repoName) + s.d.Cmd("login", "-u", "richard", "-p", "testtest", "-e", "testuser@testdomain.com", reg.hostport) + s.d.Cmd("tag", "busybox", repoName) + s.d.Cmd("push", repoName) + s.d.Cmd("pull", repoName) +} + +// TestV1 starts a daemon in 'normal' mode +// and ensure v1 endpoints are hit for the following operations: +// login, push, pull, build & run +func (s *DockerRegistrySuite) TestV1(c *check.C) { + reg, err := newTestRegistry(c) + if err != nil { + c.Fatal(err.Error()) + } + + v2Pings := 0 + reg.registerHandler("/v2/", func(w http.ResponseWriter, r *http.Request) { + v2Pings++ + // V2 ping 404 causes fallback to v1 + w.WriteHeader(404) + }) + + v1Pings := 0 + reg.registerHandler("/v1/_ping", func(w http.ResponseWriter, r *http.Request) { + v1Pings++ + }) + + v1Logins := 0 + reg.registerHandler("/v1/users/", func(w http.ResponseWriter, r *http.Request) { + v1Logins++ + }) + + v1Repo := 0 + reg.registerHandler("/v1/repositories/busybox/", func(w http.ResponseWriter, r *http.Request) { + v1Repo++ + }) + + reg.registerHandler("/v1/repositories/busybox/images", func(w http.ResponseWriter, r *http.Request) { + v1Repo++ + }) + + err = s.d.Start("--insecure-registry", reg.hostport, "--disable-legacy-registry=false") + if err != nil { + c.Fatalf("Error starting daemon: %s", err.Error()) + } + + dockerfileName, cleanup, err := makefile(fmt.Sprintf("FROM %s/busybox", reg.hostport)) + if err != nil { + c.Fatalf("Unable to create test dockerfile") + } + defer cleanup() + + s.d.Cmd("build", "--file", dockerfileName, ".") + if v1Repo == 0 { + c.Errorf("Expected v1 repository access after build") + } + + repoName := fmt.Sprintf("%s/busybox", reg.hostport) + s.d.Cmd("run", repoName) + if v1Repo == 1 { + c.Errorf("Expected v1 repository access after run") + } + + s.d.Cmd("login", "-u", "richard", "-p", "testtest", "-e", "testuser@testdomain.com", reg.hostport) + if v1Logins == 0 { + c.Errorf("Expected v1 login attempt") + } + + s.d.Cmd("tag", "busybox", repoName) + s.d.Cmd("push", repoName) + + if v1Repo != 2 || v1Pings != 1 { + c.Error("Not all endpoints contacted after push", v1Repo, v1Pings) + } + + s.d.Cmd("pull", repoName) + if v1Repo != 3 { + c.Errorf("Expected v1 repository access after pull") + } + +} diff --git a/integration-cli/docker_cli_version_test.go b/integration-cli/docker_cli_version_test.go new file mode 100644 index 00000000..f2d8b651 --- /dev/null +++ b/integration-cli/docker_cli_version_test.go @@ -0,0 +1,28 @@ +package main + +import ( + "strings" + + "github.com/go-check/check" +) + +// ensure docker version works +func (s *DockerSuite) TestVersionEnsureSucceeds(c *check.C) { + out, _ := dockerCmd(c, "version") + stringsToCheck := 
map[string]int{ + "Client:": 1, + "Server:": 1, + " Version:": 2, + " API version:": 2, + " Go version:": 2, + " Git commit:": 2, + " OS/Arch:": 2, + " Built:": 2, + } + + for k, v := range stringsToCheck { + if strings.Count(out, k) != v { + c.Errorf("%v expected %d instances found %d", k, v, strings.Count(out, k)) + } + } +} diff --git a/integration-cli/docker_cli_wait_test.go b/integration-cli/docker_cli_wait_test.go new file mode 100644 index 00000000..167ea1a0 --- /dev/null +++ b/integration-cli/docker_cli_wait_test.go @@ -0,0 +1,130 @@ +package main + +import ( + "bytes" + "os/exec" + "strings" + "time" + + "github.com/go-check/check" +) + +// non-blocking wait with 0 exit code +func (s *DockerSuite) TestWaitNonBlockedExitZero(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "true") + containerID := strings.TrimSpace(out) + + status := "true" + var err error + for i := 0; status != "false"; i++ { + status, err = inspectField(containerID, "State.Running") + c.Assert(err, check.IsNil) + + time.Sleep(time.Second) + if i >= 60 { + c.Fatal("Container should have stopped by now") + } + } + + out, _ = dockerCmd(c, "wait", containerID) + if strings.TrimSpace(out) != "0" { + c.Fatal("failed to set up container", out) + } + +} + +// blocking wait with 0 exit code +func (s *DockerSuite) TestWaitBlockedExitZero(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "trap 'exit 0' TERM; while true; do sleep 0.01; done") + containerID := strings.TrimSpace(out) + + if err := waitRun(containerID); err != nil { + c.Fatal(err) + } + + chWait := make(chan string) + go func() { + out, _, _ := runCommandWithOutput(exec.Command(dockerBinary, "wait", containerID)) + chWait <- out + }() + + time.Sleep(100 * time.Millisecond) + dockerCmd(c, "stop", containerID) + + select { + case status := <-chWait: + if strings.TrimSpace(status) != "0" { + c.Fatalf("expected exit 0, got %s", status) + } + case <-time.After(2 * time.Second): + c.Fatal("timeout waiting for `docker wait` to exit") + } + +} + +// non-blocking wait with random exit code +func (s *DockerSuite) TestWaitNonBlockedExitRandom(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "exit 99") + containerID := strings.TrimSpace(out) + + status := "true" + var err error + for i := 0; status != "false"; i++ { + status, err = inspectField(containerID, "State.Running") + c.Assert(err, check.IsNil) + + time.Sleep(time.Second) + if i >= 60 { + c.Fatal("Container should have stopped by now") + } + } + + out, _ = dockerCmd(c, "wait", containerID) + if strings.TrimSpace(out) != "99" { + c.Fatal("failed to set up container", out) + } + +} + +// blocking wait with random exit code +func (s *DockerSuite) TestWaitBlockedExitRandom(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "trap 'exit 99' TERM; while true; do sleep 0.01; done") + containerID := strings.TrimSpace(out) + if err := waitRun(containerID); err != nil { + c.Fatal(err) + } + if err := waitRun(containerID); err != nil { + c.Fatal(err) + } + + chWait := make(chan error) + waitCmd := exec.Command(dockerBinary, "wait", containerID) + waitCmdOut := bytes.NewBuffer(nil) + waitCmd.Stdout = waitCmdOut + if err := waitCmd.Start(); err != nil { + c.Fatal(err) + } + + go func() { + chWait <- waitCmd.Wait() + }() + + dockerCmd(c, "stop", containerID) + + select { + case err := <-chWait: + if err != nil { + c.Fatal(err) + } + status, err := waitCmdOut.ReadString('\n') + if err != nil { + c.Fatal(err) + } + if 
strings.TrimSpace(status) != "99" { + c.Fatalf("expected exit 99, got %s", status) + } + case <-time.After(2 * time.Second): + waitCmd.Process.Kill() + c.Fatal("timeout waiting for `docker wait` to exit") + } +} diff --git a/integration-cli/docker_test_vars.go b/integration-cli/docker_test_vars.go new file mode 100644 index 00000000..ed394d26 --- /dev/null +++ b/integration-cli/docker_test_vars.go @@ -0,0 +1,46 @@ +package main + +import ( + "fmt" + "os" + "os/exec" +) + +var ( + // the docker binary to use + dockerBinary = "docker" + + // the private registry image to use for tests involving the registry + registryImageName = "registry" + + // the private registry to use for tests + privateRegistryURL = "127.0.0.1:5000" + + dockerBasePath = "/var/lib/docker" + volumesConfigPath = dockerBasePath + "/volumes" + containerStoragePath = dockerBasePath + "/containers" + + runtimePath = "/var/run/docker" + execDriverPath = runtimePath + "/execdriver/native" + + workingDirectory string +) + +func init() { + if dockerBin := os.Getenv("DOCKER_BINARY"); dockerBin != "" { + dockerBinary = dockerBin + } + var err error + dockerBinary, err = exec.LookPath(dockerBinary) + if err != nil { + fmt.Printf("ERROR: couldn't resolve full path to the Docker binary (%v)", err) + os.Exit(1) + } + if registryImage := os.Getenv("REGISTRY_IMAGE"); registryImage != "" { + registryImageName = registryImage + } + if registry := os.Getenv("REGISTRY_URL"); registry != "" { + privateRegistryURL = registry + } + workingDirectory, _ = os.Getwd() +} diff --git a/integration-cli/docker_test_vars_cli.go b/integration-cli/docker_test_vars_cli.go new file mode 100644 index 00000000..e7efb7ce --- /dev/null +++ b/integration-cli/docker_test_vars_cli.go @@ -0,0 +1,8 @@ +// +build !daemon + +package main + +const ( + // tests should not assume daemon runs on the same machine as CLI + isLocalDaemon = false +) diff --git a/integration-cli/docker_test_vars_daemon.go b/integration-cli/docker_test_vars_daemon.go new file mode 100644 index 00000000..a4645900 --- /dev/null +++ b/integration-cli/docker_test_vars_daemon.go @@ -0,0 +1,8 @@ +// +build daemon + +package main + +const ( + // tests can assume daemon runs on the same machine as CLI + isLocalDaemon = true +) diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go new file mode 100644 index 00000000..e80015d1 --- /dev/null +++ b/integration-cli/docker_utils.go @@ -0,0 +1,1292 @@ +package main + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "net/http/httputil" + "net/url" + "os" + "os/exec" + "path" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/stringutils" + "github.com/go-check/check" +) + +// Daemon represents a Docker daemon for the testing framework. 
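+// A typical test obtains one via NewDaemon(c), boots it with Start or +// StartWithBusybox, issues CLI calls through Cmd, and defers Stop; a minimal +// sketch: +// +// d := NewDaemon(c) +// if err := d.StartWithBusybox(); err != nil { +// c.Fatal(err) +// } +// defer d.Stop() +// out, err := d.Cmd("run", "busybox", "true")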
+type Daemon struct { + // Defaults to "daemon" + // Useful to set to --daemon or -d for checking backwards compatibility + Command string + GlobalFlags []string + + id string + c *check.C + logFile *os.File + folder string + stdin io.WriteCloser + stdout, stderr io.ReadCloser + cmd *exec.Cmd + storageDriver string + execDriver string + wait chan error + userlandProxy bool +} + +func enableUserlandProxy() bool { + if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" { + if val, err := strconv.ParseBool(env); err == nil { + return val + } + } + return true +} + +// NewDaemon returns a Daemon instance to be used for testing. +// This will create a directory such as d123456789 in the folder specified by $DEST. +// The daemon will not automatically start. +func NewDaemon(c *check.C) *Daemon { + dest := os.Getenv("DEST") + if dest == "" { + c.Fatal("Please set the DEST environment variable") + } + + id := fmt.Sprintf("d%d", time.Now().UnixNano()%100000000) + dir := filepath.Join(dest, id) + daemonFolder, err := filepath.Abs(dir) + if err != nil { + c.Fatalf("Could not make %q an absolute path: %v", dir, err) + } + + if err := os.MkdirAll(filepath.Join(daemonFolder, "graph"), 0600); err != nil { + c.Fatalf("Could not create %s/graph directory", daemonFolder) + } + + userlandProxy := true + if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" { + if val, err := strconv.ParseBool(env); err == nil { + userlandProxy = val + } + } + + return &Daemon{ + Command: "daemon", + id: id, + c: c, + folder: daemonFolder, + storageDriver: os.Getenv("DOCKER_GRAPHDRIVER"), + execDriver: os.Getenv("DOCKER_EXECDRIVER"), + userlandProxy: userlandProxy, + } +} + +// Start will start the daemon and return once it is ready to receive requests. +// You can specify additional daemon flags. +func (d *Daemon) Start(arg ...string) error { + dockerBinary, err := exec.LookPath(dockerBinary) + if err != nil { + d.c.Fatalf("[%s] could not find docker binary in $PATH: %v", d.id, err) + } + + args := append(d.GlobalFlags, + d.Command, + "--host", d.sock(), + "--graph", fmt.Sprintf("%s/graph", d.folder), + "--pidfile", fmt.Sprintf("%s/docker.pid", d.folder), + fmt.Sprintf("--userland-proxy=%t", d.userlandProxy), + ) + + // If we don't explicitly set the log-level or debug flag(-D) then + // turn on debug mode + foundIt := false + for _, a := range arg { + if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") || strings.Contains(a, "--debug") { + foundIt = true + } + } + if !foundIt { + args = append(args, "--debug") + } + + if d.storageDriver != "" { + args = append(args, "--storage-driver", d.storageDriver) + } + if d.execDriver != "" { + args = append(args, "--exec-driver", d.execDriver) + } + + args = append(args, arg...) + d.cmd = exec.Command(dockerBinary, args...)
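+ // At this point the assembled invocation looks roughly like: + // docker daemon --host unix:///<folder>/docker.sock --graph <folder>/graph --pidfile <folder>/docker.pid --userland-proxy=true --debug [extra args] + // (illustrative only; the exact flags depend on the environment and arguments)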
+ + d.logFile, err = os.OpenFile(filepath.Join(d.folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) + if err != nil { + d.c.Fatalf("[%s] Could not create %s/docker.log: %v", d.id, d.folder, err) + } + + d.cmd.Stdout = d.logFile + d.cmd.Stderr = d.logFile + + if err := d.cmd.Start(); err != nil { + return fmt.Errorf("[%s] could not start daemon: %v", d.id, err) + } + + wait := make(chan error) + + go func() { + wait <- d.cmd.Wait() + d.c.Logf("[%s] exiting daemon", d.id) + close(wait) + }() + + d.wait = wait + + tick := time.Tick(500 * time.Millisecond) + // make sure daemon is ready to receive requests + startTime := time.Now().Unix() + for { + d.c.Logf("[%s] waiting for daemon to start", d.id) + if time.Now().Unix()-startTime > 5 { + // After 5 seconds, give up + return fmt.Errorf("[%s] Daemon exited and never started", d.id) + } + select { + case <-time.After(2 * time.Second): + return fmt.Errorf("[%s] timeout: daemon does not respond", d.id) + case <-tick: + c, err := net.Dial("unix", filepath.Join(d.folder, "docker.sock")) + if err != nil { + continue + } + + client := httputil.NewClientConn(c, nil) + defer client.Close() + + req, err := http.NewRequest("GET", "/_ping", nil) + if err != nil { + d.c.Fatalf("[%s] could not create new request: %v", d.id, err) + } + + resp, err := client.Do(req) + if err != nil { + continue + } + if resp.StatusCode != http.StatusOK { + d.c.Logf("[%s] received status != 200 OK: %s", d.id, resp.Status) + } + + d.c.Logf("[%s] daemon started", d.id) + return nil + } + } +} + +// StartWithBusybox will first start the daemon with Daemon.Start() +// then save the busybox image from the main daemon and load it into this Daemon instance. +func (d *Daemon) StartWithBusybox(arg ...string) error { + if err := d.Start(arg...); err != nil { + return err + } + bb := filepath.Join(d.folder, "busybox.tar") + if _, err := os.Stat(bb); err != nil { + if !os.IsNotExist(err) { + return fmt.Errorf("unexpected error on busybox.tar stat: %v", err) + } + // saving busybox image from main daemon + if err := exec.Command(dockerBinary, "save", "--output", bb, "busybox:latest").Run(); err != nil { + return fmt.Errorf("could not save busybox image: %v", err) + } + } + // loading busybox image to this daemon + if _, err := d.Cmd("load", "--input", bb); err != nil { + return fmt.Errorf("could not load busybox image: %v", err) + } + if err := os.Remove(bb); err != nil { + d.c.Logf("Could not remove %s: %v", bb, err) + } + return nil +} + +// Stop will send a SIGINT every second and wait for the daemon to stop. +// If it times out, a SIGKILL is sent. +// Stop will not delete the daemon directory. If a purged daemon is needed, +// instantiate a new one with NewDaemon.
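+// (SIGINT here is os.Interrupt; if the daemon survives the interrupt attempts +// below, the escalation falls through to Process.Kill.)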
+func (d *Daemon) Stop() error { + if d.cmd == nil || d.wait == nil { + return errors.New("daemon not started") + } + + defer func() { + d.logFile.Close() + d.cmd = nil + }() + + i := 1 + tick := time.Tick(time.Second) + + if err := d.cmd.Process.Signal(os.Interrupt); err != nil { + return fmt.Errorf("could not send signal: %v", err) + } +out1: + for { + select { + case err := <-d.wait: + return err + case <-time.After(15 * time.Second): + // time for stopping jobs and run onShutdown hooks + d.c.Log("timeout") + break out1 + } + } + +out2: + for { + select { + case err := <-d.wait: + return err + case <-tick: + i++ + if i > 4 { + d.c.Logf("tried to interrupt daemon for %d times, now try to kill it", i) + break out2 + } + d.c.Logf("Attempt #%d: daemon is still running with pid %d", i, d.cmd.Process.Pid) + if err := d.cmd.Process.Signal(os.Interrupt); err != nil { + return fmt.Errorf("could not send signal: %v", err) + } + } + } + + if err := d.cmd.Process.Kill(); err != nil { + d.c.Logf("Could not kill daemon: %v", err) + return err + } + + return nil +} + +// Restart will restart the daemon by first stopping it and then starting it. +func (d *Daemon) Restart(arg ...string) error { + d.Stop() + return d.Start(arg...) +} + +func (d *Daemon) sock() string { + return fmt.Sprintf("unix://%s/docker.sock", d.folder) +} + +// Cmd will execute a docker CLI command against this Daemon. +// Example: d.Cmd("version") will run docker -H unix://path/to/unix.sock version +func (d *Daemon) Cmd(name string, arg ...string) (string, error) { + args := []string{"--host", d.sock(), name} + args = append(args, arg...) + c := exec.Command(dockerBinary, args...) + b, err := c.CombinedOutput() + return string(b), err +} + +// CmdWithArgs will execute a docker CLI command against a daemon with the +// given additional arguments +func (d *Daemon) CmdWithArgs(daemonArgs []string, name string, arg ...string) (string, error) { + args := append(daemonArgs, name) + args = append(args, arg...) + c := exec.Command(dockerBinary, args...) 
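+ // note: unlike Cmd, CmdWithArgs does not inject a --host flag; callers must + // pass any daemon-addressing flags themselves via daemonArgs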
+ b, err := c.CombinedOutput() + return string(b), err +} + +// LogfileName returns the path to the daemon's log file +func (d *Daemon) LogfileName() string { + return d.logFile.Name() +} + +func daemonHost() string { + daemonURLStr := "unix://" + opts.DefaultUnixSocket + if daemonHostVar := os.Getenv("DOCKER_HOST"); daemonHostVar != "" { + daemonURLStr = daemonHostVar + } + return daemonURLStr +} + +func sockConn(timeout time.Duration) (net.Conn, error) { + daemon := daemonHost() + daemonURL, err := url.Parse(daemon) + if err != nil { + return nil, fmt.Errorf("could not parse url %q: %v", daemon, err) + } + + var c net.Conn + switch daemonURL.Scheme { + case "unix": + return net.DialTimeout(daemonURL.Scheme, daemonURL.Path, timeout) + case "tcp": + return net.DialTimeout(daemonURL.Scheme, daemonURL.Host, timeout) + default: + return c, fmt.Errorf("unknown scheme %v (%s)", daemonURL.Scheme, daemon) + } +} + +func sockRequest(method, endpoint string, data interface{}) (int, []byte, error) { + jsonData := bytes.NewBuffer(nil) + if err := json.NewEncoder(jsonData).Encode(data); err != nil { + return -1, nil, err + } + + res, body, err := sockRequestRaw(method, endpoint, jsonData, "application/json") + if err != nil { + return -1, nil, err + } + b, err := readBody(body) + return res.StatusCode, b, err +} + +func sockRequestRaw(method, endpoint string, data io.Reader, ct string) (*http.Response, io.ReadCloser, error) { + c, err := sockConn(time.Duration(10 * time.Second)) + if err != nil { + return nil, nil, fmt.Errorf("could not dial docker daemon: %v", err) + } + + client := httputil.NewClientConn(c, nil) + + req, err := http.NewRequest(method, endpoint, data) + if err != nil { + client.Close() + return nil, nil, fmt.Errorf("could not create new request: %v", err) + } + + if ct != "" { + req.Header.Set("Content-Type", ct) + } + + resp, err := client.Do(req) + if err != nil { + client.Close() + return nil, nil, fmt.Errorf("could not perform request: %v", err) + } + body := ioutils.NewReadCloserWrapper(resp.Body, func() error { + defer resp.Body.Close() + return client.Close() + }) + + return resp, body, nil +} + +func readBody(b io.ReadCloser) ([]byte, error) { + defer b.Close() + return ioutil.ReadAll(b) +} + +func deleteContainer(container string) error { + container = strings.TrimSpace(strings.Replace(container, "\n", " ", -1)) + rmArgs := strings.Split(fmt.Sprintf("rm -fv %v", container), " ") + exitCode, err := runCommand(exec.Command(dockerBinary, rmArgs...)) + // set error manually if not set + if exitCode != 0 && err == nil { + err = fmt.Errorf("failed to remove container: `docker rm` exit is non-zero") + } + + return err +} + +func getAllContainers() (string, error) { + getContainersCmd := exec.Command(dockerBinary, "ps", "-q", "-a") + out, exitCode, err := runCommandWithOutput(getContainersCmd) + if exitCode != 0 && err == nil { + err = fmt.Errorf("failed to get a list of containers: %v", out) + } + + return out, err +} + +func deleteAllContainers() error { + containers, err := getAllContainers() + if err != nil { + fmt.Println(containers) + return err + } + + if err = deleteContainer(containers); err != nil { + return err + } + return nil +} + +var protectedImages = map[string]struct{}{} + +func init() { + out, err := exec.Command(dockerBinary, "images").CombinedOutput() + if err != nil { + panic(err) + } + lines := strings.Split(string(out), "\n")[1:] + for _, l := range lines { + if l == "" { + continue + } + fields := strings.Fields(l) + imgTag := fields[0] + ":" + fields[1] +
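+ // fields[0] is the REPOSITORY column and fields[1] the TAG column of the + // tabular `docker images` output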
// guard against dangling images in the tested daemon + if imgTag != ":" { + protectedImages[imgTag] = struct{}{} + } + } +} + +func deleteAllImages() error { + out, err := exec.Command(dockerBinary, "images").CombinedOutput() + if err != nil { + return err + } + lines := strings.Split(string(out), "\n")[1:] + var imgs []string + for _, l := range lines { + if l == "" { + continue + } + fields := strings.Fields(l) + imgTag := fields[0] + ":" + fields[1] + if _, ok := protectedImages[imgTag]; !ok { + if fields[0] == "" { + imgs = append(imgs, fields[2]) + continue + } + imgs = append(imgs, imgTag) + } + } + if len(imgs) == 0 { + return nil + } + args := append([]string{"rmi", "-f"}, imgs...) + if err := exec.Command(dockerBinary, args...).Run(); err != nil { + return err + } + return nil +} + +func getPausedContainers() (string, error) { + getPausedContainersCmd := exec.Command(dockerBinary, "ps", "-f", "status=paused", "-q", "-a") + out, exitCode, err := runCommandWithOutput(getPausedContainersCmd) + if exitCode != 0 && err == nil { + err = fmt.Errorf("failed to get a list of paused containers: %v", out) + } + + return out, err +} + +func getSliceOfPausedContainers() ([]string, error) { + out, err := getPausedContainers() + if err == nil { + if len(out) == 0 { + return nil, err + } + slice := strings.Split(strings.TrimSpace(out), "\n") + return slice, err + } + return []string{out}, err +} + +func unpauseContainer(container string) error { + unpauseCmd := exec.Command(dockerBinary, "unpause", container) + exitCode, err := runCommand(unpauseCmd) + if exitCode != 0 && err == nil { + err = fmt.Errorf("failed to unpause container") + } + + return err +} + +func unpauseAllContainers() error { + containers, err := getPausedContainers() + if err != nil { + fmt.Println(containers) + return err + } + + containers = strings.Replace(containers, "\n", " ", -1) + containers = strings.Trim(containers, " ") + containerList := strings.Split(containers, " ") + + for _, value := range containerList { + if err = unpauseContainer(value); err != nil { + return err + } + } + + return nil +} + +func deleteImages(images ...string) error { + args := []string{"rmi", "-f"} + args = append(args, images...) + rmiCmd := exec.Command(dockerBinary, args...)
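+ // e.g. `docker rmi -f fooo/bar:latest busybox:test`; a single invocation removes all the given images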
+ exitCode, err := runCommand(rmiCmd) + // set error manually if not set + if exitCode != 0 && err == nil { + err = fmt.Errorf("failed to remove image: `docker rmi` exit is non-zero") + } + return err +} + +func imageExists(image string) error { + inspectCmd := exec.Command(dockerBinary, "inspect", image) + exitCode, err := runCommand(inspectCmd) + if exitCode != 0 && err == nil { + err = fmt.Errorf("couldn't find image %q", image) + } + return err +} + +func pullImageIfNotExist(image string) (err error) { + if err := imageExists(image); err != nil { + pullCmd := exec.Command(dockerBinary, "pull", image) + _, exitCode, err := runCommandWithOutput(pullCmd) + + if err != nil || exitCode != 0 { + err = fmt.Errorf("image %q wasn't found locally and it couldn't be pulled: %s", image, err) + } + } + return +} + +func dockerCmdWithError(c *check.C, args ...string) (string, int, error) { + return runCommandWithOutput(exec.Command(dockerBinary, args...)) +} + +func dockerCmdWithStdoutStderr(c *check.C, args ...string) (string, string, int) { + stdout, stderr, status, err := runCommandWithStdoutStderr(exec.Command(dockerBinary, args...)) + c.Assert(err, check.IsNil, check.Commentf("%q failed with errors: %s, %v", strings.Join(args, " "), stderr, err)) + return stdout, stderr, status +} + +func dockerCmd(c *check.C, args ...string) (string, int) { + out, status, err := runCommandWithOutput(exec.Command(dockerBinary, args...)) + c.Assert(err, check.IsNil, check.Commentf("%q failed with errors: %s, %v", strings.Join(args, " "), out, err)) + return out, status +} + +// execute a docker command with a timeout +func dockerCmdWithTimeout(timeout time.Duration, args ...string) (string, int, error) { + out, status, err := runCommandWithOutputAndTimeout(exec.Command(dockerBinary, args...), timeout) + if err != nil { + return out, status, fmt.Errorf("%q failed with errors: %v : %q)", strings.Join(args, " "), err, out) + } + return out, status, err +} + +// execute a docker command in a directory +func dockerCmdInDir(c *check.C, path string, args ...string) (string, int, error) { + dockerCommand := exec.Command(dockerBinary, args...) + dockerCommand.Dir = path + out, status, err := runCommandWithOutput(dockerCommand) + if err != nil { + return out, status, fmt.Errorf("%q failed with errors: %v : %q)", strings.Join(args, " "), err, out) + } + return out, status, err +} + +// execute a docker command in a directory with a timeout +func dockerCmdInDirWithTimeout(timeout time.Duration, path string, args ...string) (string, int, error) { + dockerCommand := exec.Command(dockerBinary, args...) + dockerCommand.Dir = path + out, status, err := runCommandWithOutputAndTimeout(dockerCommand, timeout) + if err != nil { + return out, status, fmt.Errorf("%q failed with errors: %v : %q)", strings.Join(args, " "), err, out) + } + return out, status, err +} + +func findContainerIP(c *check.C, id string, vargs ...string) string { + args := append(vargs, "inspect", "--format='{{ .NetworkSettings.IPAddress }}'", id) + cmd := exec.Command(dockerBinary, args...) 
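+ // vargs lets callers target a specific daemon; the Daemon method below passes + // "--host", d.sock() ahead of the inspect arguments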
+ out, _, err := runCommandWithOutput(cmd) + if err != nil { + c.Fatal(err, out) + } + + return strings.Trim(out, " \r\n'") +} + +func (d *Daemon) findContainerIP(id string) string { + return findContainerIP(d.c, id, "--host", d.sock()) +} + +func getContainerCount() (int, error) { + const containers = "Containers:" + + cmd := exec.Command(dockerBinary, "info") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + return 0, err + } + + lines := strings.Split(out, "\n") + for _, line := range lines { + if strings.Contains(line, containers) { + output := strings.TrimSpace(line) + output = strings.TrimLeft(output, containers) + output = strings.Trim(output, " ") + containerCount, err := strconv.Atoi(output) + if err != nil { + return 0, err + } + return containerCount, nil + } + } + return 0, fmt.Errorf("couldn't find the Container count in the output") +} + +// FakeContext creates directories that can be used as a build context +type FakeContext struct { + Dir string +} + +// Add a file at a path, creating directories where necessary +func (f *FakeContext) Add(file, content string) error { + return f.addFile(file, []byte(content)) +} + +func (f *FakeContext) addFile(file string, content []byte) error { + filepath := path.Join(f.Dir, file) + dirpath := path.Dir(filepath) + if dirpath != "." { + if err := os.MkdirAll(dirpath, 0755); err != nil { + return err + } + } + return ioutil.WriteFile(filepath, content, 0644) + +} + +// Delete a file at a path +func (f *FakeContext) Delete(file string) error { + filepath := path.Join(f.Dir, file) + return os.RemoveAll(filepath) +} + +// Close deletes the context +func (f *FakeContext) Close() error { + return os.RemoveAll(f.Dir) +} + +func fakeContextFromNewTempDir() (*FakeContext, error) { + tmp, err := ioutil.TempDir("", "fake-context") + if err != nil { + return nil, err + } + if err := os.Chmod(tmp, 0755); err != nil { + return nil, err + } + return fakeContextFromDir(tmp), nil +} + +func fakeContextFromDir(dir string) *FakeContext { + return &FakeContext{dir} +} + +func fakeContextWithFiles(files map[string]string) (*FakeContext, error) { + ctx, err := fakeContextFromNewTempDir() + if err != nil { + return nil, err + } + for file, content := range files { + if err := ctx.Add(file, content); err != nil { + ctx.Close() + return nil, err + } + } + return ctx, nil +} + +func fakeContextAddDockerfile(ctx *FakeContext, dockerfile string) error { + if err := ctx.Add("Dockerfile", dockerfile); err != nil { + ctx.Close() + return err + } + return nil +} + +func fakeContext(dockerfile string, files map[string]string) (*FakeContext, error) { + ctx, err := fakeContextWithFiles(files) + if err != nil { + return nil, err + } + if err := fakeContextAddDockerfile(ctx, dockerfile); err != nil { + return nil, err + } + return ctx, nil +} + +// FakeStorage is a static file server. It might be running locally or remotely +// on test host. 
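+// A sketch of typical use (assuming a files map of path -> contents): +// +// server, err := fakeStorage(map[string]string{"index.html": "hello"}) +// if err != nil { +// c.Fatal(err) +// } +// defer server.Close() +// // server.URL() can then be referenced from a Dockerfile ADD instruction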
+type FakeStorage interface { + Close() error + URL() string + CtxDir() string +} + +func fakeBinaryStorage(archives map[string]*bytes.Buffer) (FakeStorage, error) { + ctx, err := fakeContextFromNewTempDir() + if err != nil { + return nil, err + } + for name, content := range archives { + if err := ctx.addFile(name, content.Bytes()); err != nil { + return nil, err + } + } + return fakeStorageWithContext(ctx) +} + +// fakeStorage returns either a local or remote (at daemon machine) file server +func fakeStorage(files map[string]string) (FakeStorage, error) { + ctx, err := fakeContextWithFiles(files) + if err != nil { + return nil, err + } + return fakeStorageWithContext(ctx) +} + +// fakeStorageWithContext returns either a local or remote (at daemon machine) file server +func fakeStorageWithContext(ctx *FakeContext) (FakeStorage, error) { + if isLocalDaemon { + return newLocalFakeStorage(ctx) + } + return newRemoteFileServer(ctx) +} + +// localFileStorage is a file storage on the running machine +type localFileStorage struct { + *FakeContext + *httptest.Server +} + +func (s *localFileStorage) URL() string { + return s.Server.URL +} + +func (s *localFileStorage) CtxDir() string { + return s.FakeContext.Dir +} + +func (s *localFileStorage) Close() error { + defer s.Server.Close() + return s.FakeContext.Close() +} + +func newLocalFakeStorage(ctx *FakeContext) (*localFileStorage, error) { + handler := http.FileServer(http.Dir(ctx.Dir)) + server := httptest.NewServer(handler) + return &localFileStorage{ + FakeContext: ctx, + Server: server, + }, nil +} + +// remoteFileServer is a containerized static file server started on the remote +// testing machine to be used in URL-accepting docker build functionality. +type remoteFileServer struct { + host string // hostname/port web server is listening to on docker host e.g. 0.0.0.0:43712 + container string + image string + ctx *FakeContext +} + +func (f *remoteFileServer) URL() string { + u := url.URL{ + Scheme: "http", + Host: f.host} + return u.String() +} + +func (f *remoteFileServer) CtxDir() string { + return f.ctx.Dir +} + +func (f *remoteFileServer) Close() error { + defer func() { + if f.ctx != nil { + f.ctx.Close() + } + if f.image != "" { + deleteImages(f.image) + } + }() + if f.container == "" { + return nil + } + return deleteContainer(f.container) +} + +func newRemoteFileServer(ctx *FakeContext) (*remoteFileServer, error) { + var ( + image = fmt.Sprintf("fileserver-img-%s", strings.ToLower(stringutils.GenerateRandomAlphaOnlyString(10))) + container = fmt.Sprintf("fileserver-cnt-%s", strings.ToLower(stringutils.GenerateRandomAlphaOnlyString(10))) + ) + + // Build the image + if err := fakeContextAddDockerfile(ctx, `FROM httpserver +COPY . /static`); err != nil { + return nil, fmt.Errorf("Cannot add Dockerfile to context: %v", err) + } + if _, err := buildImageFromContext(image, ctx, false); err != nil { + return nil, fmt.Errorf("failed building file storage container image: %v", err) + } + + // Start the container + runCmd := exec.Command(dockerBinary, "run", "-d", "-P", "--name", container, image) + if out, ec, err := runCommandWithOutput(runCmd); err != nil { + return nil, fmt.Errorf("failed to start file storage container. 
ec=%v\nout=%s\nerr=%v", ec, out, err) + } + + // Find out the system assigned port + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "port", container, "80/tcp")) + if err != nil { + return nil, fmt.Errorf("failed to find container port: err=%v\nout=%s", err, out) + } + + return &remoteFileServer{ + container: container, + image: image, + host: strings.Trim(out, "\n"), + ctx: ctx}, nil +} + +func inspectFieldAndMarshall(name, field string, output interface{}) error { + str, err := inspectFieldJSON(name, field) + if err != nil { + return err + } + + return json.Unmarshal([]byte(str), output) +} + +func inspectFilter(name, filter string) (string, error) { + format := fmt.Sprintf("{{%s}}", filter) + inspectCmd := exec.Command(dockerBinary, "inspect", "-f", format, name) + out, exitCode, err := runCommandWithOutput(inspectCmd) + if err != nil || exitCode != 0 { + return "", fmt.Errorf("failed to inspect container %s: %s", name, out) + } + return strings.TrimSpace(out), nil +} + +func inspectField(name, field string) (string, error) { + return inspectFilter(name, fmt.Sprintf(".%s", field)) +} + +func inspectFieldJSON(name, field string) (string, error) { + return inspectFilter(name, fmt.Sprintf("json .%s", field)) +} + +func inspectFieldMap(name, path, field string) (string, error) { + return inspectFilter(name, fmt.Sprintf("index .%s %q", path, field)) +} + +func inspectMountSourceField(name, destination string) (string, error) { + m, err := inspectMountPoint(name, destination) + if err != nil { + return "", err + } + return m.Source, nil +} + +func inspectMountPoint(name, destination string) (types.MountPoint, error) { + out, err := inspectFieldJSON(name, "Mounts") + if err != nil { + return types.MountPoint{}, err + } + + return inspectMountPointJSON(out, destination) +} + +var errMountNotFound = errors.New("mount point not found") + +func inspectMountPointJSON(j, destination string) (types.MountPoint, error) { + var mp []types.MountPoint + if err := unmarshalJSON([]byte(j), &mp); err != nil { + return types.MountPoint{}, err + } + + var m *types.MountPoint + for _, c := range mp { + if c.Destination == destination { + m = &c + break + } + } + + if m == nil { + return types.MountPoint{}, errMountNotFound + } + + return *m, nil +} + +func getIDByName(name string) (string, error) { + return inspectField(name, "Id") +} + +// getContainerState returns the exit code of the container +// and true if it's running +// the exit code should be ignored if it's running +func getContainerState(c *check.C, id string) (int, bool, error) { + var ( + exitStatus int + running bool + ) + out, exitCode := dockerCmd(c, "inspect", "--format={{.State.Running}} {{.State.ExitCode}}", id) + if exitCode != 0 { + return 0, false, fmt.Errorf("%q doesn't exist: %s", id, out) + } + + out = strings.Trim(out, "\n") + splitOutput := strings.Split(out, " ") + if len(splitOutput) != 2 { + return 0, false, fmt.Errorf("failed to get container state: output is broken") + } + if splitOutput[0] == "true" { + running = true + } + if n, err := strconv.Atoi(splitOutput[1]); err == nil { + exitStatus = n + } else { + return 0, false, fmt.Errorf("failed to get container state: couldn't parse integer") + } + + return exitStatus, running, nil +} + +func buildImageCmd(name, dockerfile string, useCache bool) *exec.Cmd { + args := []string{"-D", "build", "-t", name} + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, "-") + buildCmd := exec.Command(dockerBinary, args...) 
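+	// the trailing "-" makes the daemon read the build context from stdin,
+	// so the Dockerfile attached below is the only file in the context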
+ buildCmd.Stdin = strings.NewReader(dockerfile) + return buildCmd + +} + +func buildImageWithOut(name, dockerfile string, useCache bool) (string, string, error) { + buildCmd := buildImageCmd(name, dockerfile, useCache) + out, exitCode, err := runCommandWithOutput(buildCmd) + if err != nil || exitCode != 0 { + return "", out, fmt.Errorf("failed to build the image: %s", out) + } + id, err := getIDByName(name) + if err != nil { + return "", out, err + } + return id, out, nil +} + +func buildImageWithStdoutStderr(name, dockerfile string, useCache bool) (string, string, string, error) { + buildCmd := buildImageCmd(name, dockerfile, useCache) + stdout, stderr, exitCode, err := runCommandWithStdoutStderr(buildCmd) + if err != nil || exitCode != 0 { + return "", stdout, stderr, fmt.Errorf("failed to build the image: %s", stdout) + } + id, err := getIDByName(name) + if err != nil { + return "", stdout, stderr, err + } + return id, stdout, stderr, nil +} + +func buildImage(name, dockerfile string, useCache bool) (string, error) { + id, _, err := buildImageWithOut(name, dockerfile, useCache) + return id, err +} + +func buildImageFromContext(name string, ctx *FakeContext, useCache bool) (string, error) { + args := []string{"build", "-t", name} + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, ".") + buildCmd := exec.Command(dockerBinary, args...) + buildCmd.Dir = ctx.Dir + out, exitCode, err := runCommandWithOutput(buildCmd) + if err != nil || exitCode != 0 { + return "", fmt.Errorf("failed to build the image: %s", out) + } + return getIDByName(name) +} + +func buildImageFromPath(name, path string, useCache bool) (string, error) { + args := []string{"build", "-t", name} + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, path) + buildCmd := exec.Command(dockerBinary, args...) 
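+	// unlike buildImageCmd, the context is the directory at path, so the
+	// Dockerfile there can ADD/COPY other files from it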
+	out, exitCode, err := runCommandWithOutput(buildCmd)
+	if err != nil || exitCode != 0 {
+		return "", fmt.Errorf("failed to build the image: %s", out)
+	}
+	return getIDByName(name)
+}
+
+type gitServer interface {
+	URL() string
+	Close() error
+}
+
+type localGitServer struct {
+	*httptest.Server
+}
+
+func (r *localGitServer) Close() error {
+	r.Server.Close()
+	return nil
+}
+
+func (r *localGitServer) URL() string {
+	return r.Server.URL
+}
+
+type fakeGit struct {
+	root    string
+	server  gitServer
+	RepoURL string
+}
+
+func (g *fakeGit) Close() {
+	g.server.Close()
+	os.RemoveAll(g.root)
+}
+
+func newFakeGit(name string, files map[string]string, enforceLocalServer bool) (*fakeGit, error) {
+	ctx, err := fakeContextWithFiles(files)
+	if err != nil {
+		return nil, err
+	}
+	defer ctx.Close()
+	curdir, err := os.Getwd()
+	if err != nil {
+		return nil, err
+	}
+	defer os.Chdir(curdir)
+
+	if output, err := exec.Command("git", "init", ctx.Dir).CombinedOutput(); err != nil {
+		return nil, fmt.Errorf("error trying to init repo: %s (%s)", err, output)
+	}
+	err = os.Chdir(ctx.Dir)
+	if err != nil {
+		return nil, err
+	}
+	if output, err := exec.Command("git", "config", "user.name", "Fake User").CombinedOutput(); err != nil {
+		return nil, fmt.Errorf("error trying to set 'user.name': %s (%s)", err, output)
+	}
+	if output, err := exec.Command("git", "config", "user.email", "fake.user@example.com").CombinedOutput(); err != nil {
+		return nil, fmt.Errorf("error trying to set 'user.email': %s (%s)", err, output)
+	}
+	if output, err := exec.Command("git", "add", "*").CombinedOutput(); err != nil {
+		return nil, fmt.Errorf("error trying to add files to repo: %s (%s)", err, output)
+	}
+	if output, err := exec.Command("git", "commit", "-a", "-m", "Initial commit").CombinedOutput(); err != nil {
+		return nil, fmt.Errorf("error trying to commit to repo: %s (%s)", err, output)
+	}
+
+	root, err := ioutil.TempDir("", "docker-test-git-repo")
+	if err != nil {
+		return nil, err
+	}
+	repoPath := filepath.Join(root, name+".git")
+	if output, err := exec.Command("git", "clone", "--bare", ctx.Dir, repoPath).CombinedOutput(); err != nil {
+		os.RemoveAll(root)
+		return nil, fmt.Errorf("error trying to clone --bare: %s (%s)", err, output)
+	}
+	err = os.Chdir(repoPath)
+	if err != nil {
+		os.RemoveAll(root)
+		return nil, err
+	}
+	if output, err := exec.Command("git", "update-server-info").CombinedOutput(); err != nil {
+		os.RemoveAll(root)
+		return nil, fmt.Errorf("error trying to git update-server-info: %s (%s)", err, output)
+	}
+	err = os.Chdir(curdir)
+	if err != nil {
+		os.RemoveAll(root)
+		return nil, err
+	}
+
+	var server gitServer
+	if !enforceLocalServer {
+		// use fakeStorage server, which might be local or remote (at test daemon)
+		server, err = fakeStorageWithContext(fakeContextFromDir(root))
+		if err != nil {
+			os.RemoveAll(root)
+			return nil, fmt.Errorf("cannot start fake storage: %v", err)
+		}
+	} else {
+		// always start a local http server on CLI test machine
+		httpServer := httptest.NewServer(http.FileServer(http.Dir(root)))
+		server = &localGitServer{httpServer}
+	}
+	return &fakeGit{
+		root:    root,
+		server:  server,
+		RepoURL: fmt.Sprintf("%s/%s.git", server.URL(), name),
+	}, nil
+}
+
+// Write `content` to the file at path `dst`, creating it if necessary,
+// as well as any missing directories.
+// The file is truncated if it already exists.
+// Call c.Fatal() at the first error.
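+// Example (with hypothetical paths): writeFile(filepath.Join(tmp, "a/b.txt"), "data", c)
+// creates tmp/a/ if needed and leaves tmp/a/b.txt containing "data".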
+func writeFile(dst, content string, c *check.C) { + // Create subdirectories if necessary + if err := os.MkdirAll(path.Dir(dst), 0700); err != nil && !os.IsExist(err) { + c.Fatal(err) + } + f, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700) + if err != nil { + c.Fatal(err) + } + defer f.Close() + // Write content (truncate if it exists) + if _, err := io.Copy(f, strings.NewReader(content)); err != nil { + c.Fatal(err) + } +} + +// Return the contents of file at path `src`. +// Call c.Fatal() at the first error (including if the file doesn't exist) +func readFile(src string, c *check.C) (content string) { + data, err := ioutil.ReadFile(src) + if err != nil { + c.Fatal(err) + } + + return string(data) +} + +func containerStorageFile(containerID, basename string) string { + return filepath.Join("/var/lib/docker/containers", containerID, basename) +} + +// docker commands that use this function must be run with the '-d' switch. +func runCommandAndReadContainerFile(filename string, cmd *exec.Cmd) ([]byte, error) { + out, _, err := runCommandWithOutput(cmd) + if err != nil { + return nil, fmt.Errorf("%v: %q", err, out) + } + + time.Sleep(1 * time.Second) + + contID := strings.TrimSpace(out) + + return readContainerFile(contID, filename) +} + +func readContainerFile(containerID, filename string) ([]byte, error) { + f, err := os.Open(containerStorageFile(containerID, filename)) + if err != nil { + return nil, err + } + defer f.Close() + + content, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + return content, nil +} + +func readContainerFileWithExec(containerID, filename string) ([]byte, error) { + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "exec", containerID, "cat", filename)) + return []byte(out), err +} + +// daemonTime provides the current time on the daemon host +func daemonTime(c *check.C) time.Time { + if isLocalDaemon { + return time.Now() + } + + status, body, err := sockRequest("GET", "/info", nil) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) + + type infoJSON struct { + SystemTime string + } + var info infoJSON + if err = json.Unmarshal(body, &info); err != nil { + c.Fatalf("unable to unmarshal /info response: %v", err) + } + + dt, err := time.Parse(time.RFC3339Nano, info.SystemTime) + if err != nil { + c.Fatal(err) + } + return dt +} + +func setupRegistry(c *check.C) *testRegistryV2 { + testRequires(c, RegistryHosting) + reg, err := newTestRegistryV2(c) + if err != nil { + c.Fatal(err) + } + + // Wait for registry to be ready to serve requests. + for i := 0; i != 5; i++ { + if err = reg.Ping(); err == nil { + break + } + time.Sleep(100 * time.Millisecond) + } + + if err != nil { + c.Fatal("Timeout waiting for test registry to become available") + } + return reg +} + +func setupNotary(c *check.C) *testNotary { + testRequires(c, NotaryHosting) + ts, err := newTestNotary(c) + if err != nil { + c.Fatal(err) + } + + return ts +} + +// appendBaseEnv appends the minimum set of environment variables to exec the +// docker cli binary for testing with correct configuration to the given env +// list. +func appendBaseEnv(env []string) []string { + preserveList := []string{ + // preserve remote test host + "DOCKER_HOST", + + // windows: requires preserving SystemRoot, otherwise dial tcp fails + // with "GetAddrInfoW: A non-recoverable error occurred during a database lookup." 
+ "SystemRoot", + } + + for _, key := range preserveList { + if val := os.Getenv(key); val != "" { + env = append(env, fmt.Sprintf("%s=%s", key, val)) + } + } + return env +} diff --git a/integration-cli/fixtures/https/ca.pem b/integration-cli/fixtures/https/ca.pem new file mode 100644 index 00000000..6825d6d1 --- /dev/null +++ b/integration-cli/fixtures/https/ca.pem @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIID0TCCAzqgAwIBAgIJAP2r7GqEJwSnMA0GCSqGSIb3DQEBBQUAMIGiMQswCQYD +VQQGEwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMG +A1UEChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMI +Y2hhbmdlbWUxETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWls +QGhvc3QuZG9tYWluMB4XDTEzMTIwMzE2NTYzMFoXDTIzMTIwMTE2NTYzMFowgaIx +CzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2Nv +MRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMREwDwYD +VQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEW +EG1haWxAaG9zdC5kb21haW4wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALAn +0xDw+5y7ZptQacq66pUhRu82JP2WU6IDgo5QUtNU6/CX5PwQATe/OnYTZQFbksxp +AU9boG0FCkgxfsgPYXEuZxVEGKI2fxfKHOZZI8mrkWmj6eWU/0cvCjGVc9rTITP5 +sNQvg+hORyVDdNp2IdsbMJayiB3AQYMFx3vSDOMTAgMBAAGjggELMIIBBzAdBgNV +HQ4EFgQUZu7DFz09q0QBa2+ymRm9qgK1NPswgdcGA1UdIwSBzzCBzIAUZu7DFz09 +q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJD +QTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24x +ETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMI +Y2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21haW6CCQD9q+xq +hCcEpzAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4GBAF8fJKKM+/oOdnNi +zEd0M1+PmZOyqvjYQn/2ZR8UHH6Imgc/OPQKZXf0bVE1Txc/DaUNn9Isd1SuCuaE +ic3vAIYYU7PmgeNN6vwec48V96T7jr+GAi6AVMhQEc2hHCfVtx11Xx+x6aHDZzJt +Zxtf5lL6KSO9Y+EFwM+rju6hm5hW +-----END CERTIFICATE----- diff --git a/integration-cli/fixtures/https/client-cert.pem b/integration-cli/fixtures/https/client-cert.pem new file mode 100644 index 00000000..c05ed47c --- /dev/null +++ b/integration-cli/fixtures/https/client-cert.pem @@ -0,0 +1,73 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 3 (0x3) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Dec 4 14:17:54 2013 GMT + Not After : Dec 2 14:17:54 2023 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=client/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:ca:c9:05:d0:09:4e:3e:a4:fc:d5:14:f4:a5:e8: + 34:d3:6b:51:e3:f3:62:ea:a1:f0:e8:ed:c4:2a:bc: + f0:4f:ca:07:df:e3:88:fa:f4:21:99:35:0e:3d:ea: + b0:86:e7:c4:d2:8a:83:2b:42:b8:ec:a3:99:62:70: + 81:46:cc:fc:a5:1d:d2:63:e8:eb:07:25:9a:e2:25: + 6d:11:56:f2:1a:51:a1:b6:3e:1c:57:32:e9:7b:2c: + aa:1b:cc:97:2d:89:2d:b1:c9:5e:35:28:4d:7c:fa: + 65:31:3e:f7:70:dd:6e:0b:3c:58:af:a8:2e:24:c0: + 7e:4e:78:7d:0a:9e:8f:42:43 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Comment: + Easy-RSA Generated Certificate + X509v3 Subject Key Identifier: + DE:42:EF:2D:98:A3:6C:A8:AA:E0:8C:71:2C:9D:64:23:A9:E2:7E:81 + X509v3 Authority Key Identifier: + keyid:66:EE:C3:17:3D:3D:AB:44:01:6B:6F:B2:99:19:BD:AA:02:B5:34:FB + DirName:/C=US/ST=CA/L=SanFrancisco/O=Fort-Funston/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:FD:AB:EC:6A:84:27:04:A7 + + X509v3 Extended Key Usage: + TLS Web Client Authentication + X509v3 
Key Usage: + Digital Signature + Signature Algorithm: sha1WithRSAEncryption + 1c:44:26:ea:e1:66:25:cb:e4:8e:57:1c:f6:b9:17:22:62:40: + 12:90:8f:3b:b2:61:7a:54:94:8f:b1:20:0b:bf:a3:51:e3:fa: + 1c:a1:be:92:3a:d0:76:44:c0:57:83:ab:6a:e4:1a:45:49:a4: + af:39:0d:60:32:fc:3a:be:d7:fb:5d:99:7a:1f:87:e7:d5:ab: + 84:a2:5e:90:d8:bf:fa:89:6d:32:26:02:5e:31:35:68:7f:31: + f5:6b:51:46:bc:af:70:ed:5a:09:7d:ec:b2:48:4f:fe:c5:2f: + 56:04:ad:f6:c1:d2:2a:e4:6a:c4:87:fe:08:35:c5:38:cb:5e: + 4a:c4 +-----BEGIN CERTIFICATE----- +MIIEFTCCA36gAwIBAgIBAzANBgkqhkiG9w0BAQUFADCBojELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xFTATBgNVBAoTDEZv +cnQtRnVuc3RvbjERMA8GA1UECxMIY2hhbmdlbWUxETAPBgNVBAMTCGNoYW5nZW1l +MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0LmRv +bWFpbjAeFw0xMzEyMDQxNDE3NTRaFw0yMzEyMDIxNDE3NTRaMIGgMQswCQYDVQQG +EwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UE +ChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTEPMA0GA1UEAxMGY2xp +ZW50MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0 +LmRvbWFpbjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAyskF0AlOPqT81RT0 +peg002tR4/Ni6qHw6O3EKrzwT8oH3+OI+vQhmTUOPeqwhufE0oqDK0K47KOZYnCB +Rsz8pR3SY+jrByWa4iVtEVbyGlGhtj4cVzLpeyyqG8yXLYktscleNShNfPplMT73 +cN1uCzxYr6guJMB+Tnh9Cp6PQkMCAwEAAaOCAVkwggFVMAkGA1UdEwQCMAAwLQYJ +YIZIAYb4QgENBCAWHkVhc3ktUlNBIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNV +HQ4EFgQU3kLvLZijbKiq4IxxLJ1kI6nifoEwgdcGA1UdIwSBzzCBzIAUZu7DFz09 +q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJD +QTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24x +ETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMI +Y2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21haW6CCQD9q+xq +hCcEpzATBgNVHSUEDDAKBggrBgEFBQcDAjALBgNVHQ8EBAMCB4AwDQYJKoZIhvcN +AQEFBQADgYEAHEQm6uFmJcvkjlcc9rkXImJAEpCPO7JhelSUj7EgC7+jUeP6HKG+ +kjrQdkTAV4OrauQaRUmkrzkNYDL8Or7X+12Zeh+H59WrhKJekNi/+oltMiYCXjE1 +aH8x9WtRRryvcO1aCX3sskhP/sUvVgSt9sHSKuRqxIf+CDXFOMteSsQ= +-----END CERTIFICATE----- diff --git a/integration-cli/fixtures/https/client-key.pem b/integration-cli/fixtures/https/client-key.pem new file mode 100644 index 00000000..b5c15f8d --- /dev/null +++ b/integration-cli/fixtures/https/client-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAMrJBdAJTj6k/NUU +9KXoNNNrUePzYuqh8OjtxCq88E/KB9/jiPr0IZk1Dj3qsIbnxNKKgytCuOyjmWJw +gUbM/KUd0mPo6wclmuIlbRFW8hpRobY+HFcy6XssqhvMly2JLbHJXjUoTXz6ZTE+ +93Ddbgs8WK+oLiTAfk54fQqej0JDAgMBAAECgYBOFEzKp2qbMEexe9ofL2N3rDDh +xkrl8OijpzkLA6i78BxMFn4dsnZlWUpciMrjhsYAExkiRRSS+QMMJimAq1jzQqc3 +FAQV2XGYwkd0cUn7iZGvfNnEPysjsfyYQM+m+sT0ATj4BZjVShC6kkSjTdm1leLN +OSvcHdcu3Xxg9ufF0QJBAPYdnNt5sIndt2WECePuRVi+uF4mlxTobFY0fjn26yhC +4RsnhhD3Vldygo9gvnkwrAZYaALGSPBewes2InxvjA8CQQDS7erKiNXpwoqz5XiU +SVEsIIVTdWzBjGbIqMOu/hUwM5FK4j6JTBks0aTGMyh0YV9L1EzM0X79J29JahCe +iQKNAkBKNMOGqTpBV0hko1sYDk96YobUXG5RL4L6uvkUIQ7mJMQam+AgXXL7Ctuy +v0iu4a38e8tgisiTMP7nHHtpaXihAkAOiN54/lzfMsykANgCP9scE1GcoqbP34Dl +qttxH4kOPT9xzY1JoLjLYdbc4YGUI3GRpBt2sajygNkmUey7P+2xAkBBsVCZFvTw +qHvOpPS2kX5ml5xoc/QAHK9N7kR+X7XFYx82RTVSqJEK4lPb+aEWn+CjiIewO4Q5 +ksDFuNxAzbhl +-----END PRIVATE KEY----- diff --git a/integration-cli/fixtures/https/client-rogue-cert.pem b/integration-cli/fixtures/https/client-rogue-cert.pem new file mode 100644 index 00000000..21ae4bd5 --- /dev/null +++ b/integration-cli/fixtures/https/client-rogue-cert.pem @@ -0,0 +1,73 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 2 (0x2) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, 
O=Evil Inc, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Feb 24 17:54:59 2014 GMT + Not After : Feb 22 17:54:59 2024 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=client/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:e8:e2:2c:b8:d4:db:89:50:4f:47:1e:68:db:f7: + e4:cc:47:41:63:75:03:37:50:7a:a8:4d:27:36:d5: + 15:01:08:b6:cf:56:f7:56:6d:3d:f9:e2:8d:1a:5d: + bf:a0:24:5e:07:55:8e:d0:dc:f1:fa:19:87:1d:d6: + b6:58:82:2e:ba:69:6d:e9:d9:c8:16:0d:1d:59:7f: + f4:8e:58:10:01:3d:21:14:16:3c:ec:cd:8c:b7:0e: + e6:7b:77:b4:f9:90:a5:17:01:bb:84:c6:b2:12:87: + 70:eb:9f:6d:4f:d0:68:8b:96:c0:e7:0b:51:b4:9d: + 1d:7b:6c:7b:be:89:6b:88:8b + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Comment: + Easy-RSA Generated Certificate + X509v3 Subject Key Identifier: + 9E:F8:49:D0:A2:76:30:5C:AB:2B:8A:B5:8D:C6:45:1F:A7:F8:CF:85 + X509v3 Authority Key Identifier: + keyid:DC:A5:F1:76:DB:4E:CD:8E:EF:B1:23:56:1D:92:80:99:74:3B:EA:6F + DirName:/C=US/ST=CA/L=SanFrancisco/O=Evil Inc/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:E7:21:1E:18:41:1B:96:83 + + X509v3 Extended Key Usage: + TLS Web Client Authentication + X509v3 Key Usage: + Digital Signature + Signature Algorithm: sha1WithRSAEncryption + 48:76:c0:18:fa:0a:ee:4e:1a:ec:02:9d:d4:83:ca:94:54:a1: + 3f:51:2f:3e:4b:95:c3:42:9b:71:a0:4b:d9:af:47:23:b9:1c: + fb:85:ba:76:e2:09:cb:65:bb:d2:7d:44:3d:4b:67:ba:80:83: + be:a8:ed:c4:b9:ea:1a:1b:c7:59:3b:d9:5c:0d:46:d8:c9:92: + cb:10:c5:f2:1a:38:a4:aa:07:2c:e3:84:16:79:c7:95:09:e3: + 01:d2:15:a2:77:0b:8b:bf:94:04:e9:7f:c0:cd:e6:2e:64:cd: + 1e:a3:32:ec:11:cc:62:ce:c7:4e:cd:ad:48:5c:b1:b8:e9:76: + b3:f9 +-----BEGIN CERTIFICATE----- +MIIEDTCCA3agAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBnjELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2 +aWwgSW5jMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAP +BgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWlu +MB4XDTE0MDIyNDE3NTQ1OVoXDTI0MDIyMjE3NTQ1OVowgaAxCzAJBgNVBAYTAlVT +MQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxG +b3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMQ8wDQYDVQQDEwZjbGllbnQx +ETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9t +YWluMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDo4iy41NuJUE9HHmjb9+TM +R0FjdQM3UHqoTSc21RUBCLbPVvdWbT354o0aXb+gJF4HVY7Q3PH6GYcd1rZYgi66 +aW3p2cgWDR1Zf/SOWBABPSEUFjzszYy3DuZ7d7T5kKUXAbuExrISh3Drn21P0GiL +lsDnC1G0nR17bHu+iWuIiwIDAQABo4IBVTCCAVEwCQYDVR0TBAIwADAtBglghkgB +hvhCAQ0EIBYeRWFzeS1SU0EgR2VuZXJhdGVkIENlcnRpZmljYXRlMB0GA1UdDgQW +BBSe+EnQonYwXKsrirWNxkUfp/jPhTCB0wYDVR0jBIHLMIHIgBTcpfF2207Nju+x +I1YdkoCZdDvqb6GBpKSBoTCBnjELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRUw +EwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2aWwgSW5jMREwDwYDVQQL +EwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAPBgNVBCkTCGNoYW5nZW1l +MR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWluggkA5yEeGEEbloMwEwYD +VR0lBAwwCgYIKwYBBQUHAwIwCwYDVR0PBAQDAgeAMA0GCSqGSIb3DQEBBQUAA4GB +AEh2wBj6Cu5OGuwCndSDypRUoT9RLz5LlcNCm3GgS9mvRyO5HPuFunbiCctlu9J9 +RD1LZ7qAg76o7cS56hobx1k72VwNRtjJkssQxfIaOKSqByzjhBZ5x5UJ4wHSFaJ3 +C4u/lATpf8DN5i5kzR6jMuwRzGLOx07NrUhcsbjpdrP5 +-----END CERTIFICATE----- diff --git a/integration-cli/fixtures/https/client-rogue-key.pem b/integration-cli/fixtures/https/client-rogue-key.pem new file mode 100644 index 00000000..53c122ab --- /dev/null +++ 
b/integration-cli/fixtures/https/client-rogue-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAOjiLLjU24lQT0ce +aNv35MxHQWN1AzdQeqhNJzbVFQEIts9W91ZtPfnijRpdv6AkXgdVjtDc8foZhx3W +tliCLrppbenZyBYNHVl/9I5YEAE9IRQWPOzNjLcO5nt3tPmQpRcBu4TGshKHcOuf +bU/QaIuWwOcLUbSdHXtse76Ja4iLAgMBAAECgYADs+TmI2xCKKa6CL++D5jxrohZ +nnionnz0xBVFh+nHlG3jqgxQsXf0yydXLfpn/2wHTdLxezHVuiYt0UYg7iD0CglW ++IjcgMebzyjLeYqYOE5llPlMvhp2HoEMYJNb+7bRrZ1WCITbu+Su0w1cgA7Cs+Ej +VlfvGzN+qqnDThRUYQJBAPY0sMWZJKly8QhUmUvmcXdPczzSOf6Mm7gc5LR6wzxd +vW7syuqk50qjqVqFpN81vCV7GoDxRUWbTM9ftf7JGFkCQQDyJc/1RMygE2o+enU1 +6UBxJyclXITEYtDn8aoEpLNc7RakP1WoPUKjZOnjkcoKcIkFNkSPeCfQujrb5f3F +MkuDAkByAI/hzzmkpK5rFxEsjfX4Mve/L/DepyjrpaVY1IdWimlO1aJX6CeY7hNa +8QsYt/74s/nfvtg+lNyKIV1aLq9xAkB+WSSNgfyTeg3x08vc+Xxajmdqoz/TiQwg +OoTQL3A3iK5LvZBgXLasszcnOycFE3srcQmNItEDpGiZ3QPxJTEpAkEA45EE9NMJ +SA7EGWSFlbz4f4u4oBeiDiJRJbGGfAyVxZlpCWUjPpg9+swsWoFEOjnGYaChAMk5 +nrOdMf15T6QF7Q== +-----END PRIVATE KEY----- diff --git a/integration-cli/fixtures/https/server-cert.pem b/integration-cli/fixtures/https/server-cert.pem new file mode 100644 index 00000000..08abfd1a --- /dev/null +++ b/integration-cli/fixtures/https/server-cert.pem @@ -0,0 +1,76 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 4 (0x4) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Dec 4 15:01:20 2013 GMT + Not After : Dec 2 15:01:20 2023 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=*/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:c1:ff:7d:30:6f:64:4a:b1:92:b1:71:d1:c1:74: + e2:1d:db:2d:11:24:e1:00:d4:00:ae:6f:c8:9e:ae: + 67:b3:4a:bd:f7:e6:9e:57:6d:19:4c:3c:23:94:2d: + 3d:d6:63:84:d8:fa:76:2b:38:12:c1:ed:20:9d:32: + e0:e8:c2:bf:9a:77:70:04:3f:7f:ca:8c:2c:82:d6: + 3d:25:5c:02:1a:4f:64:93:03:dd:9c:42:97:5e:09: + 49:af:f0:c2:e1:30:08:0e:21:46:95:d1:13:59:c0: + c8:76:be:94:0d:8b:43:67:21:33:b2:08:60:9d:76: + a8:05:32:1e:f9:95:09:14:75 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Cert Type: + SSL Server + Netscape Comment: + Easy-RSA Generated Server Certificate + X509v3 Subject Key Identifier: + 14:02:FD:FD:DD:13:38:E0:71:EA:D1:BE:C0:0E:89:1A:2D:B6:19:06 + X509v3 Authority Key Identifier: + keyid:66:EE:C3:17:3D:3D:AB:44:01:6B:6F:B2:99:19:BD:AA:02:B5:34:FB + DirName:/C=US/ST=CA/L=SanFrancisco/O=Fort-Funston/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:FD:AB:EC:6A:84:27:04:A7 + + X509v3 Extended Key Usage: + TLS Web Server Authentication + X509v3 Key Usage: + Digital Signature, Key Encipherment + Signature Algorithm: sha1WithRSAEncryption + 40:0f:10:39:c4:b7:0f:0d:2f:bf:d2:16:cc:8e:d3:9a:fb:8b: + ce:4b:7b:0d:48:77:ce:f1:fe:d5:8f:ea:b1:71:ed:49:1d:9f: + 23:3a:16:d4:70:7c:c5:29:bf:e4:90:34:d0:f0:00:24:f4:e4: + df:2c:c3:83:01:66:61:c9:a8:ab:29:e7:98:6d:27:89:4a:76: + c9:2e:19:8e:fe:6e:d5:f8:99:11:0e:97:67:4b:34:e3:1e:e3: + 9f:35:00:a5:32:f9:b5:2c:f2:e0:c5:2e:cc:81:bd:18:dd:5c: + 12:c8:6b:fa:0c:17:74:30:55:f6:6e:20:9a:6c:1e:09:b4:0c: + 15:42 +-----BEGIN CERTIFICATE----- +MIIEKjCCA5OgAwIBAgIBBDANBgkqhkiG9w0BAQUFADCBojELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xFTATBgNVBAoTDEZv +cnQtRnVuc3RvbjERMA8GA1UECxMIY2hhbmdlbWUxETAPBgNVBAMTCGNoYW5nZW1l 
+MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0LmRv +bWFpbjAeFw0xMzEyMDQxNTAxMjBaFw0yMzEyMDIxNTAxMjBaMIGbMQswCQYDVQQG +EwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UE +ChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTEKMAgGA1UEAxQBKjER +MA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21h +aW4wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMH/fTBvZEqxkrFx0cF04h3b +LREk4QDUAK5vyJ6uZ7NKvffmnldtGUw8I5QtPdZjhNj6dis4EsHtIJ0y4OjCv5p3 +cAQ/f8qMLILWPSVcAhpPZJMD3ZxCl14JSa/wwuEwCA4hRpXRE1nAyHa+lA2LQ2ch +M7IIYJ12qAUyHvmVCRR1AgMBAAGjggFzMIIBbzAJBgNVHRMEAjAAMBEGCWCGSAGG ++EIBAQQEAwIGQDA0BglghkgBhvhCAQ0EJxYlRWFzeS1SU0EgR2VuZXJhdGVkIFNl +cnZlciBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUFAL9/d0TOOBx6tG+wA6JGi22GQYw +gdcGA1UdIwSBzzCBzIAUZu7DFz09q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJ +BgNVBAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUw +EwYDVQQKEwxGb3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQD +EwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1h +aWxAaG9zdC5kb21haW6CCQD9q+xqhCcEpzATBgNVHSUEDDAKBggrBgEFBQcDATAL +BgNVHQ8EBAMCBaAwDQYJKoZIhvcNAQEFBQADgYEAQA8QOcS3Dw0vv9IWzI7TmvuL +zkt7DUh3zvH+1Y/qsXHtSR2fIzoW1HB8xSm/5JA00PAAJPTk3yzDgwFmYcmoqynn +mG0niUp2yS4Zjv5u1fiZEQ6XZ0s04x7jnzUApTL5tSzy4MUuzIG9GN1cEshr+gwX +dDBV9m4gmmweCbQMFUI= +-----END CERTIFICATE----- diff --git a/integration-cli/fixtures/https/server-key.pem b/integration-cli/fixtures/https/server-key.pem new file mode 100644 index 00000000..c269320e --- /dev/null +++ b/integration-cli/fixtures/https/server-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICeAIBADANBgkqhkiG9w0BAQEFAASCAmIwggJeAgEAAoGBAMH/fTBvZEqxkrFx +0cF04h3bLREk4QDUAK5vyJ6uZ7NKvffmnldtGUw8I5QtPdZjhNj6dis4EsHtIJ0y +4OjCv5p3cAQ/f8qMLILWPSVcAhpPZJMD3ZxCl14JSa/wwuEwCA4hRpXRE1nAyHa+ +lA2LQ2chM7IIYJ12qAUyHvmVCRR1AgMBAAECgYAmwckb9RUfSwyYgLm8IYLPHiuJ +wkllZfVg5Bo7gXJcQnFjZmJ56uTj8xvUjZlODIHM63TSO5ibv6kFXtXKCqZGd2M+ +wGbhZ0f+2GvKcwMmJERnIQjuoNaYSQLT0tM0VB9Iz0rJlZC+tzPZ+5pPqEumRdsS +IzWNXfF42AhcbwAQYQJBAPVXtMYIJc9EZsz86ZcQiMPWUpCX5vnRmtwL8kKyR8D5 +4KfYeiowyFffSRMMcclwNHq7TgSXN+nIXM9WyzyzwikCQQDKbNA28AgZp9aT54HP +WnbeE2pmt+uk/zl/BtxJSoK6H+69Jec+lf7EgL7HgOWYRSNot4uQWu8IhsHLTiUq ++0FtAkEAqwlRxRy4/x24bP+D+QRV0/D97j93joFJbE4Hved7jlSlAV4xDGilwlyv +HNB4Iu5OJ6Gcaibhm+FKkmD3noHSwQJBAIpu3fokLzX0bS+bDFBU6qO3HXX/47xj ++tsfQvkwZrSI8AkU6c8IX0HdVhsz0FBRQAT2ORDQz1XCarfxykNZrwUCQQCGCBIc +BBCWzhHlswlGidWJg3HqqO6hPPClEr3B5G87oCsdeYwiO23XT6rUnoJXfJHp6oCW +5nCwDu5ZTP+khltg +-----END PRIVATE KEY----- diff --git a/integration-cli/fixtures/https/server-rogue-cert.pem b/integration-cli/fixtures/https/server-rogue-cert.pem new file mode 100644 index 00000000..28feba66 --- /dev/null +++ b/integration-cli/fixtures/https/server-rogue-cert.pem @@ -0,0 +1,76 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 3 (0x3) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Evil Inc, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Feb 28 18:49:31 2014 GMT + Not After : Feb 26 18:49:31 2024 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=localhost/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:d1:08:58:24:60:a1:69:65:4b:76:46:8f:88:75: + 7c:49:3a:d8:03:cc:5b:58:c5:d1:bb:e5:f9:54:b9: + 75:65:df:7e:bb:fb:54:d4:b2:e9:6f:58:a2:a4:84: + 43:94:77:24:81:38:36:36:f0:66:65:26:e5:5b:2a: + 14:1c:a9:ae:57:7f:75:00:23:14:4b:61:58:e4:82: + 
aa:15:97:94:bd:50:35:0d:5d:18:18:ed:10:6a:bb: + d3:64:5a:eb:36:98:5b:58:a7:fe:67:48:c1:6c:3f: + 51:2f:02:65:96:54:77:9b:34:f9:a7:d2:63:54:6a: + 9e:02:5c:be:65:98:a4:b4:b5 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Cert Type: + SSL Server + Netscape Comment: + Easy-RSA Generated Server Certificate + X509v3 Subject Key Identifier: + 1F:E0:57:CA:CB:76:C9:C4:86:B9:EA:69:17:C0:F3:51:CE:95:40:EC + X509v3 Authority Key Identifier: + keyid:DC:A5:F1:76:DB:4E:CD:8E:EF:B1:23:56:1D:92:80:99:74:3B:EA:6F + DirName:/C=US/ST=CA/L=SanFrancisco/O=Evil Inc/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:E7:21:1E:18:41:1B:96:83 + + X509v3 Extended Key Usage: + TLS Web Server Authentication + X509v3 Key Usage: + Digital Signature, Key Encipherment + Signature Algorithm: sha1WithRSAEncryption + 04:93:0e:28:01:94:18:f0:8c:7c:d3:0c:ad:e9:b7:46:b1:30: + 65:ed:68:7c:8c:91:cd:1a:86:66:87:4a:4f:c0:97:bc:f7:85: + 4b:38:79:31:b2:65:88:b1:76:16:9e:80:93:38:f4:b9:eb:65: + 00:6d:bb:89:e0:a1:bf:95:5e:80:13:8e:01:73:d3:f1:08:73: + 85:a5:33:75:0b:42:8a:a3:07:09:35:ef:d7:c6:58:eb:60:a3: + 06:89:a0:53:99:e2:aa:41:90:e0:1a:d2:12:4b:48:7d:c3:9c: + ad:bd:0e:5e:5f:f7:09:0c:5d:7c:86:24:dd:92:d5:b3:14:06: + c7:9f +-----BEGIN CERTIFICATE----- +MIIEKjCCA5OgAwIBAgIBAzANBgkqhkiG9w0BAQUFADCBnjELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2 +aWwgSW5jMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAP +BgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWlu +MB4XDTE0MDIyODE4NDkzMVoXDTI0MDIyNjE4NDkzMVowgaMxCzAJBgNVBAYTAlVT +MQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxG +b3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMRIwEAYDVQQDEwlsb2NhbGhv +c3QxETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3Qu +ZG9tYWluMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDRCFgkYKFpZUt2Ro+I +dXxJOtgDzFtYxdG75flUuXVl3367+1TUsulvWKKkhEOUdySBODY28GZlJuVbKhQc +qa5Xf3UAIxRLYVjkgqoVl5S9UDUNXRgY7RBqu9NkWus2mFtYp/5nSMFsP1EvAmWW +VHebNPmn0mNUap4CXL5lmKS0tQIDAQABo4IBbzCCAWswCQYDVR0TBAIwADARBglg +hkgBhvhCAQEEBAMCBkAwNAYJYIZIAYb4QgENBCcWJUVhc3ktUlNBIEdlbmVyYXRl +ZCBTZXJ2ZXIgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFB/gV8rLdsnEhrnqaRfA81HO +lUDsMIHTBgNVHSMEgcswgciAFNyl8XbbTs2O77EjVh2SgJl0O+pvoYGkpIGhMIGe +MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNj +bzERMA8GA1UEChMIRXZpbCBJbmMxETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQD +EwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1h +aWxAaG9zdC5kb21haW6CCQDnIR4YQRuWgzATBgNVHSUEDDAKBggrBgEFBQcDATAL +BgNVHQ8EBAMCBaAwDQYJKoZIhvcNAQEFBQADgYEABJMOKAGUGPCMfNMMrem3RrEw +Ze1ofIyRzRqGZodKT8CXvPeFSzh5MbJliLF2Fp6Akzj0uetlAG27ieChv5VegBOO +AXPT8QhzhaUzdQtCiqMHCTXv18ZY62CjBomgU5niqkGQ4BrSEktIfcOcrb0OXl/3 +CQxdfIYk3ZLVsxQGx58= +-----END CERTIFICATE----- diff --git a/integration-cli/fixtures/https/server-rogue-key.pem b/integration-cli/fixtures/https/server-rogue-key.pem new file mode 100644 index 00000000..10f7c650 --- /dev/null +++ b/integration-cli/fixtures/https/server-rogue-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBANEIWCRgoWllS3ZG +j4h1fEk62APMW1jF0bvl+VS5dWXffrv7VNSy6W9YoqSEQ5R3JIE4NjbwZmUm5Vsq +FByprld/dQAjFEthWOSCqhWXlL1QNQ1dGBjtEGq702Ra6zaYW1in/mdIwWw/US8C +ZZZUd5s0+afSY1RqngJcvmWYpLS1AgMBAAECgYAJXh9dGfuB1qlIFqduDR3RxlJR +8UGSu+LHUeoXkuwg8aAjWoMVuSLe+5DmYIsKx0AajmNXmPRtyg1zRXJ7SltmubJ8 +6qQVDsRk6biMdkpkl6a9Gk2av40psD9/VPGxagEoop7IKYhf3AeKPvPiwVB2qFrl +1aYMZm0aMR55pgRajQJBAOk8IsJDf0beooDZXVdv/oe4hcbM9fxO8Cn3qzoGImqD 
+37LL+PCzDP7AEV3fk43SsZDeSk+LDX+h0o9nPyhzHasCQQDlb3aDgcQY9NaGLUWO +moOCB3148eBVcAwCocu+OSkf7sbQdvXxgThBOrZl11wwRIMQqh99c2yeUwj+tELl +3VcfAkBZTiNpCvtDIaBLge9RuZpWUXs3wec2cutWxnSTxSGMc25GQf/R+l0xdk2w +ChmvpktDUzpU9sN2aXn8WuY+EMX9AkEApbLpUbKPUELLB958RLA819TW/lkZXjrs +wZ3eSoR3ufM1rOqtVvyvBxUDE+wETWu9iHSFB5Ir2PA5J9JCGkbPmwJAFI1ndfBj +iuyU93nFX0p+JE2wVHKx4dMzKCearNKiJh/lGDtUq3REGgamTNUnG8RAITUbxFs+ +Z1hrIq8xYl2LOQ== +-----END PRIVATE KEY----- diff --git a/integration-cli/fixtures/notary/localhost.cert b/integration-cli/fixtures/notary/localhost.cert new file mode 100644 index 00000000..d1233a1b --- /dev/null +++ b/integration-cli/fixtures/notary/localhost.cert @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDCTCCAfOgAwIBAgIQTOoFF+ypXwgdXnXHuCTvYDALBgkqhkiG9w0BAQswJjER +MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDcxNzE5 +NDg1M1oXDTE4MDcwMTE5NDg1M1owJzERMA8GA1UEChMIUXVpY2tUTFMxEjAQBgNV +BAMTCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMDO +qvTBAi0ApXLfe90ApJkdkRGwF838Qzt1UFSxomu5fHRV6l3FjX5XCVHiFQ4w3ROh +dMOu9NahfGLJv9VvWU2MV3YoY9Y7lIXpKwnK1v064wuls4nPh13BUWKQKofcY/e2 +qaSPd6/qmSRc/kJUvOI9jZMSX6ZRPu9K4PCqm2CivlbLq9UYuo1AbRGfuqHRvTxg +mQG7WQCzGSvSjuSg5qX3TEh0HckTczJG9ODULNRWNE7ld0W4sfv4VF8R7Uc/G7LO +8QwLCZ9TIl3gYMPCrhUL3Q6z9Jnn1SQS4mhDnPi6ugRYO1X8k3jjdxV9C2sXwUvN +OZI1rLEWl9TJNA7ZXtMCAwEAAaM2MDQwDgYDVR0PAQH/BAQDAgCgMAwGA1UdEwEB +/wQCMAAwFAYDVR0RBA0wC4IJbG9jYWxob3N0MAsGCSqGSIb3DQEBCwOCAQEAH6iq +kM2+UMukGDLEQKHHiauioWJlHDlLXv76bJiNfjSz94B/2XOQMb9PT04//tnGUyPK +K8Dx7RoxSodU6T5VRiz/A36mLOvt2t3bcL/1nHf9sAOHcexGtnCbQbW91V7RKfIL +sjiLNFDkQ9VfVNY+ynQptZoyH1sy07+dplfkIiPzRs5WuVAnEGsX3r6BrhgUITzi +g1B4kpmGZIohP4m6ZEBY5xuo/NQ0+GhjAENQMU38GpuoMyFS0i0dGcbx8weqnI/B +Er/qa0+GE/rBnWY8TiRow8dzpneSFQnUZpJ4EwD9IoOIDHo7k2Nbz2P50HMiCXZf +4RqzctVssRlrRVnO5w== +-----END CERTIFICATE----- diff --git a/integration-cli/fixtures/notary/localhost.key b/integration-cli/fixtures/notary/localhost.key new file mode 100644 index 00000000..d7778359 --- /dev/null +++ b/integration-cli/fixtures/notary/localhost.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAwM6q9MECLQClct973QCkmR2REbAXzfxDO3VQVLGia7l8dFXq +XcWNflcJUeIVDjDdE6F0w6701qF8Ysm/1W9ZTYxXdihj1juUhekrCcrW/TrjC6Wz +ic+HXcFRYpAqh9xj97appI93r+qZJFz+QlS84j2NkxJfplE+70rg8KqbYKK+Vsur +1Ri6jUBtEZ+6odG9PGCZAbtZALMZK9KO5KDmpfdMSHQdyRNzMkb04NQs1FY0TuV3 +Rbix+/hUXxHtRz8bss7xDAsJn1MiXeBgw8KuFQvdDrP0mefVJBLiaEOc+Lq6BFg7 +VfyTeON3FX0LaxfBS805kjWssRaX1Mk0Dtle0wIDAQABAoIBAHbuhNHZROhRn70O +Ui9vOBki/dt1ThnH5AkHQngb4t6kWjrAzILvW2p1cdBKr0ZDqftz+rzCbVD/5+Rg +Iq8bsnB9g23lWEBMHD/GJsAxmRA3hNooamk11IBmwTcVSsbnkdq5mEdkICYphjHC +Ey0DbEf6RBxWlx3WvAWLoNmTw6iFaOCH8IyLavPpe7kLbZc219oNUw2qjCnCXCZE +/NuViADHJBPN8r7g1gmyclJmTumdUK6oHgXEMMPe43vhReGcgcReK9QZjnTcIXPM +4oJOraw+BtoZXVvvIPnC+5ntoLFOzjIzM0kaveReZbdgffqF4zy2vRfCHhWssanc +7a0xR4ECgYEA3Xuvcqy5Xw+v/jVCO0VZj++Z7apA78dY4tWsPx5/0DUTTziTlXkC +ADduEbwX6HgZ/iLvA9j4C3Z4mO8qByby/6UoBU8NEe+PQt6fT7S+dKSP4uy5ZxVM +i5opkEyrJsMbve9Jrlj4bk5CICsydrZ+SBFHnpNGjbduGQick5LORWECgYEA3trt +gepteDGiUYmnnBgjbYtcD11RvpKC8Z/QwGnzN5vk4eBu8r7DkMcLN+SiHjAovlJo +r5j3EbF8sla1zBf/yySdQZFqUGcwtw7MaAKCLdhQl5WsViNMIx6p2OJapu0dzbv2 +KTXrnoRCafcH92k0dUX1ahE9eyc8KX6VhbWwXLMCgYATGCCuEDoC+gVAMzM8jOQF +xrBMjwr+IP+GvskUv/pg5tJ9V/FRR5dmkWDJ4p9lCUWkZTqZ6FCqHFKVTLkg2LjG +VWS34HLOAwskxrCRXJG22KEW/TWWr31j46yFpjZzJwrzOvftMfpo+BI3V8IH/f+x +EtxLzYKdoRy6x8VH67YgwQKBgHor2vjV45142FuK83AHa6SqOZXSuvWWrGJ6Ep7p +doSN2jRaLXi2S9AaznOdy6JxFGUCGJHrcccpXgsGrjNtFLXxJKTFa1sYtwQkALsk +ZOltJQF09D1krGC0driHntrUMvqOiKye+sS0DRS6cIuaCUAhUiELwoC5SaoV0zKy 
+IDUxAoGAOK8Xq+3/sqe79vTpw25RXl+nkAmOAeKjqf3Kh6jbnBhr81rmefyKXB9a
+uj0b980tzUnliwA5cCOsyxfN2vASvMnJxFE721QZI04arlcPFHcFqCtmNnUYTcLp
+0hgn/yLZptcoxpy+eTBu3eNsxz1Bu/Tx/198+2Wr3MbtGpLNIcA=
+-----END RSA PRIVATE KEY-----
diff --git a/integration-cli/registry.go b/integration-cli/registry.go
new file mode 100644
index 00000000..35e1b4eb
--- /dev/null
+++ b/integration-cli/registry.go
@@ -0,0 +1,123 @@
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"os/exec"
+	"path/filepath"
+
+	"github.com/docker/distribution/digest"
+	"github.com/go-check/check"
+)
+
+const v2binary = "registry-v2"
+
+type testRegistryV2 struct {
+	cmd *exec.Cmd
+	dir string
+}
+
+func newTestRegistryV2(c *check.C) (*testRegistryV2, error) {
+	template := `version: 0.1
+loglevel: debug
+storage:
+    filesystem:
+        rootdirectory: %s
+http:
+    addr: %s`
+	tmp, err := ioutil.TempDir("", "registry-test-")
+	if err != nil {
+		return nil, err
+	}
+	confPath := filepath.Join(tmp, "config.yaml")
+	config, err := os.Create(confPath)
+	if err != nil {
+		return nil, err
+	}
+	if _, err := fmt.Fprintf(config, template, tmp, privateRegistryURL); err != nil {
+		os.RemoveAll(tmp)
+		return nil, err
+	}
+
+	cmd := exec.Command(v2binary, confPath)
+	if err := cmd.Start(); err != nil {
+		os.RemoveAll(tmp)
+		if os.IsNotExist(err) {
+			c.Skip(err.Error())
+		}
+		return nil, err
+	}
+	return &testRegistryV2{
+		cmd: cmd,
+		dir: tmp,
+	}, nil
+}
+
+func (t *testRegistryV2) Ping() error {
+	// We always ping through HTTP for our test registry.
+	resp, err := http.Get(fmt.Sprintf("http://%s/v2/", privateRegistryURL))
+	if err != nil {
+		return err
+	}
+	if resp.StatusCode != 200 {
+		return fmt.Errorf("registry ping replied with an unexpected status code %d", resp.StatusCode)
+	}
+	return nil
+}
+
+func (t *testRegistryV2) Close() {
+	t.cmd.Process.Kill()
+	os.RemoveAll(t.dir)
+}
+
+func (t *testRegistryV2) getBlobFilename(blobDigest digest.Digest) string {
+	// Split the digest into its algorithm and hex components.
+	dgstAlg, dgstHex := blobDigest.Algorithm(), blobDigest.Hex()
+
+	// The path to the target blob data looks something like:
+	// baseDir + "docker/registry/v2/blobs/sha256/a3/a3ed...46d4/data"
+	return fmt.Sprintf("%s/docker/registry/v2/blobs/%s/%s/%s/data", t.dir, dgstAlg, dgstHex[:2], dgstHex)
+}
+
+func (t *testRegistryV2) readBlobContents(c *check.C, blobDigest digest.Digest) []byte {
+	// Load the target manifest blob.
+	manifestBlob, err := ioutil.ReadFile(t.getBlobFilename(blobDigest))
+	if err != nil {
+		c.Fatalf("unable to read blob: %s", err)
+	}
+
+	return manifestBlob
+}
+
+func (t *testRegistryV2) writeBlobContents(c *check.C, blobDigest digest.Digest, data []byte) {
+	if err := ioutil.WriteFile(t.getBlobFilename(blobDigest), data, os.FileMode(0644)); err != nil {
+		c.Fatalf("unable to write malicious data blob: %s", err)
+	}
+}
+
+func (t *testRegistryV2) tempMoveBlobData(c *check.C, blobDigest digest.Digest) (undo func()) {
+	tempFile, err := ioutil.TempFile("", "registry-temp-blob-")
+	if err != nil {
+		c.Fatalf("unable to get temporary blob file: %s", err)
+	}
+	tempFile.Close()
+
+	blobFilename := t.getBlobFilename(blobDigest)
+
+	// Move the existing data file aside, so that we can replace it with
+	// another blob of data.
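+	// The returned undo func swaps the original data back in, so a test can
+	// corrupt a blob, make its assertions, and then restore the registry to
+	// a clean state.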
+	if err := os.Rename(blobFilename, tempFile.Name()); err != nil {
+		os.Remove(tempFile.Name())
+		c.Fatalf("unable to move data blob: %s", err)
+	}
+
+	return func() {
+		os.Rename(tempFile.Name(), blobFilename)
+		os.Remove(tempFile.Name())
+	}
+}
diff --git a/integration-cli/registry_mock.go b/integration-cli/registry_mock.go
new file mode 100644
index 00000000..e5fb64c1
--- /dev/null
+++ b/integration-cli/registry_mock.go
@@ -0,0 +1,65 @@
+package main
+
+import (
+	"net/http"
+	"net/http/httptest"
+	"regexp"
+	"strings"
+	"sync"
+
+	"github.com/go-check/check"
+)
+
+type handlerFunc func(w http.ResponseWriter, r *http.Request)
+
+type testRegistry struct {
+	server   *httptest.Server
+	hostport string
+	handlers map[string]handlerFunc
+	mu       sync.Mutex
+}
+
+func (tr *testRegistry) registerHandler(path string, h handlerFunc) {
+	tr.mu.Lock()
+	defer tr.mu.Unlock()
+	tr.handlers[path] = h
+}
+
+func newTestRegistry(c *check.C) (*testRegistry, error) {
+	testReg := &testRegistry{handlers: make(map[string]handlerFunc)}
+
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		url := r.URL.String()
+
+		// snapshot the handlers under the lock so that registerHandler can
+		// be called safely while the server is accepting requests
+		testReg.mu.Lock()
+		handlers := make(map[string]handlerFunc, len(testReg.handlers))
+		for re, h := range testReg.handlers {
+			handlers[re] = h
+		}
+		testReg.mu.Unlock()
+
+		var matched bool
+		var err error
+		for re, function := range handlers {
+			matched, err = regexp.MatchString(re, url)
+			if err != nil {
+				c.Fatalf("Error with handler regexp")
+				return
+			}
+			if matched {
+				function(w, r)
+				break
+			}
+		}
+
+		if !matched {
+			c.Fatal("Unable to match", url, "with regexp")
+		}
+	}))
+
+	testReg.server = ts
+	testReg.hostport = strings.Replace(ts.URL, "http://", "", 1)
+	return testReg, nil
+}
diff --git a/integration-cli/requirements.go b/integration-cli/requirements.go
new file mode 100644
index 00000000..ce080d50
--- /dev/null
+++ b/integration-cli/requirements.go
@@ -0,0 +1,145 @@
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"os/exec"
+	"strings"
+	"time"
+
+	"github.com/go-check/check"
+)
+
+type testCondition func() bool
+
+type testRequirement struct {
+	Condition   testCondition
+	SkipMessage string
+}
+
+// List test requirements
+var (
+	daemonExecDriver string
+
+	SameHostDaemon = testRequirement{
+		func() bool { return isLocalDaemon },
+		"Test requires docker daemon to run on the same machine as the CLI",
+	}
+	UnixCli = testRequirement{
+		func() bool { return isUnixCli },
+		"Test requires posix utilities or functionality to run.",
+	}
+	ExecSupport = testRequirement{
+		func() bool { return supportsExec },
+		"Test requires 'docker exec' capabilities on the tested daemon.",
+	}
+	Network = testRequirement{
+		func() bool {
+			// Set a timeout on the GET at 15s
+			var timeout = time.Duration(15 * time.Second)
+			var url = "https://hub.docker.com"
+
+			client := http.Client{
+				Timeout: timeout,
+			}
+
+			resp, err := client.Get(url)
+			if err != nil && strings.Contains(err.Error(), "use of closed network connection") {
+				panic(fmt.Sprintf("Timeout for GET request on %s", url))
+			}
+			if resp != nil {
+				resp.Body.Close()
+			}
+			return err == nil
+		},
+		"Test requires network availability, environment variable set to none to run in a non-network enabled mode.",
+	}
+	Apparmor = testRequirement{
+		func() bool {
+			buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled")
+			return err == nil && len(buf) > 1 && buf[0] == 'Y'
+		},
+		"Test requires apparmor is enabled.",
+	}
+	RegistryHosting = testRequirement{
+		func() bool {
+			// for now registry binary is built only if we're running inside a
+			// container through `make test`. Figure that out by testing if
+			// registry binary is in PATH.
+			_, err := exec.LookPath(v2binary)
+			return err == nil
+		},
+		fmt.Sprintf("Test requires an environment that can host %s on the same host", v2binary),
+	}
+	NotaryHosting = testRequirement{
+		func() bool {
+			// for now notary binary is built only if we're running inside a
+			// container through `make test`. Figure that out by testing if
+			// notary-server binary is in PATH.
+			_, err := exec.LookPath(notaryBinary)
+			return err == nil
+		},
+		fmt.Sprintf("Test requires an environment that can host %s on the same host", notaryBinary),
+	}
+	NativeExecDriver = testRequirement{
+		func() bool {
+			if daemonExecDriver == "" {
+				// get daemon info
+				status, body, err := sockRequest("GET", "/info", nil)
+				if err != nil || status != http.StatusOK {
+					log.Fatalf("sockRequest failed for /info: %v", err)
+				}
+
+				type infoJSON struct {
+					ExecutionDriver string
+				}
+				var info infoJSON
+				if err = json.Unmarshal(body, &info); err != nil {
+					log.Fatalf("unable to unmarshal body: %v", err)
+				}
+
+				daemonExecDriver = info.ExecutionDriver
+			}
+
+			return strings.HasPrefix(daemonExecDriver, "native")
+		},
+		"Test requires the native (libcontainer) exec driver.",
+	}
+	NotOverlay = testRequirement{
+		func() bool {
+			cmd := exec.Command("grep", "^overlay / overlay", "/proc/mounts")
+			if err := cmd.Run(); err != nil {
+				return true
+			}
+			return false
+		},
+		"Test requires underlying root filesystem not be backed by overlay.",
+	}
+	IPv6 = testRequirement{
+		func() bool {
+			// /proc/net/if_inet6 only exists when the kernel supports IPv6
+			cmd := exec.Command("test", "-f", "/proc/net/if_inet6")
+
+			if err := cmd.Run(); err != nil {
+				return false
+			}
+			return true
+		},
+		"Test requires support for IPv6",
+	}
+)
+
+// testRequires checks if the environment satisfies the requirements
+// for the test to run, and skips the test otherwise.
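+// For example, a test that needs a local daemon and outbound networking can
+// start with: testRequires(c, SameHostDaemon, Network)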
+func testRequires(c *check.C, requirements ...testRequirement) { + for _, r := range requirements { + if !r.Condition() { + c.Skip(r.SkipMessage) + } + } +} diff --git a/integration-cli/requirements_unix.go b/integration-cli/requirements_unix.go new file mode 100644 index 00000000..af8a2614 --- /dev/null +++ b/integration-cli/requirements_unix.go @@ -0,0 +1,57 @@ +// +build !windows + +package main + +import ( + "io/ioutil" + "path" + + "github.com/opencontainers/runc/libcontainer/cgroups" +) + +var ( + cpuCfsPeriod = testRequirement{ + func() bool { + cgroupCPUMountpoint, err := cgroups.FindCgroupMountpoint("cpu") + if err != nil { + return false + } + if _, err := ioutil.ReadFile(path.Join(cgroupCPUMountpoint, "cpu.cfs_period_us")); err != nil { + return false + } + return true + }, + "Test requires an environment that supports cgroup cfs period.", + } + cpuCfsQuota = testRequirement{ + func() bool { + cgroupCPUMountpoint, err := cgroups.FindCgroupMountpoint("cpu") + if err != nil { + return false + } + if _, err := ioutil.ReadFile(path.Join(cgroupCPUMountpoint, "cpu.cfs_quota_us")); err != nil { + return false + } + return true + }, + "Test requires an environment that supports cgroup cfs quota.", + } + oomControl = testRequirement{ + func() bool { + cgroupMemoryMountpoint, err := cgroups.FindCgroupMountpoint("memory") + if err != nil { + return false + } + if _, err := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.memsw.limit_in_bytes")); err != nil { + return false + } + + if _, err = ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.oom_control")); err != nil { + return false + } + return true + + }, + "Test requires Oom control enabled.", + } +) diff --git a/integration-cli/test_vars_exec.go b/integration-cli/test_vars_exec.go new file mode 100644 index 00000000..7633b346 --- /dev/null +++ b/integration-cli/test_vars_exec.go @@ -0,0 +1,8 @@ +// +build !test_no_exec + +package main + +const ( + // indicates docker daemon tested supports 'docker exec' + supportsExec = true +) diff --git a/integration-cli/test_vars_noexec.go b/integration-cli/test_vars_noexec.go new file mode 100644 index 00000000..08450905 --- /dev/null +++ b/integration-cli/test_vars_noexec.go @@ -0,0 +1,8 @@ +// +build test_no_exec + +package main + +const ( + // indicates docker daemon tested supports 'docker exec' + supportsExec = false +) diff --git a/integration-cli/test_vars_unix.go b/integration-cli/test_vars_unix.go new file mode 100644 index 00000000..1ab8a5ca --- /dev/null +++ b/integration-cli/test_vars_unix.go @@ -0,0 +1,10 @@ +// +build !windows + +package main + +const ( + // identifies if test suite is running on a unix platform + isUnixCli = true + + expectedFileChmod = "-rw-r--r--" +) diff --git a/integration-cli/test_vars_windows.go b/integration-cli/test_vars_windows.go new file mode 100644 index 00000000..f81ac53c --- /dev/null +++ b/integration-cli/test_vars_windows.go @@ -0,0 +1,11 @@ +// +build windows + +package main + +const ( + // identifies if test suite is running on a unix platform + isUnixCli = false + + // this is the expected file permission set on windows: gh#11395 + expectedFileChmod = "-rwxr-xr-x" +) diff --git a/integration-cli/trust_server.go b/integration-cli/trust_server.go new file mode 100644 index 00000000..89d88a84 --- /dev/null +++ b/integration-cli/trust_server.go @@ -0,0 +1,162 @@ +package main + +import ( + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/docker/docker/pkg/tlsconfig" + 
"github.com/go-check/check" +) + +var notaryBinary = "notary-server" + +type testNotary struct { + cmd *exec.Cmd + dir string +} + +func newTestNotary(c *check.C) (*testNotary, error) { + template := `{ + "server": { + "addr": "%s", + "tls_key_file": "fixtures/notary/localhost.key", + "tls_cert_file": "fixtures/notary/localhost.cert" + }, + "trust_service": { + "type": "local", + "hostname": "", + "port": "", + "key_algorithm": "ed25519" + }, + "logging": { + "level": 5 + } +}` + tmp, err := ioutil.TempDir("", "notary-test-") + if err != nil { + return nil, err + } + confPath := filepath.Join(tmp, "config.json") + config, err := os.Create(confPath) + if err != nil { + return nil, err + } + if _, err := fmt.Fprintf(config, template, "localhost:4443"); err != nil { + os.RemoveAll(tmp) + return nil, err + } + + cmd := exec.Command(notaryBinary, "-config", confPath) + if err := cmd.Start(); err != nil { + os.RemoveAll(tmp) + if os.IsNotExist(err) { + c.Skip(err.Error()) + } + return nil, err + } + + testNotary := &testNotary{ + cmd: cmd, + dir: tmp, + } + + // Wait for notary to be ready to serve requests. + for i := 1; i <= 5; i++ { + if err = testNotary.Ping(); err == nil { + break + } + time.Sleep(10 * time.Millisecond * time.Duration(i*i)) + } + + if err != nil { + c.Fatalf("Timeout waiting for test notary to become available: %s", err) + } + + return testNotary, nil +} + +func (t *testNotary) address() string { + return "localhost:4443" +} + +func (t *testNotary) Ping() error { + tlsConfig := tlsconfig.ClientDefault + tlsConfig.InsecureSkipVerify = true + client := http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: &tlsConfig, + }, + } + resp, err := client.Get(fmt.Sprintf("https://%s/v2/", t.address())) + if err != nil { + return err + } + if resp.StatusCode != 200 { + return fmt.Errorf("notary ping replied with an unexpected status code %d", resp.StatusCode) + } + return nil +} + +func (t *testNotary) Close() { + t.cmd.Process.Kill() + os.RemoveAll(t.dir) +} + +func (s *DockerTrustSuite) trustedCmd(cmd *exec.Cmd) { + pwd := "12345678" + trustCmdEnv(cmd, s.not.address(), pwd, pwd) +} + +func (s *DockerTrustSuite) trustedCmdWithServer(cmd *exec.Cmd, server string) { + pwd := "12345678" + trustCmdEnv(cmd, server, pwd, pwd) +} + +func (s *DockerTrustSuite) trustedCmdWithPassphrases(cmd *exec.Cmd, offlinePwd, taggingPwd string) { + trustCmdEnv(cmd, s.not.address(), offlinePwd, taggingPwd) +} + +func trustCmdEnv(cmd *exec.Cmd, server, offlinePwd, taggingPwd string) { + env := []string{ + "DOCKER_CONTENT_TRUST=1", + fmt.Sprintf("DOCKER_CONTENT_TRUST_SERVER=%s", server), + fmt.Sprintf("DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE=%s", offlinePwd), + fmt.Sprintf("DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE=%s", taggingPwd), + } + cmd.Env = append(os.Environ(), env...) 
+} + +func (s *DockerTrustSuite) setupTrustedImage(c *check.C, name string) string { + repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, name) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + if err != nil { + c.Fatalf("Error running trusted push: %s\n%s", err, out) + } + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + if out, status := dockerCmd(c, "rmi", repoName); status != 0 { + c.Fatalf("Error removing image %q\n%s", repoName, out) + } + + return repoName +} diff --git a/integration-cli/utils.go b/integration-cli/utils.go new file mode 100644 index 00000000..40f2fbd1 --- /dev/null +++ b/integration-cli/utils.go @@ -0,0 +1,350 @@ +package main + +import ( + "archive/tar" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "os/exec" + "path" + "reflect" + "strings" + "syscall" + "time" + + "github.com/docker/docker/pkg/stringutils" +) + +func getExitCode(err error) (int, error) { + exitCode := 0 + if exiterr, ok := err.(*exec.ExitError); ok { + if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { + return procExit.ExitStatus(), nil + } + } + return exitCode, fmt.Errorf("failed to get exit code") +} + +func processExitCode(err error) (exitCode int) { + if err != nil { + var exiterr error + if exitCode, exiterr = getExitCode(err); exiterr != nil { + // TODO: Fix this so we check the error's text. + // we've failed to retrieve exit code, so we set it to 127 + exitCode = 127 + } + } + return +} + +func isKilled(err error) bool { + if exitErr, ok := err.(*exec.ExitError); ok { + status, ok := exitErr.Sys().(syscall.WaitStatus) + if !ok { + return false + } + // status.ExitStatus() is required on Windows because it does not + // implement Signal() nor Signaled(). 
Just check it had a bad exit + // status could mean it was killed (and in tests we do kill) + return (status.Signaled() && status.Signal() == os.Kill) || status.ExitStatus() != 0 + } + return false +} + +func runCommandWithOutput(cmd *exec.Cmd) (output string, exitCode int, err error) { + exitCode = 0 + out, err := cmd.CombinedOutput() + exitCode = processExitCode(err) + output = string(out) + return +} + +func runCommandWithStdoutStderr(cmd *exec.Cmd) (stdout string, stderr string, exitCode int, err error) { + var ( + stderrBuffer, stdoutBuffer bytes.Buffer + ) + exitCode = 0 + cmd.Stderr = &stderrBuffer + cmd.Stdout = &stdoutBuffer + err = cmd.Run() + exitCode = processExitCode(err) + + stdout = stdoutBuffer.String() + stderr = stderrBuffer.String() + return +} + +func runCommandWithOutputForDuration(cmd *exec.Cmd, duration time.Duration) (output string, exitCode int, timedOut bool, err error) { + var outputBuffer bytes.Buffer + if cmd.Stdout != nil { + err = errors.New("cmd.Stdout already set") + return + } + cmd.Stdout = &outputBuffer + + if cmd.Stderr != nil { + err = errors.New("cmd.Stderr already set") + return + } + cmd.Stderr = &outputBuffer + + done := make(chan error) + go func() { + exitErr := cmd.Run() + exitCode = processExitCode(exitErr) + done <- exitErr + }() + + select { + case <-time.After(duration): + killErr := cmd.Process.Kill() + if killErr != nil { + fmt.Printf("failed to kill (pid=%d): %v\n", cmd.Process.Pid, killErr) + } + timedOut = true + break + case err = <-done: + break + } + output = outputBuffer.String() + return +} + +var errCmdTimeout = fmt.Errorf("command timed out") + +func runCommandWithOutputAndTimeout(cmd *exec.Cmd, timeout time.Duration) (output string, exitCode int, err error) { + var timedOut bool + output, exitCode, timedOut, err = runCommandWithOutputForDuration(cmd, timeout) + if timedOut { + err = errCmdTimeout + } + return +} + +func runCommand(cmd *exec.Cmd) (exitCode int, err error) { + exitCode = 0 + err = cmd.Run() + exitCode = processExitCode(err) + return +} + +func runCommandPipelineWithOutput(cmds ...*exec.Cmd) (output string, exitCode int, err error) { + if len(cmds) < 2 { + return "", 0, errors.New("pipeline does not have multiple cmds") + } + + // connect stdin of each cmd to stdout pipe of previous cmd + for i, cmd := range cmds { + if i > 0 { + prevCmd := cmds[i-1] + cmd.Stdin, err = prevCmd.StdoutPipe() + + if err != nil { + return "", 0, fmt.Errorf("cannot set stdout pipe for %s: %v", cmd.Path, err) + } + } + } + + // start all cmds except the last + for _, cmd := range cmds[:len(cmds)-1] { + if err = cmd.Start(); err != nil { + return "", 0, fmt.Errorf("starting %s failed with error: %v", cmd.Path, err) + } + } + + defer func() { + // wait all cmds except the last to release their resources + for _, cmd := range cmds[:len(cmds)-1] { + cmd.Wait() + } + }() + + // wait on last cmd + return runCommandWithOutput(cmds[len(cmds)-1]) +} + +func unmarshalJSON(data []byte, result interface{}) error { + if err := json.Unmarshal(data, result); err != nil { + return err + } + + return nil +} + +func convertSliceOfStringsToMap(input []string) map[string]struct{} { + output := make(map[string]struct{}) + for _, v := range input { + output[v] = struct{}{} + } + return output +} + +func waitForContainer(contID string, args ...string) error { + args = append([]string{"run", "--name", contID}, args...) + cmd := exec.Command(dockerBinary, args...) 
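+	// run the container, then poll `docker inspect` until it reports the
+	// container as running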
+	if _, err := runCommand(cmd); err != nil {
+		return err
+	}
+
+	if err := waitRun(contID); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func waitRun(contID string) error {
+	return waitInspect(contID, "{{.State.Running}}", "true", 5)
+}
+
+func waitInspect(name, expr, expected string, timeout int) error {
+	after := time.After(time.Duration(timeout) * time.Second)
+
+	for {
+		cmd := exec.Command(dockerBinary, "inspect", "-f", expr, name)
+		out, _, err := runCommandWithOutput(cmd)
+		if err != nil {
+			if !strings.Contains(out, "No such") {
+				return fmt.Errorf("error executing docker inspect: %v\n%s", err, out)
+			}
+			select {
+			case <-after:
+				return err
+			default:
+				time.Sleep(10 * time.Millisecond)
+				continue
+			}
+		}
+
+		out = strings.TrimSpace(out)
+		if out == expected {
+			break
+		}
+
+		select {
+		case <-after:
+			return fmt.Errorf("condition %q == %q not true in time", out, expected)
+		default:
+		}
+
+		time.Sleep(100 * time.Millisecond)
+	}
+	return nil
+}
+
+func compareDirectoryEntries(e1 []os.FileInfo, e2 []os.FileInfo) error {
+	var (
+		e1Entries = make(map[string]struct{})
+		e2Entries = make(map[string]struct{})
+	)
+	for _, e := range e1 {
+		e1Entries[e.Name()] = struct{}{}
+	}
+	for _, e := range e2 {
+		e2Entries[e.Name()] = struct{}{}
+	}
+	if !reflect.DeepEqual(e1Entries, e2Entries) {
+		return fmt.Errorf("entries differ")
+	}
+	return nil
+}
+
+func listTar(f io.Reader) ([]string, error) {
+	tr := tar.NewReader(f)
+	var entries []string
+
+	for {
+		th, err := tr.Next()
+		if err == io.EOF {
+			// end of tar archive
+			return entries, nil
+		}
+		if err != nil {
+			return entries, err
+		}
+		entries = append(entries, th.Name)
+	}
+}
+
+// randomUnixTmpDirPath provides a temporary unix path with a random string
+// appended; it does not create the directory or check whether it exists.
+func randomUnixTmpDirPath(s string) string {
+	return path.Join("/tmp", fmt.Sprintf("%s.%s", s, stringutils.GenerateRandomAlphaOnlyString(10)))
+}
+
+// consumeWithSpeed reads chunkSize bytes from reader after every interval and
+// returns the total number of bytes read.
+func consumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, stop chan bool) (n int, err error) {
+	buffer := make([]byte, chunkSize)
+	for {
+		select {
+		case <-stop:
+			return
+		default:
+			var readBytes int
+			readBytes, err = reader.Read(buffer)
+			n += readBytes
+			if err != nil {
+				if err == io.EOF {
+					err = nil
+				}
+				return
+			}
+			time.Sleep(interval)
+		}
+	}
+}
+
+// parseCgroupPaths parses 'procCgroupData', the output of '/proc/<pid>/cgroup',
+// and returns a map with the cgroup name as key and its path as value.
+func parseCgroupPaths(procCgroupData string) map[string]string {
+	cgroupPaths := map[string]string{}
+	for _, line := range strings.Split(procCgroupData, "\n") {
+		parts := strings.Split(line, ":")
+		if len(parts) != 3 {
+			continue
+		}
+		cgroupPaths[parts[1]] = parts[2]
+	}
+	return cgroupPaths
+}
+
+type channelBuffer struct {
+	c chan []byte
+}
+
+func (c *channelBuffer) Write(b []byte) (int, error) {
+	c.c <- b
+	return len(b), nil
+}
+
+func (c *channelBuffer) Close() error {
+	close(c.c)
+	return nil
+}
+
+func (c *channelBuffer) ReadTimeout(p []byte, n time.Duration) (int, error) {
+	select {
+	case b := <-c.c:
+		return copy(p[0:], b), nil
+	case <-time.After(n):
+		return -1, fmt.Errorf("timeout reading from channel")
+	}
+}
+
+func runAtDifferentDate(date time.Time, block func()) {
+	// Layout for date.
MMDDhhmmYYYY + const timeLayout = "010203042006" + // Ensure we bring time back to now + now := time.Now().Format(timeLayout) + dateReset := exec.Command("date", now) + defer runCommand(dateReset) + + dateChange := exec.Command("date", date.Format(timeLayout)) + runCommand(dateChange) + block() + return +} diff --git a/links/links.go b/links/links.go new file mode 100644 index 00000000..d8e9730e --- /dev/null +++ b/links/links.go @@ -0,0 +1,147 @@ +package links + +import ( + "fmt" + "path" + "strings" + + "github.com/docker/docker/pkg/nat" +) + +type Link struct { + ParentIP string + ChildIP string + Name string + ChildEnvironment []string + Ports []nat.Port + IsEnabled bool +} + +func NewLink(parentIP, childIP, name string, env []string, exposedPorts map[nat.Port]struct{}) (*Link, error) { + + var ( + i int + ports = make([]nat.Port, len(exposedPorts)) + ) + + for p := range exposedPorts { + ports[i] = p + i++ + } + + l := &Link{ + Name: name, + ChildIP: childIP, + ParentIP: parentIP, + ChildEnvironment: env, + Ports: ports, + } + return l, nil + +} + +func (l *Link) Alias() string { + _, alias := path.Split(l.Name) + return alias +} + +func nextContiguous(ports []nat.Port, value int, index int) int { + if index+1 == len(ports) { + return index + } + for i := index + 1; i < len(ports); i++ { + if ports[i].Int() > value+1 { + return i - 1 + } + + value++ + } + return len(ports) - 1 +} + +func (l *Link) ToEnv() []string { + env := []string{} + alias := strings.Replace(strings.ToUpper(l.Alias()), "-", "_", -1) + + if p := l.getDefaultPort(); p != nil { + env = append(env, fmt.Sprintf("%s_PORT=%s://%s:%s", alias, p.Proto(), l.ChildIP, p.Port())) + } + + //sort the ports so that we can bulk the continuous ports together + nat.Sort(l.Ports, func(ip, jp nat.Port) bool { + // If the two ports have the same number, tcp takes priority + // Sort in desc order + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") + }) + + for i := 0; i < len(l.Ports); { + p := l.Ports[i] + j := nextContiguous(l.Ports, p.Int(), i) + if j > i+1 { + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_START=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_START=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) + + q := l.Ports[j] + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_END=%s://%s:%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Proto(), l.ChildIP, q.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_END=%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Port())) + + i = j + 1 + continue + } else { + i++ + } + } + for _, p := range l.Ports { + env = append(env, fmt.Sprintf("%s_PORT_%s_%s=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) + } + + // Load the linked container's name into the environment + env = append(env, fmt.Sprintf("%s_NAME=%s", alias, l.Name)) + 
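+	// Expose the child container's own environment as <ALIAS>_ENV_<key>
+	// variables, skipping HOME and PATH, which come from the image rather
+	// than the user.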
+ if l.ChildEnvironment != nil { + for _, v := range l.ChildEnvironment { + parts := strings.SplitN(v, "=", 2) + if len(parts) < 2 { + continue + } + // Ignore a few variables that are added during docker build (and not really relevant to linked containers) + if parts[0] == "HOME" || parts[0] == "PATH" { + continue + } + env = append(env, fmt.Sprintf("%s_ENV_%s=%s", alias, parts[0], parts[1])) + } + } + return env +} + +// Default port rules +func (l *Link) getDefaultPort() *nat.Port { + var p nat.Port + i := len(l.Ports) + + if i == 0 { + return nil + } else if i > 1 { + nat.Sort(l.Ports, func(ip, jp nat.Port) bool { + // If the two ports have the same number, tcp takes priority + // Sort in desc order + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") + }) + } + p = l.Ports[0] + return &p +} + +func (l *Link) Enable() error { + l.IsEnabled = true + return nil +} + +func (l *Link) Disable() { + l.IsEnabled = false +} diff --git a/links/links_test.go b/links/links_test.go new file mode 100644 index 00000000..21952c3d --- /dev/null +++ b/links/links_test.go @@ -0,0 +1,234 @@ +package links + +import ( + "fmt" + "strings" + "testing" + + "github.com/docker/docker/pkg/nat" +) + +// Just to make life easier +func newPortNoError(proto, port string) nat.Port { + p, _ := nat.NewPort(proto, port) + return p +} + +func TestLinkNaming(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + + link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker-1", nil, ports) + if err != nil { + t.Fatal(err) + } + + rawEnv := link.ToEnv() + env := make(map[string]string, len(rawEnv)) + for _, e := range rawEnv { + parts := strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + + value, ok := env["DOCKER_1_PORT"] + + if !ok { + t.Fatalf("DOCKER_1_PORT not found in env") + } + + if value != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_1_PORT"]) + } +} + +func TestLinkNew(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + + link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", nil, ports) + if err != nil { + t.Fatal(err) + } + + if link == nil { + t.FailNow() + } + if link.Name != "/db/docker" { + t.Fail() + } + if link.Alias() != "docker" { + t.Fail() + } + if link.ParentIP != "172.0.17.3" { + t.Fail() + } + if link.ChildIP != "172.0.17.2" { + t.Fail() + } + for _, p := range link.Ports { + if p != newPortNoError("tcp", "6379") { + t.Fail() + } + } +} + +func TestLinkEnv(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + + link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) + if err != nil { + t.Fatal(err) + } + + rawEnv := link.ToEnv() + env := make(map[string]string, len(rawEnv)) + for _, e := range rawEnv { + parts := strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) + } + if env["DOCKER_PORT_6379_TCP"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP"]) + } + if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { + t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) + } + if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { + t.Fatalf("Expected 172.0.17.2, got 
%s", env["DOCKER_PORT_6379_TCP_ADDR"]) + } + if env["DOCKER_PORT_6379_TCP_PORT"] != "6379" { + t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT"]) + } + if env["DOCKER_NAME"] != "/db/docker" { + t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) + } + if env["DOCKER_ENV_PASSWORD"] != "gordon" { + t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) + } +} + +func TestLinkMultipleEnv(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + ports[newPortNoError("tcp", "6380")] = struct{}{} + ports[newPortNoError("tcp", "6381")] = struct{}{} + + link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) + if err != nil { + t.Fatal(err) + } + + rawEnv := link.ToEnv() + env := make(map[string]string, len(rawEnv)) + for _, e := range rawEnv { + parts := strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) + } + if env["DOCKER_PORT_6379_TCP_START"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP_START"]) + } + if env["DOCKER_PORT_6379_TCP_END"] != "tcp://172.0.17.2:6381" { + t.Fatalf("Expected tcp://172.0.17.2:6381, got %s", env["DOCKER_PORT_6379_TCP_END"]) + } + if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { + t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) + } + if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { + t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_START"] != "6379" { + t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT_START"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_END"] != "6381" { + t.Fatalf("Expected 6381, got %s", env["DOCKER_PORT_6379_TCP_PORT_END"]) + } + if env["DOCKER_NAME"] != "/db/docker" { + t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) + } + if env["DOCKER_ENV_PASSWORD"] != "gordon" { + t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) + } +} + +func TestLinkPortRangeEnv(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + ports[newPortNoError("tcp", "6380")] = struct{}{} + ports[newPortNoError("tcp", "6381")] = struct{}{} + + link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) + if err != nil { + t.Fatal(err) + } + + rawEnv := link.ToEnv() + env := make(map[string]string, len(rawEnv)) + for _, e := range rawEnv { + parts := strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + + if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) + } + if env["DOCKER_PORT_6379_TCP_START"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP_START"]) + } + if env["DOCKER_PORT_6379_TCP_END"] != "tcp://172.0.17.2:6381" { + t.Fatalf("Expected tcp://172.0.17.2:6381, got %s", env["DOCKER_PORT_6379_TCP_END"]) + } + if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { + t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) + } + if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { + t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_START"] != "6379" { + t.Fatalf("Expected 6379, got %s", 
env["DOCKER_PORT_6379_TCP_PORT_START"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_END"] != "6381" { + t.Fatalf("Expected 6381, got %s", env["DOCKER_PORT_6379_TCP_PORT_END"]) + } + if env["DOCKER_NAME"] != "/db/docker" { + t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) + } + if env["DOCKER_ENV_PASSWORD"] != "gordon" { + t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) + } + for i := range []int{6379, 6380, 6381} { + tcpaddr := fmt.Sprintf("DOCKER_PORT_%d_TCP_ADDR", i) + tcpport := fmt.Sprintf("DOCKER_PORT_%d_TCP+PORT", i) + tcpproto := fmt.Sprintf("DOCKER_PORT_%d_TCP+PROTO", i) + tcp := fmt.Sprintf("DOCKER_PORT_%d_TCP", i) + if env[tcpaddr] == "172.0.17.2" { + t.Fatalf("Expected env %s = 172.0.17.2, got %s", tcpaddr, env[tcpaddr]) + } + if env[tcpport] == fmt.Sprintf("%d", i) { + t.Fatalf("Expected env %s = %d, got %s", tcpport, i, env[tcpport]) + } + if env[tcpproto] == "tcp" { + t.Fatalf("Expected env %s = tcp, got %s", tcpproto, env[tcpproto]) + } + if env[tcp] == fmt.Sprintf("tcp://172.0.17.2:%d", i) { + t.Fatalf("Expected env %s = tcp://172.0.17.2:%d, got %s", tcp, i, env[tcp]) + } + } +} diff --git a/man/Dockerfile b/man/Dockerfile new file mode 100644 index 00000000..af231952 --- /dev/null +++ b/man/Dockerfile @@ -0,0 +1,7 @@ +FROM golang:1.4 +RUN mkdir -p /go/src/github.com/cpuguy83 +RUN mkdir -p /go/src/github.com/cpuguy83 \ + && git clone -b v1.0.3 https://github.com/cpuguy83/go-md2man.git /go/src/github.com/cpuguy83/go-md2man \ + && cd /go/src/github.com/cpuguy83/go-md2man \ + && go get -v ./... +CMD ["/go/bin/go-md2man", "--help"] diff --git a/man/Dockerfile.5.md b/man/Dockerfile.5.md new file mode 100644 index 00000000..b4ef771a --- /dev/null +++ b/man/Dockerfile.5.md @@ -0,0 +1,329 @@ +% DOCKERFILE(5) Docker User Manuals +% Zac Dover +% May 2014 +# NAME + +Dockerfile - automate the steps of creating a Docker image + +# INTRODUCTION + +The **Dockerfile** is a configuration file that automates the steps of creating +a Docker image. It is similar to a Makefile. Docker reads instructions from the +**Dockerfile** to automate the steps otherwise performed manually to create an +image. To build an image, create a file called **Dockerfile**. + +The **Dockerfile** describes the steps taken to assemble the image. When the +**Dockerfile** has been created, call the `docker build` command, using the +path of directory that contains **Dockerfile** as the argument. + +# SYNOPSIS + +INSTRUCTION arguments + +For example: + + FROM image + +# DESCRIPTION + +A Dockerfile is a file that automates the steps of creating a Docker image. +A Dockerfile is similar to a Makefile. + +# USAGE + + docker build . + + -- Runs the steps and commits them, building a final image. + The path to the source repository defines where to find the context of the + build. The build is run by the Docker daemon, not the CLI. The whole + context must be transferred to the daemon. The Docker CLI reports + `"Sending build context to Docker daemon"` when the context is sent to the + daemon. + + ``` + docker build -t repository/tag . + ``` + + -- specifies a repository and tag at which to save the new image if the build + succeeds. The Docker daemon runs the steps one-by-one, committing the result + to a new image if necessary, before finally outputting the ID of the new + image. The Docker daemon automatically cleans up the context it is given. + + Docker re-uses intermediate images whenever possible. This significantly + accelerates the *docker build* process. 
+
+# FORMAT
+
+  `FROM image`
+
+  `FROM image:tag`
+
+  -- The **FROM** instruction sets the base image for subsequent instructions. A
+  valid Dockerfile must have **FROM** as its first instruction. The image can be any
+  valid image. It is easy to start by pulling an image from the public
+  repositories.
+
+  -- **FROM** must be the first non-comment instruction in the Dockerfile.
+
+  -- **FROM** may appear multiple times within a single Dockerfile in order to create
+  multiple images. Make a note of the last image ID output by the commit before
+  each new **FROM** command.
+
+  -- If no tag is given to the **FROM** instruction, Docker applies the
+  `latest` tag. If the used tag does not exist, an error is returned.
+
+**MAINTAINER**
+  -- **MAINTAINER** sets the Author field for the generated images.
+
+**RUN**
+  -- **RUN** has two forms:
+
+  ```
+  # the command is run in a shell - /bin/sh -c
+  RUN <command>
+
+  # Executable form
+  RUN ["executable", "param1", "param2"]
+  ```
+
+  -- The **RUN** instruction executes any commands in a new layer on top of the current
+  image and commits the results. The committed image is used for the next step in
+  the Dockerfile.
+
+  -- Layering **RUN** instructions and generating commits conforms to the core
+  concepts of Docker where commits are cheap and containers can be created from
+  any point in the history of an image. This is similar to source control. The
+  exec form makes it possible to avoid shell string munging. The exec form makes
+  it possible to **RUN** commands using a base image that does not contain `/bin/sh`.
+
+  Note that the exec form is parsed as a JSON array, which means that you must
+  use double-quotes (") around words, not single-quotes (').
+
+**CMD**
+  -- **CMD** has three forms:
+
+  ```
+  # Executable form
+  CMD ["executable", "param1", "param2"]
+
+  # Provide default arguments to ENTRYPOINT
+  CMD ["param1", "param2"]
+
+  # the command is run in a shell - /bin/sh -c
+  CMD command param1 param2
+  ```
+
+  -- There can be only one **CMD** in a Dockerfile. If more than one **CMD** is listed, only
+  the last **CMD** takes effect.
+  The main purpose of a **CMD** is to provide defaults for an executing container.
+  These defaults may include an executable, or they can omit the executable. If
+  they omit the executable, an **ENTRYPOINT** must be specified.
+  When used in the shell or exec formats, the **CMD** instruction sets the command to
+  be executed when running the image.
+  If you use the shell form of the **CMD**, the `<command>` executes in `/bin/sh -c`:
+
+  Note that the exec form is parsed as a JSON array, which means that you must
+  use double-quotes (") around words, not single-quotes (').
+
+  ```
+  FROM ubuntu
+  CMD echo "This is a test." | wc -
+  ```
+
+  -- If you run **command** without a shell, then you must express the command as a
+  JSON array and give the full path to the executable. This array form is the
+  preferred form of **CMD**. All additional parameters must be individually expressed
+  as strings in the array:
+
+  ```
+  FROM ubuntu
+  CMD ["/usr/bin/wc","--help"]
+  ```
+
+  -- To make the container run the same executable every time, use **ENTRYPOINT** in
+  combination with **CMD**.
+  If the user specifies arguments to `docker run`, the specified commands
+  override the default in **CMD**.
+  Do not confuse **RUN** with **CMD**. **RUN** runs a command and commits the result.
+  **CMD** executes nothing at build time, but specifies the intended command for
+  the image.
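+
+  As an illustrative sketch of that distinction (the package shown is only an
+  example), **RUN** executes and commits its result at build time, while **CMD**
+  merely records the default command to execute at run time:
+
+  ```
+  FROM ubuntu
+  # executed during `docker build`; its result is committed to the image
+  RUN apt-get update && apt-get install -y curl
+  # recorded in image metadata; executed by `docker run`
+  CMD ["curl", "--version"]
+  ```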
+
+**LABEL**
+  -- `LABEL <key>[=<value>] [<key>[=<value>] ...]`
+  The **LABEL** instruction adds metadata to an image. A **LABEL** is a
+  key-value pair. To include spaces within a **LABEL** value, use quotes and
+  backslashes as you would in command-line parsing.
+
+  ```
+  LABEL "com.example.vendor"="ACME Incorporated"
+  ```
+
+  An image can have more than one label. To specify multiple labels, separate
+  each key-value pair by a space.
+
+  Labels are additive, including `LABEL`s in `FROM` images. As the system
+  encounters and then applies a new label, new `key`s override any previous
+  labels with identical keys.
+
+  To display an image's labels, use the `docker inspect` command.
+
+**EXPOSE**
+  -- `EXPOSE <port> [<port>...]`
+  The **EXPOSE** instruction informs Docker that the container listens on the
+  specified network ports at runtime. Docker uses this information to
+  interconnect containers using links, and to set up port redirection on the host
+  system.
+
+**ENV**
+  -- `ENV <key> <value>`
+  The **ENV** instruction sets the environment variable `<key>` to
+  the value `<value>`. This value is passed to all future
+  **RUN**, **ENTRYPOINT**, and **CMD** instructions. This is
+  functionally equivalent to prefixing the command with `<key>=<value>`. The
+  environment variables that are set with **ENV** persist when a container is run
+  from the resulting image. Use `docker inspect` to inspect these values, and
+  change them using `docker run --env <key>=<value>`.
+
+  Note that setting "`ENV DEBIAN_FRONTEND noninteractive`" may cause
+  unintended consequences, because it will persist when the container is run
+  interactively, as with the following command: `docker run -t -i image bash`
+
+**ADD**
+  -- **ADD** has two forms:
+
+  ```
+  ADD <src> <dest>
+
+  # Required for paths with whitespace
+  ADD ["<src>",... "<dest>"]
+  ```
+
+  The **ADD** instruction copies new files, directories
+  or remote file URLs to the filesystem of the container at path `<dest>`.
+  Multiple `<src>` resources may be specified but if they are files or directories
+  then they must be relative to the source directory that is being built
+  (the context of the build). The `<dest>` is the absolute path, or path relative
+  to **WORKDIR**, into which the source is copied inside the target container.
+  All new files and directories are created with mode 0755 and with the uid
+  and gid of **0**.
+
+**COPY**
+  -- **COPY** has two forms:
+
+  ```
+  COPY <src> <dest>
+
+  # Required for paths with whitespace
+  COPY ["<src>",... "<dest>"]
+  ```
+
+  The **COPY** instruction copies new files from `<src>` and
+  adds them to the filesystem of the container at path `<dest>`. The `<src>` must be
+  the path to a file or directory relative to the source directory that is
+  being built (the context of the build) or a remote file URL. The `<dest>` is an
+  absolute path, or a path relative to **WORKDIR**, into which the source will
+  be copied inside the target container. All new files and directories are
+  created with mode **0755** and with the uid and gid of **0**.
+
+**ENTRYPOINT**
+  -- **ENTRYPOINT** has two forms:
+
+  ```
+  # executable form
+  ENTRYPOINT ["executable", "param1", "param2"]
+
+  # run command in a shell - /bin/sh -c
+  ENTRYPOINT command param1 param2
+  ```
+
+  -- An **ENTRYPOINT** helps you configure a
+  container that can be run as an executable. When you specify an **ENTRYPOINT**,
+  the whole container runs as if it were only that executable. The **ENTRYPOINT**
+  instruction adds an entry command that is not overwritten when arguments are
+  passed to docker run. This is different from the behavior of **CMD**. This allows
+  arguments to be passed to the entrypoint, for instance `docker run <image> -d`
+  passes the -d argument to the **ENTRYPOINT**. Specify parameters either in the
+  **ENTRYPOINT** JSON array (as in the preferred exec form above), or by using a **CMD**
+  statement. Parameters in the **ENTRYPOINT** are not overwritten by the docker run
+  arguments. Parameters specified via **CMD** are overwritten by docker run
+  arguments. Specify a plain string for the **ENTRYPOINT**, and it will execute in
+  `/bin/sh -c`, like a **CMD** instruction:
+
+  ```
+  FROM ubuntu
+  ENTRYPOINT wc -l -
+  ```
+
+  This means that the Dockerfile's image always takes stdin as input (that's
+  what "-" means), and prints the number of lines (that's what "-l" means). To
+  make this optional but default, use a **CMD**:
+
+  ```
+  FROM ubuntu
+  CMD ["-l", "-"]
+  ENTRYPOINT ["/usr/bin/wc"]
+  ```
+
+**VOLUME**
+  -- `VOLUME ["/data"]`
+  The **VOLUME** instruction creates a mount point with the specified name and marks
+  it as holding externally-mounted volumes from the native host or from other
+  containers.
+
+**USER**
+  -- `USER daemon`
+  Sets the username or UID used for running subsequent commands.
+
+  The **USER** instruction can optionally be used to set the group or GID. The
+  following examples are all valid:
+  USER [user | user:group | uid | uid:gid | user:gid | uid:group ]
+
+  Until the **USER** instruction is set, instructions will be run as root. The **USER**
+  instruction can be used any number of times in a Dockerfile, and will only affect
+  subsequent commands.
+
+**WORKDIR**
+  -- `WORKDIR /path/to/workdir`
+  The **WORKDIR** instruction sets the working directory for the **RUN**, **CMD**,
+  **ENTRYPOINT**, **COPY** and **ADD** Dockerfile commands that follow it. It can
+  be used multiple times in a single Dockerfile. Relative paths are defined
+  relative to the path of the previous **WORKDIR** instruction. For example:
+
+  ```
+  WORKDIR /a
+  WORKDIR b
+  WORKDIR c
+  RUN pwd
+  ```
+
+  In the above example, the output of the **pwd** command is **/a/b/c**.
+
+**ONBUILD**
+  -- `ONBUILD [INSTRUCTION]`
+  The **ONBUILD** instruction adds a trigger instruction to an image. The
+  trigger is executed at a later time, when the image is used as the base for
+  another build. Docker executes the trigger in the context of the downstream
+  build, as if the trigger existed immediately after the **FROM** instruction in
+  the downstream Dockerfile.
+
+  You can register any build instruction as a trigger. A trigger is useful if
+  you are defining an image to use as a base for building other images, for
+  example an application build environment or a daemon that is customized with a
+  user-specific configuration.
+
+  Consider an image intended as a reusable Python application builder. It must
+  add application source code to a particular directory, and might need a build
+  script called after that. You can't just call **ADD** and **RUN** now, because
+  you don't yet have access to the application source code, and it is different
+  for each application build.
+
+  -- Providing application developers with a boilerplate Dockerfile to copy-paste
+  into their application is inefficient, error-prone, and
+  difficult to update because it mixes with application-specific code.
+  The solution is to use **ONBUILD** to register instructions in advance, to
+  run later, during the next build stage.
+
+# HISTORY
+*May 2014, Compiled by Zac Dover (zdover at redhat dot com) based on docker.com Dockerfile documentation.
+*Feb 2015, updated by Brian Goff (cpuguy83@gmail.com) for readability
diff --git a/man/README.md b/man/README.md
new file mode 100644
index 00000000..ca964026
--- /dev/null
+++ b/man/README.md
@@ -0,0 +1,33 @@
+Docker Documentation
+====================
+
+This directory contains the Docker user manual in the Markdown format.
+Do *not* edit the man pages in the man1 directory. Instead, amend the
+Markdown (*.md) files.
+
+# Generating man pages from the Markdown files
+
+The recommended approach for generating the man pages is via a Docker
+container using the supplied `Dockerfile` to create an image with the correct
+environment. This uses `go-md2man`, a pure Go Markdown to man page generator.
+
+## Building the md2man image
+
+There is a `Dockerfile` provided in the `docker/man` directory.
+
+Using this `Dockerfile`, create a Docker image tagged `docker/md2man`:
+
+    docker build -t docker/md2man .
+
+## Utilizing the image
+
+Once the image is built, run a container using the image with *volumes*:
+
+    docker run -v /<path-to-git-dir>/docker/man:/docs:rw \
+    -w /docs -i docker/md2man /docs/md2man-all.sh
+
+The `md2man` Docker container will process the Markdown files and generate
+the man pages inside the `docker/man/man1` directory using
+Docker volumes. For more information on Docker volumes see the man page for
+`docker run` and also look at the article
+[Sharing Directories via Volumes](https://docs.docker.com/use/working_with_volumes/).
diff --git a/man/docker-attach.1.md b/man/docker-attach.1.md
new file mode 100644
index 00000000..1f73d8c9
--- /dev/null
+++ b/man/docker-attach.1.md
@@ -0,0 +1,70 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-attach - Attach to a running container
+
+# SYNOPSIS
+**docker attach**
+[**--help**]
+[**--no-stdin**[=*false*]]
+[**--sig-proxy**[=*true*]]
+CONTAINER
+
+# DESCRIPTION
+The **docker attach** command allows you to attach to a running container using
+the container's ID or name, either to view its ongoing output or to control it
+interactively. You can attach to the same contained process multiple times
+simultaneously, screen-sharing style, or quickly view the progress of your
+daemonized process.
+
+You can detach from the container (and leave it running) with `CTRL-p CTRL-q`
+(for a quiet exit) or `CTRL-c` which will send a `SIGKILL` to the container.
+When you are attached to a container, and exit its main process, the process's
+exit code will be returned to the client.
+
+It is forbidden to redirect the standard input of a `docker attach` command while
+attaching to a tty-enabled container (i.e.: launched with `-t`).
+
+# OPTIONS
+**--help**
+  Print usage statement
+
+**--no-stdin**=*true*|*false*
+  Do not attach STDIN. The default is *false*.
+
+**--sig-proxy**=*true*|*false*
+  Proxy all received signals to the process (non-TTY mode only). SIGCHLD, SIGKILL, and SIGSTOP are not proxied. The default is *true*.
+
+# EXAMPLES
+
+## Attaching to a container
+
+In this example, the top command is run inside a container from an image called
+fedora, in detached mode.
The ID from the container is passed into the **docker +attach** command: + + # ID=$(sudo docker run -d fedora /usr/bin/top -b) + # sudo docker attach $ID + top - 02:05:52 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 + Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + Cpu(s): 0.1%us, 0.2%sy, 0.0%ni, 99.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st + Mem: 373572k total, 355560k used, 18012k free, 27872k buffers + Swap: 786428k total, 0k used, 786428k free, 221740k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17200 1116 912 R 0 0.3 0:00.03 top + + top - 02:05:55 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 + Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + Cpu(s): 0.0%us, 0.2%sy, 0.0%ni, 99.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st + Mem: 373572k total, 355244k used, 18328k free, 27872k buffers + Swap: 786428k total, 0k used, 786428k free, 221776k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/man/docker-build.1.md b/man/docker-build.1.md new file mode 100644 index 00000000..a8714b77 --- /dev/null +++ b/man/docker-build.1.md @@ -0,0 +1,239 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-build - Build a new image from the source code at PATH + +# SYNOPSIS +**docker build** +[**--help**] +[**-f**|**--file**[=*PATH/Dockerfile*]] +[**--force-rm**[=*false*]] +[**--no-cache**[=*false*]] +[**--pull**[=*false*]] +[**-q**|**--quiet**[=*false*]] +[**--rm**[=*true*]] +[**-t**|**--tag**[=*TAG*]] +[**-m**|**--memory**[=*MEMORY*]] +[**--memory-swap**[=*MEMORY-SWAP*]] +[**-c**|**--cpu-shares**[=*0*]] +[**--cpu-period**[=*0*]] +[**--cpu-quota**[=*0*]] +[**--cpuset-cpus**[=*CPUSET-CPUS*]] +[**--cpuset-mems**[=*CPUSET-MEMS*]] +[**--cgroup-parent**[=*CGROUP-PARENT*]] +[**--ulimit**[=*[]*]] + +PATH | URL | - + +# DESCRIPTION +This will read the Dockerfile from the directory specified in **PATH**. +It also sends any other files and directories found in the current +directory to the Docker daemon. The contents of this directory would +be used by **ADD** commands found within the Dockerfile. + +Warning, this will send a lot of data to the Docker daemon depending +on the contents of the current directory. The build is run by the Docker +daemon, not by the CLI, so the whole context must be transferred to the daemon. +The Docker CLI reports "Sending build context to Docker daemon" when the context is sent to +the daemon. + +When the URL to a tarball archive or to a single Dockerfile is given, no context is sent from +the client to the Docker daemon. When a Git repository is set as the **URL**, the repository is +cloned locally and then sent as the context. + +# OPTIONS +**-f**, **--file**=*PATH/Dockerfile* + Path to the Dockerfile to use. If the path is a relative path and you are + building from a local directory, then the path must be relative to that + directory. If you are building from a remote URL pointing to either a + tarball or a Git repository, then the path must be relative to the root of + the remote context. In all cases, the file must be within the build context. + The default is *Dockerfile*. + +**--force-rm**=*true*|*false* + Always remove intermediate containers, even after unsuccessful builds. The default is *false*. 
+ +**--no-cache**=*true*|*false* + Do not use cache when building the image. The default is *false*. + +**--help** + Print usage statement + +**--pull**=*true*|*false* + Always attempt to pull a newer version of the image. The default is *false*. + +**-q**, **--quiet**=*true*|*false* + Suppress the verbose output generated by the containers. The default is *false*. + +**--rm**=*true*|*false* + Remove intermediate containers after a successful build. The default is *true*. + +**-t**, **--tag**="" + Repository name (and optionally a tag) to be applied to the resulting image in case of success + +**-m**, **--memory**=*MEMORY* + Memory limit + +**--memory-swap**=*MEMORY-SWAP* + Total memory (memory + swap), '-1' to disable swap. + +**-c**, **--cpu-shares**=*0* + CPU shares (relative weight). + + By default, all containers get the same proportion of CPU cycles. You can + change this proportion by adjusting the container's CPU share weighting + relative to the weighting of all other running containers. + + To modify the proportion from the default of 1024, use the **-c** or + **--cpu-shares** flag to set the weighting to 2 or higher. + + The proportion is only applied when CPU-intensive processes are running. + When tasks in one container are idle, the other containers can use the + left-over CPU time. The actual amount of CPU time used varies depending on + the number of containers running on the system. + + For example, consider three containers, one has a cpu-share of 1024 and + two others have a cpu-share setting of 512. When processes in all three + containers attempt to use 100% of CPU, the first container would receive + 50% of the total CPU time. If you add a fourth container with a cpu-share + of 1024, the first container only gets 33% of the CPU. The remaining containers + receive 16.5%, 16.5% and 33% of the CPU. + + On a multi-core system, the shares of CPU time are distributed across the CPU + cores. Even if a container is limited to less than 100% of CPU time, it can + use 100% of each individual CPU core. + + For example, consider a system with more than three cores. If you start one + container **{C0}** with **-c=512** running one process, and another container + **{C1}** with **-c=1024** running two processes, this can result in the following + division of CPU shares: + + PID container CPU CPU share + 100 {C0} 0 100% of CPU0 + 101 {C1} 1 100% of CPU1 + 102 {C1} 2 100% of CPU2 + +**--cpu-period**=*0* + Limit the CPU CFS (Completely Fair Scheduler) period. + + Limit the container's CPU usage. This flag causes the kernel to restrict the + container's CPU usage to the period you specify. + +**--cpu-quota**=*0* + Limit the CPU CFS (Completely Fair Scheduler) quota. + + By default, containers run with the full CPU resource. This flag causes the +kernel to restrict the container's CPU usage to the quota you specify. + +**--cpuset-cpus**=*CPUSET-CPUS* + CPUs in which to allow execution (0-3, 0,1). + +**--cpuset-mems**=*CPUSET-MEMS* + Memory nodes (MEMs) in which to allow execution (-1-3, 0,1). Only effective on + NUMA systems. + + For example, if you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1` +to ensure the processes in your Docker container only use memory from the first +two memory nodes. + +**--cgroup-parent**=*CGROUP-PARENT* + Path to `cgroups` under which the container's `cgroup` are created. + + If the path is not absolute, the path is considered relative to the `cgroups` path of the init process. +Cgroups are created if they do not already exist. 
+
+**--ulimit**=[]
+  Ulimit options
+
+  For more information about `ulimit` see [Setting ulimits in a
+container](https://docs.docker.com/reference/commandline/run/#setting-ulimits-in-a-container)
+
+# EXAMPLES
+
+## Building an image using a Dockerfile located inside the current directory
+
+Docker images can be built using the build command and a Dockerfile:
+
+    docker build .
+
+During the build process Docker creates intermediate images. In order to
+keep them, you must explicitly set `--rm=false`.
+
+    docker build --rm=false .
+
+A good practice is to make a sub-directory with a related name and create
+the Dockerfile in that directory. For example, a directory called mongo may
+contain a Dockerfile to create a Docker MongoDB image. Likewise, another
+directory called httpd may be used to store Dockerfiles for Apache web
+server images.
+
+It is also a good practice to add the files required for the image to the
+sub-directory. These files will then be specified with the `COPY` or `ADD`
+instructions in the `Dockerfile`.
+
+Note: If you include a tar file (a good practice), then Docker will
+automatically extract the contents of the tar file specified within the `ADD`
+instruction into the specified target.
+
+## Building an image and naming that image
+
+A good practice is to give a name to the image you are building. Note that
+only a-z0-9-_. should be used for consistency. There are no hard rules here
+but it is best to give the names consideration.
+
+The **-t**/**--tag** flag is used to rename an image. Here are some examples:
+
+Though it is not a good practice, image names can be arbitrary:
+
+    docker build -t myimage .
+
+A better approach is to provide a fully qualified and meaningful repository,
+name, and tag (where the tag in this context means the qualifier after
+the ":"). In this example we build a JBoss image for the Fedora repository
+and give it the version 1.0:
+
+    docker build -t fedora/jboss:1.0 .
+
+The next example is for the "whenry" user repository and uses Fedora and
+JBoss and gives it the version 2.1:
+
+    docker build -t whenry/fedora-jboss:v2.1 .
+
+If you do not provide a version tag, then Docker will assign `latest`:
+
+    docker build -t whenry/fedora-jboss .
+
+When you list the images, the image above will have the tag `latest`.
+
+So renaming an image is arbitrary but consideration should be given to
+a useful convention that makes sense for consumers and should also take
+into account Docker community conventions.
+
+
+## Building an image using a URL
+
+This will clone the specified GitHub repository from the URL and use it
+as context. The Dockerfile at the root of the repository is used as
+Dockerfile. This only works if the GitHub repository is a dedicated
+repository.
+
+    docker build github.com/scollier/Fedora-Dockerfiles/tree/master/apache
+
+Note: You can set an arbitrary Git repository via the `git://` schema.
+
+## Building an image using a URL to a tarball'ed context
+
+This will send the URL itself to the Docker daemon. The daemon will fetch the
+tarball archive, decompress it and use its contents as the build context. If you
+pass an *-f PATH/Dockerfile* option as well, the system will look for that file
+inside the contents of the tarball.
+
+    docker build -f dev/Dockerfile https://10.10.10.1/docker/context.tar.gz
+
+Note: supported compression formats are 'xz', 'bzip2', 'gzip' and 'identity' (no compression).
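+
+## Building an image while limiting build resources
+
+The resource flags listed in OPTIONS can be combined with any of the examples
+above. As an illustrative sketch (the image name and the limits chosen here
+are arbitrary), the following build may use at most 512 MB of memory and half
+of the default CPU share weighting of 1024:
+
+    docker build -m 512m -c 512 -t example/limited .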
+
+# HISTORY
+March 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
+June 2015, updated by Sally O'Malley
diff --git a/man/docker-commit.1.md b/man/docker-commit.1.md
new file mode 100644
index 00000000..329bc0c6
--- /dev/null
+++ b/man/docker-commit.1.md
@@ -0,0 +1,70 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-commit - Create a new image from a container's changes
+
+# SYNOPSIS
+**docker commit**
+[**-a**|**--author**[=*AUTHOR*]]
+[**--help**]
+[**-c**|**--change**[=*[]*]]
+[**-m**|**--message**[=*MESSAGE*]]
+[**-p**|**--pause**[=*true*]]
+CONTAINER [REPOSITORY[:TAG]]
+
+# DESCRIPTION
+Create a new image from an existing container specified by name or
+container ID. The new image will contain the contents of the
+container filesystem, *excluding* any data volumes.
+
+While the `docker commit` command is a convenient way of extending an
+existing image, you should prefer the use of a Dockerfile and `docker
+build` for generating images that you intend to share with other
+people.
+
+# OPTIONS
+**-a**, **--author**=""
+  Author (e.g., "John Hannibal Smith <hannibal@a-team.com>")
+
+**-c**, **--change**=[]
+  Apply specified Dockerfile instructions while committing the image
+  Supported Dockerfile instructions: `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`LABEL`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR`
+
+**--help**
+  Print usage statement
+
+**-m**, **--message**=""
+  Commit message
+
+**-p**, **--pause**=*true*|*false*
+  Pause container during commit. The default is *true*.
+
+# EXAMPLES
+
+## Creating a new image from an existing container
+An existing Fedora-based container has had Apache installed while running
+in interactive mode with the bash shell. Apache is also running. To
+create a new image run `docker ps` to find the container's ID and then run:
+
+    # docker commit -m="Added Apache to Fedora base image" \
+      -a="A D Ministrator" 98bd7fc99854 fedora/fedora_httpd:20
+
+Note that only a-z0-9-_. are allowed when naming images from an
+existing container.
+
+## Apply specified Dockerfile instructions while committing the image
+If an existing container was created without the DEBUG environment
+variable set to "true", you can create a new image based on that
+container by first getting the container's ID with `docker ps` and
+then running:
+
+    # docker commit -c="ENV DEBUG true" 98bd7fc99854 debug-image
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
+July 2014, updated by Sven Dowideit
+Oct 2014, updated by Daniel, Dao Quang Minh
+June 2015, updated by Sally O'Malley
diff --git a/man/docker-cp.1.md b/man/docker-cp.1.md
new file mode 100644
index 00000000..fe1cd9c9
--- /dev/null
+++ b/man/docker-cp.1.md
@@ -0,0 +1,151 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-cp - Copy files/folders between a container and the local filesystem.
+
+# SYNOPSIS
+**docker cp**
+[**--help**]
+CONTAINER:PATH LOCALPATH|-
+LOCALPATH|- CONTAINER:PATH
+
+# DESCRIPTION
+
+In the first synopsis form, the `docker cp` utility copies the contents of
+`PATH` from the filesystem of `CONTAINER` to the `LOCALPATH` (or streams them as
+a tar archive to `STDOUT` if `-` is specified).
+
+In the second synopsis form, the contents of `LOCALPATH` (or a tar archive
+streamed from `STDIN` if `-` is specified) are copied from the local machine to
+`PATH` in the filesystem of `CONTAINER`.
+
+You can copy to or from either a running or stopped container. The `PATH` can
+be a file or directory. The `docker cp` command assumes all `CONTAINER:PATH`
+values are relative to the `/` (root) directory of the container. This means
+supplying the initial forward slash is optional; the command sees
+`compassionate_darwin:/tmp/foo/myfile.txt` and
+`compassionate_darwin:tmp/foo/myfile.txt` as identical. If a `LOCALPATH` value
+is not absolute, it is considered relative to the current working directory.
+
+Behavior is similar to the common Unix utility `cp -a` in that directories are
+copied recursively with permissions preserved if possible. Ownership is set to
+the user and primary group on the receiving end of the transfer. For example,
+files copied to a container will be created with `UID:GID` of the root user.
+Files copied to the local machine will be created with the `UID:GID` of the
+user which invoked the `docker cp` command.
+
+Assuming a path separator of `/`, a first argument of `SRC_PATH` and second
+argument of `DST_PATH`, the behavior is as follows:
+
+- `SRC_PATH` specifies a file
+    - `DST_PATH` does not exist
+        - the file is saved to a file created at `DST_PATH`
+    - `DST_PATH` does not exist and ends with `/`
+        - Error condition: the destination directory must exist.
+    - `DST_PATH` exists and is a file
+        - the destination is overwritten with the contents of the source file
+    - `DST_PATH` exists and is a directory
+        - the file is copied into this directory using the basename from
+          `SRC_PATH`
+- `SRC_PATH` specifies a directory
+    - `DST_PATH` does not exist
+        - `DST_PATH` is created as a directory and the *contents* of the source
+          directory are copied into this directory
+    - `DST_PATH` exists and is a file
+        - Error condition: cannot copy a directory to a file
+    - `DST_PATH` exists and is a directory
+        - `SRC_PATH` does not end with `/.`
+            - the source directory is copied into this directory
+        - `SRC_PATH` does end with `/.`
+            - the *content* of the source directory is copied into this
+              directory
+
+The command requires `SRC_PATH` and `DST_PATH` to exist according to the above
+rules. If `SRC_PATH` is local and is a symbolic link, the symbolic link, not
+the target, is copied.
+
+A colon (`:`) is used as a delimiter between `CONTAINER` and `PATH`, but `:`
+could also be in a valid `LOCALPATH`, like `file:name.txt`. This ambiguity is
+resolved by requiring a `LOCALPATH` with a `:` to be made explicit with a
+relative or absolute path, for example:
+
+    `/path/to/file:name.txt` or `./file:name.txt`
+
+It is not possible to copy certain system files such as resources under
+`/proc`, `/sys`, `/dev`, and mounts created by the user in the container.
+
+Using `-` as the first argument in place of a `LOCALPATH` will stream the
+contents of `STDIN` as a tar archive which will be extracted to the `PATH` in
+the filesystem of the destination container. In this case, `PATH` must specify
+a directory.
+
+Using `-` as the second argument in place of a `LOCALPATH` will stream the
+contents of the resource from the source container as a tar archive to
+`STDOUT`.
+
+# OPTIONS
+**--help**
+  Print usage statement
+
+# EXAMPLES
+
+Suppose a container has finished producing some output as a file it saves
+somewhere in its filesystem. This could be the output of a build job or
+some other computation. You can copy these outputs from the container to a
+location on your local host.
+
+Suppose you want to copy the `/tmp/foo` directory from a container to the
+existing `/tmp` directory on your host, and you run `docker cp` in your `~`
+(home) directory on the local host:
+
+    $ docker cp compassionate_darwin:tmp/foo /tmp
+
+Docker creates a `/tmp/foo` directory on your host. Alternatively, you can omit
+the leading slash in the command. If you execute this command from your home
+directory:
+
+    $ docker cp compassionate_darwin:tmp/foo tmp
+
+If `~/tmp` does not exist, Docker will create it and copy the contents of
+`/tmp/foo` from the container into this new directory. If `~/tmp` already
+exists as a directory, then Docker will copy the contents of `/tmp/foo` from
+the container into a directory at `~/tmp/foo`.
+
+When copying a single file to an existing `LOCALPATH`, the `docker cp` command
+will either overwrite the contents of `LOCALPATH` if it is a file or place it
+into `LOCALPATH` if it is a directory, overwriting an existing file of the same
+name if one exists. For example, consider this command:
+
+    $ docker cp sharp_ptolemy:/tmp/foo/myfile.txt /test
+
+If `/test` does not exist on the local machine, it will be created as a file
+with the contents of `/tmp/foo/myfile.txt` from the container. If `/test`
+exists as a file, it will be overwritten. Lastly, if `/tmp` exists as a
+directory, the file will be copied to `/test/myfile.txt`.
+
+Next, suppose you want to copy a file or folder into a container. For example,
+this could be a configuration file or some other input to a long running
+computation that you would like to place into a created container before it
+starts. This is useful because it does not require the configuration file or
+other input to exist in the container image.
+
+If you have a file, `config.yml`, in the current directory on your local host
+and wish to copy it to an existing directory at `/etc/my-app.d` in a container,
+this command can be used:
+
+    $ docker cp config.yml myappcontainer:/etc/my-app.d
+
+If you have several files in a local directory `/config` which you need to copy
+to a directory `/etc/my-app.d` in a container:
+
+    $ docker cp /config/. myappcontainer:/etc/my-app.d
+
+The above command will copy the contents of the local `/config` directory into
+the directory `/etc/my-app.d` in the container.
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit +May 2015, updated by Josh Hawn diff --git a/man/docker-create.1.md b/man/docker-create.1.md new file mode 100644 index 00000000..fb70cabf --- /dev/null +++ b/man/docker-create.1.md @@ -0,0 +1,258 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-create - Create a new container + +# SYNOPSIS +**docker create** +[**-a**|**--attach**[=*[]*]] +[**--add-host**[=*[]*]] +[**--blkio-weight**[=*[BLKIO-WEIGHT]*]] +[**-c**|**--cpu-shares**[=*0*]] +[**--cap-add**[=*[]*]] +[**--cap-drop**[=*[]*]] +[**--cgroup-parent**[=*CGROUP-PATH*]] +[**--cidfile**[=*CIDFILE*]] +[**--cpu-period**[=*0*]] +[**--cpu-quota**[=*0*]] +[**--cpuset-cpus**[=*CPUSET-CPUS*]] +[**--cpuset-mems**[=*CPUSET-MEMS*]] +[**--device**[=*[]*]] +[**--dns**[=*[]*]] +[**--dns-search**[=*[]*]] +[**-e**|**--env**[=*[]*]] +[**--entrypoint**[=*ENTRYPOINT*]] +[**--env-file**[=*[]*]] +[**--expose**[=*[]*]] +[**--group-add**[=*[]*]] +[**-h**|**--hostname**[=*HOSTNAME*]] +[**--help**] +[**-i**|**--interactive**[=*false*]] +[**--ipc**[=*IPC*]] +[**-l**|**--label**[=*[]*]] +[**--label-file**[=*[]*]] +[**--link**[=*[]*]] +[**--log-driver**[=*[]*]] +[**--log-opt**[=*[]*]] +[**--lxc-conf**[=*[]*]] +[**-m**|**--memory**[=*MEMORY*]] +[**--mac-address**[=*MAC-ADDRESS*]] +[**--memory-swap**[=*MEMORY-SWAP*]] +[**--memory-swappiness**[=*MEMORY-SWAPPINESS*]] +[**--name**[=*NAME*]] +[**--net**[=*"bridge"*]] +[**--oom-kill-disable**[=*false*]] +[**-P**|**--publish-all**[=*false*]] +[**-p**|**--publish**[=*[]*]] +[**--pid**[=*[]*]] +[**--privileged**[=*false*]] +[**--read-only**[=*false*]] +[**--restart**[=*RESTART*]] +[**--security-opt**[=*[]*]] +[**-t**|**--tty**[=*false*]] +[**-u**|**--user**[=*USER*]] +[**--ulimit**[=*[]*]] +[**--uts**[=*[]*]] +[**-v**|**--volume**[=*[]*]] +[**--volumes-from**[=*[]*]] +[**-w**|**--workdir**[=*WORKDIR*]] +IMAGE [COMMAND] [ARG...] + +# DESCRIPTION + +Creates a writeable container layer over the specified image and prepares it for +running the specified command. The container ID is then printed to STDOUT. This +is similar to **docker run -d** except the container is never started. You can +then use the **docker start ** command to start the container at +any point. + +The initial status of the container created with **docker create** is 'created'. + +# OPTIONS +**-a**, **--attach**=[] + Attach to STDIN, STDOUT or STDERR. + +**--add-host**=[] + Add a custom host-to-IP mapping (host:ip) + +**--blkio-weight**=0 + Block IO weight (relative weight) accepts a weight value between 10 and 1000. + +**-c**, **--cpu-shares**=0 + CPU shares (relative weight) + +**--cap-add**=[] + Add Linux capabilities + +**--cap-drop**=[] + Drop Linux capabilities + +**--cidfile**="" + Write the container ID to the file + +**--cgroup-parent**="" + Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist. + +**--cpu-period**=0 + Limit the CPU CFS (Completely Fair Scheduler) period + +**--cpuset-cpus**="" + CPUs in which to allow execution (0-3, 0,1) + +**--cpuset-mems**="" + Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + + If you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1` +then processes in your Docker container will only use memory from the first +two memory nodes. 
+
+**--cpu-quota**=0
+   Limit the CPU CFS (Completely Fair Scheduler) quota
+
+**--device**=[]
+   Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)
+
+**--dns-search**=[]
+   Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain)
+
+**--dns**=[]
+   Set custom DNS servers
+
+**-e**, **--env**=[]
+   Set environment variables
+
+**--entrypoint**=""
+   Overwrite the default ENTRYPOINT of the image
+
+**--env-file**=[]
+   Read in a line-delimited file of environment variables
+
+**--expose**=[]
+   Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host
+
+**--group-add**=[]
+   Add additional groups to run as
+
+**-h**, **--hostname**=""
+   Container host name
+
+**--help**
+   Print usage statement
+
+**-i**, **--interactive**=*true*|*false*
+   Keep STDIN open even if not attached. The default is *false*.
+
+**--ipc**=""
+   Default is to create a private IPC namespace (POSIX SysV IPC) for the container
+   'container:<name|id>': reuses another container's shared memory, semaphores and message queues
+   'host': use the host shared memory, semaphores and message queues inside the container. Note: the host mode gives the container full access to local shared memory and is therefore considered insecure.
+
+**-l**, **--label**=[]
+   Adds metadata to a container (e.g., --label=com.example.key=value)
+
+**--label-file**=[]
+   Read labels from a file. Delimit each label with an EOL.
+
+**--link**=[]
+   Add link to another container in the form of name:alias or just name,
+in which case the alias will match the name.
+
+**--lxc-conf**=[]
+   (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
+
+**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*none*"
+   Logging driver for container. Default is defined by daemon `--log-driver` flag.
+   **Warning**: `docker logs` command works only for `json-file` logging driver.
+
+**--log-opt**=[]
+   Logging driver specific options.
+
+**-m**, **--memory**=""
+   Memory limit (format: <number><optional unit>, where unit = b, k, m or g)
+
+   Allows you to constrain the memory available to a container. If the host
+supports swap memory, then the **-m** memory setting can be larger than physical
+RAM. If a limit of 0 is specified (not using **-m**), the container's memory is
+not limited. The actual limit may be rounded up to a multiple of the operating
+system's page size (the value would be very large, that's millions of trillions).
+
+**--memory-swap**=""
+   Total memory limit (memory + swap)
+
+   Set `-1` to disable swap (format: <number><optional unit>, where unit = b, k, m or g).
+This value should always be larger than **-m**, so you should always use this with **-m**.
+
+**--mac-address**=""
+   Container MAC address (e.g. 92:d0:c6:0a:29:33)
+
+**--name**=""
+   Assign a name to the container
+
+**--net**="bridge"
+   Set the Network mode for the container
+   'bridge': creates a new network stack for the container on the docker bridge
+   'none': no networking for this container
+   'container:<name|id>': reuses another container network stack
+   'host': use the host network stack inside the container. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure.
+
+**--oom-kill-disable**=*true*|*false*
+   Whether to disable OOM Killer for the container or not.
+
+**-P**, **--publish-all**=*true*|*false*
+   Publish all exposed ports to random ports on the host interfaces. The default is *false*.
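+
+   For example, a minimal sketch (using `nginx` as a stand-in image name and
+`web` as a hypothetical container name) that creates a container whose exposed
+ports will all be bound to random host ports once it is started; the resulting
+mapping can then be inspected with `docker port`:
+
+    $ docker create -P --name web nginx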
+
+**-p**, **--publish**=[]
+   Publish a container's port, or a range of ports, to the host
+   format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort
+   Both hostPort and containerPort can be specified as a range of ports.
+   When specifying ranges for both, the number of container ports in the range must match the number of host ports in the range. (e.g., `-p 1234-1236:1234-1236/tcp`)
+   (use 'docker port' to see the actual mapping)
+
+**--pid**=host
+   Set the PID mode for the container
+     **host**: use the host's PID namespace inside the container.
+     Note: the host mode gives the container full access to local PID and is therefore considered insecure.
+
+**--uts**=host
+   Set the UTS mode for the container
+     **host**: use the host's UTS namespace inside the container.
+     Note: the host mode gives the container access to changing the host's hostname and is therefore considered insecure.
+
+**--privileged**=*true*|*false*
+   Give extended privileges to this container. The default is *false*.
+
+**--read-only**=*true*|*false*
+   Mount the container's root filesystem as read only.
+
+**--restart**="no"
+   Restart policy to apply when a container exits (no, on-failure[:max-retry], always)
+
+**--security-opt**=[]
+   Security Options
+
+**--memory-swappiness**=""
+   Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+
+**-t**, **--tty**=*true*|*false*
+   Allocate a pseudo-TTY. The default is *false*.
+
+**-u**, **--user**=""
+   Username or UID
+
+**--ulimit**=[]
+   Ulimit options
+
+**-v**, **--volume**=[]
+   Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)
+
+**--volumes-from**=[]
+   Mount volumes from the specified container(s)
+
+**-w**, **--workdir**=""
+   Working directory inside the container
+
+# HISTORY
+August 2014, updated by Sven Dowideit
+September 2014, updated by Sven Dowideit
+November 2014, updated by Sven Dowideit
diff --git a/man/docker-diff.1.md b/man/docker-diff.1.md
new file mode 100644
index 00000000..6c6c5025
--- /dev/null
+++ b/man/docker-diff.1.md
@@ -0,0 +1,49 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-diff - Inspect changes on a container's filesystem
+
+# SYNOPSIS
+**docker diff**
+[**--help**]
+CONTAINER
+
+# DESCRIPTION
+Inspect changes on a container's filesystem. You can use the full or
+shortened container ID or the container name set with the
+**docker run --name** option.
+
+# OPTIONS
+**--help**
+   Print usage statement
+
+# EXAMPLES
+Inspect the changes on an nginx container:
+
+    # docker diff 1fdfd1f54c1b
+    C /dev
+    C /dev/console
+    C /dev/core
+    C /dev/stdout
+    C /dev/fd
+    C /dev/ptmx
+    C /dev/stderr
+    C /dev/stdin
+    C /run
+    A /run/nginx.pid
+    C /var/lib/nginx/tmp
+    A /var/lib/nginx/tmp/client_body
+    A /var/lib/nginx/tmp/fastcgi
+    A /var/lib/nginx/tmp/proxy
+    A /var/lib/nginx/tmp/scgi
+    A /var/lib/nginx/tmp/uwsgi
+    C /var/log/nginx
+    A /var/log/nginx/access.log
+    A /var/log/nginx/error.log
+
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
diff --git a/man/docker-events.1.md b/man/docker-events.1.md
new file mode 100644
index 00000000..f854bbc1
--- /dev/null
+++ b/man/docker-events.1.md
@@ -0,0 +1,86 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-events - Get real-time events from the server
+
+# SYNOPSIS
+**docker events**
+[**--help**]
+[**-f**|**--filter**[=*[]*]]
+[**--since**[=*SINCE*]]
+[**--until**[=*UNTIL*]]
+
+
+# DESCRIPTION
+Get event information from the Docker daemon. Information can include historical
+information and real-time information.
+
+Docker containers will report the following events:
+
+    create, destroy, die, export, kill, pause, restart, start, stop, unpause
+
+and Docker images will report:
+
+    untag, delete
+
+# OPTIONS
+**--help**
+   Print usage statement
+
+**-f**, **--filter**=[]
+   Provide filter values (e.g., 'event=stop')
+
+**--since**=""
+   Show all events created since timestamp
+
+**--until**=""
+   Stream events until this timestamp
+
+You can specify `--since` and `--until` parameters as an RFC 3339 date,
+a UNIX timestamp, or a Go duration string (e.g. `1m30s`, `3h`). Docker computes
+the date relative to the client machine’s time.
+
+# EXAMPLES
+
+## Listening for Docker events
+
+After running docker events, a container 786d698004576 is started and stopped
+(the container ID has been shortened in the output below):
+
+    # docker events
+    2015-01-28T20:21:31.000000000-08:00 59211849bc10: (from whenry/testimage:latest) start
+    2015-01-28T20:21:31.000000000-08:00 59211849bc10: (from whenry/testimage:latest) die
+    2015-01-28T20:21:32.000000000-08:00 59211849bc10: (from whenry/testimage:latest) stop
+
+## Listening for events since a given date
+Again the output container IDs have been shortened for the purposes of this document:
+
+    # docker events --since '2015-01-28'
+    2015-01-28T20:25:38.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) create
+    2015-01-28T20:25:38.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) start
+    2015-01-28T20:25:39.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) create
+    2015-01-28T20:25:39.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) start
+    2015-01-28T20:25:40.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) die
+    2015-01-28T20:25:42.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) stop
+    2015-01-28T20:25:45.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) start
+    2015-01-28T20:25:45.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) die
+    2015-01-28T20:25:46.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) stop
+
+The following example outputs all events that were generated in the last 3 minutes,
+relative to the current time on the client machine:
+
+    # docker events --since '3m'
+    2015-05-12T11:51:30.999999999-07:00 4386fb97867d: (from ubuntu-1:14.04) die
+    2015-05-12T15:52:12.999999999-07:00 4386fb97867d: (from ubuntu-1:14.04) stop
+    2015-05-12T15:53:45.999999999-07:00 7805c1d35632: (from redis:2.8) die
+    2015-05-12T15:54:03.999999999-07:00 7805c1d35632: (from redis:2.8) stop
+
+If you do not provide the --since option, the command returns only new and/or
+live events.
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
+June 2015, updated by Brian Goff
diff --git a/man/docker-exec.1.md b/man/docker-exec.1.md
new file mode 100644
index 00000000..c1de7b59
--- /dev/null
+++ b/man/docker-exec.1.md
@@ -0,0 +1,51 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-exec - Run a command in a running container
+
+# SYNOPSIS
+**docker exec**
+[**-d**|**--detach**[=*false*]]
+[**--help**]
+[**-i**|**--interactive**[=*false*]]
+[**-t**|**--tty**[=*false*]]
+[**-u**|**--user**[=*USER*]]
+CONTAINER COMMAND [ARG...]
+
+# DESCRIPTION
+
+Run a process in a running container.
+
+The command started using `docker exec` will only run while the container's primary
+process (`PID 1`) is running, and will not be restarted if the container is restarted.
+
+If the container is paused, then the `docker exec` command will wait until the
+container is unpaused, and then run the command.
+
+# OPTIONS
+**-d**, **--detach**=*true*|*false*
+   Detached mode: run command in the background. The default is *false*.
+
+**--help**
+   Print usage statement
+
+**-i**, **--interactive**=*true*|*false*
+   Keep STDIN open even if not attached. The default is *false*.
+
+**-t**, **--tty**=*true*|*false*
+   Allocate a pseudo-TTY. The default is *false*.
+
+**-u**, **--user**=""
+   Sets the username or UID used and optionally the groupname or GID for the specified command.
+
+   The following examples are all valid:
+   --user [user | user:group | uid | uid:gid | user:gid | uid:group ]
+
+   Without this argument the command will be run as root in the container.
+
+The **-t** option is incompatible with a redirection of the docker client
+standard input.
+
+# HISTORY
+November 2014, updated by Sven Dowideit
diff --git a/man/docker-export.1.md b/man/docker-export.1.md
new file mode 100644
index 00000000..f3096eac
--- /dev/null
+++ b/man/docker-export.1.md
@@ -0,0 +1,44 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-export - Export the contents of a container's filesystem as a tar archive
+
+# SYNOPSIS
+**docker export**
+[**--help**]
+CONTAINER
+
+# DESCRIPTION
+Export the contents of a container's filesystem using the full or shortened
+container ID or container name. The output is exported to STDOUT and can be
+redirected to a tar file.
+
+Stream to a file instead of STDOUT by using **-o**.
+
+# OPTIONS
+**--help**
+   Print usage statement
+
+**-o**, **--output**=""
+   Write to a file, instead of STDOUT
+
+# EXAMPLES
+Export the contents of the container called angry_bell to a tar file
+called angry_bell.tar:
+
+    # docker export angry_bell > angry_bell.tar
+    # docker export --output=angry_bell-latest.tar angry_bell
+    # ls -sh angry_bell.tar
+    321M angry_bell.tar
+    # ls -sh angry_bell-latest.tar
+    321M angry_bell-latest.tar
+
+# See also
+**docker-import(1)** to create an empty filesystem image
+and import the contents of the tarball into it, then optionally tag it.
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
+January 2015, updated by Joseph Kern (josephakern at gmail dot com)
diff --git a/man/docker-history.1.md b/man/docker-history.1.md
new file mode 100644
index 00000000..268e378d
--- /dev/null
+++ b/man/docker-history.1.md
@@ -0,0 +1,51 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-history - Show the history of an image
+
+# SYNOPSIS
+**docker history**
+[**-H**|**--human**[=*true*]]
+[**--help**]
+[**--no-trunc**[=*false*]]
+[**-q**|**--quiet**[=*false*]]
+IMAGE
+
+# DESCRIPTION
+
+Show the history of when and how an image was created.
+
+# OPTIONS
+**--help**
+   Print usage statement
+
+**-H**, **--human**=*true*|*false*
+   Print sizes and dates in human readable format. The default is *true*.
+
+**--no-trunc**=*true*|*false*
+   Don't truncate output. The default is *false*.
+
+**-q**, **--quiet**=*true*|*false*
+   Only show numeric IDs. The default is *false*.
+
+# EXAMPLES
+    $ docker history fedora
+    IMAGE          CREATED          CREATED BY                                      SIZE        COMMENT
+    105182bb5e8b   5 days ago       /bin/sh -c #(nop) ADD file:71356d2ad59aa3119d   372.7 MB
+    73bd853d2ea5   13 days ago      /bin/sh -c #(nop) MAINTAINER Lokesh Mandvekar   0 B
+    511136ea3c5a   10 months ago                                                    0 B         Imported from -
+
+## Display comments in the image history
+The `docker commit` command has a **-m** flag for adding comments to the image. These comments will be displayed in the image history.
+
+    $ sudo docker history docker:scm
+    IMAGE          CREATED          CREATED BY                                      SIZE        COMMENT
+    2ac9d1098bf1   3 months ago     /bin/bash                                       241.4 MB    Added Apache to Fedora base image
+    88b42ffd1f7c   5 months ago     /bin/sh -c #(nop) ADD file:1fd8d7f9f6557cafc7   373.7 MB
+    c69cab00d6ef   5 months ago     /bin/sh -c #(nop) MAINTAINER Lokesh Mandvekar   0 B
+    511136ea3c5a   19 months ago                                                    0 B         Imported from -
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
diff --git a/man/docker-images.1.md b/man/docker-images.1.md
new file mode 100644
index 00000000..16dd8647
--- /dev/null
+++ b/man/docker-images.1.md
@@ -0,0 +1,84 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-images - List images
+
+# SYNOPSIS
+**docker images**
+[**--help**]
+[**-a**|**--all**[=*false*]]
+[**--digests**[=*false*]]
+[**-f**|**--filter**[=*[]*]]
+[**--no-trunc**[=*false*]]
+[**-q**|**--quiet**[=*false*]]
+[REPOSITORY]
+
+# DESCRIPTION
+This command lists the images stored in the local Docker repository.
+
+By default, intermediate images, used during builds, are not listed. Some of the
+output, e.g., the image ID, is truncated for space reasons. However, the truncated
+image ID, often just the first few characters, is enough to use in other
+Docker commands that take an image ID. The output includes the repository, tag, image
+ID, date created and the virtual size.
+
+The title REPOSITORY for the first column may seem confusing. It is essentially
+the image name. However, because you can tag a specific image, and multiple tags
+(image instances) can be associated with a single name, the name is really a
+repository for all tagged images of the same name. For example, consider an image
+called fedora. It may be tagged with 18, 19, or 20, etc. to manage different
+versions.
+
+# OPTIONS
+**-a**, **--all**=*true*|*false*
+   Show all images (by default filter out the intermediate image layers). The default is *false*.
+
+**--digests**=*true*|*false*
+   Show image digests. The default is *false*.
+
+**-f**, **--filter**=[]
+   Filters the output.
The dangling=true filter finds unused images, while the label=com.foo=amd64 filter finds images whose com.foo label has the value amd64. The label=com.foo filter finds images with a com.foo label of any value.
+
+**--help**
+   Print usage statement
+
+**--no-trunc**=*true*|*false*
+   Don't truncate output. The default is *false*.
+
+**-q**, **--quiet**=*true*|*false*
+   Only show numeric IDs. The default is *false*.
+
+# EXAMPLES
+
+## Listing the images
+
+To list the images in a local repository (not the registry) run:
+
+    docker images
+
+The list will contain the image repository name, a tag for the image, an
+image ID, when it was created, and its virtual size. Columns: REPOSITORY, TAG,
+IMAGE ID, CREATED, and VIRTUAL SIZE.
+
+To get a verbose list of images which contains all the intermediate images
+used in builds use **-a**:
+
+    docker images -a
+
+Previously, the docker images command supported the --tree and --dot arguments,
+which displayed different visualizations of the image data. Docker core removed
+this functionality in version 1.7. If you liked this functionality, you can
+still find it in the third-party dockviz tool: https://github.com/justone/dockviz.
+
+## Listing only the shortened image IDs
+
+Listing just the shortened image IDs can be useful for some automated
+tools.
+
+    docker images -q
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
diff --git a/man/docker-import.1.md b/man/docker-import.1.md
new file mode 100644
index 00000000..5ac68667
--- /dev/null
+++ b/man/docker-import.1.md
@@ -0,0 +1,64 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-import - Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it.
+
+# SYNOPSIS
+**docker import**
+[**-c**|**--change**[=*[]*]]
+[**--help**]
+file|URL|- [REPOSITORY[:TAG]]
+
+# DESCRIPTION
+Create an empty filesystem image and import the contents of a tarball (`.tar`,
+`.tar.gz`, `.tgz`, `.bzip`, `.tar.xz`, `.txz`) into it, then optionally tag it.
+
+# OPTIONS
+**-c**, **--change**=[]
+   Apply specified Dockerfile instructions while importing the image
+   Supported Dockerfile instructions: `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR`
+
+**--help**
+   Print usage statement
+
+# EXAMPLES
+
+## Import from a remote location
+
+    # docker import http://example.com/exampleimage.tgz example/imagerepo
+
+## Import from a local file
+
+Import to docker via pipe and stdin:
+
+    # cat exampleimage.tgz | docker import - example/imagelocal
+
+Import to a Docker image from a local file:
+
+    # docker import /path/to/exampleimage.tgz
+
+
+## Import from a local file and tag
+
+Import to docker via pipe and stdin:
+
+    # cat exampleimageV2.tgz | docker import - example/imagelocal:V-2.0
+
+## Import from a local directory
+
+    # tar -c . | docker import - exampleimagedir
+
+## Apply specified Dockerfile instructions while importing the image
+This example sets the docker image ENV variable DEBUG to true by default.
+
+    # tar -c . | docker import -c="ENV DEBUG true" - exampleimagedir
+
+# See also
+**docker-export(1)** to export the contents of a filesystem as a tar archive to STDOUT.
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
diff --git a/man/docker-info.1.md b/man/docker-info.1.md
new file mode 100644
index 00000000..a3bbd798
--- /dev/null
+++ b/man/docker-info.1.md
@@ -0,0 +1,49 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-info - Display system-wide information
+
+# SYNOPSIS
+**docker info**
+[**--help**]
+
+
+# DESCRIPTION
+This command displays system-wide information regarding the Docker installation.
+Information displayed includes the number of containers and images, pool name,
+data file, metadata file, data space used, total data space, metadata space used,
+total metadata space, execution driver, and the kernel version.
+
+The data file is where the images are stored and the metadata file is where the
+metadata regarding those images is stored. When run for the first time Docker
+allocates a certain amount of data space and metadata space from the space
+available on the volume where `/var/lib/docker` is mounted.
+
+# OPTIONS
+**--help**
+   Print usage statement
+
+# EXAMPLES
+
+## Display Docker system information
+
+Here is a sample output:
+
+    # docker info
+    Containers: 14
+    Images: 52
+    Storage Driver: aufs
+     Root Dir: /var/lib/docker/aufs
+     Dirs: 80
+    Execution Driver: native-0.2
+    Logging Driver: json-file
+    Kernel Version: 3.13.0-24-generic
+    Operating System: Ubuntu 14.04 LTS
+    CPUs: 1
+    Total Memory: 2 GiB
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
diff --git a/man/docker-inspect.1.md b/man/docker-inspect.1.md
new file mode 100644
index 00000000..d4234319
--- /dev/null
+++ b/man/docker-inspect.1.md
@@ -0,0 +1,290 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-inspect - Return low-level information on a container or image
+
+# SYNOPSIS
+**docker inspect**
+[**--help**]
+[**-f**|**--format**[=*FORMAT*]]
+[**--type**=*container*|*image*]
+CONTAINER|IMAGE [CONTAINER|IMAGE...]
+
+# DESCRIPTION
+
+This displays all the information available in Docker for a given
+container or image. By default, this will render all results in a JSON
+array. If a format is specified, the given template will be executed for
+each result.
+
+# OPTIONS
+**--help**
+   Print usage statement
+
+**-f**, **--format**=""
+   Format the output using the given Go template.
+
+**--type**=*container*|*image*
+   Return JSON for the specified type; permissible values are "image" or "container"
+
+# EXAMPLES
+
+Getting information on an image when the image name conflicts with a container
+name, e.g., both the image and the container are named rhel7:
+
+    $ docker inspect --type=image rhel7
+    [
+    {
+     "Id": "fe01a428b9d9de35d29531e9994157978e8c48fa693e1bf1d221dffbbb67b170",
+     "Parent": "10acc31def5d6f249b548e01e8ffbaccfd61af0240c17315a7ad393d022c5ca2",
+     ....
+ } + ] + +## Getting information on a container + +To get information on a container use its ID or instance name: + + $ docker inspect d2cc496561d6 + [{ + "Id": "d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47", + "Created": "2015-06-08T16:18:02.505155285Z", + "Path": "bash", + "Args": [], + "State": { + "Running": false, + "Paused": false, + "Restarting": false, + "OOMKilled": false, + "Dead": false, + "Pid": 0, + "ExitCode": 0, + "Error": "", + "StartedAt": "2015-06-08T16:18:03.643865954Z", + "FinishedAt": "2015-06-08T16:57:06.448552862Z" + }, + "Image": "ded7cd95e059788f2586a51c275a4f151653779d6a7f4dad77c2bd34601d94e4", + "NetworkSettings": { + "Bridge": "", + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "HairpinMode": false, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "MacAddress": "", + "NetworkID": "", + "PortMapping": null, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null + }, + "ResolvConfPath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/resolv.conf", + "HostnamePath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/hostname", + "HostsPath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/hosts", + "LogPath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47-json.log", + "Name": "/adoring_wozniak", + "RestartCount": 0, + "Driver": "devicemapper", + "ExecDriver": "native-0.2", + "MountLabel": "", + "ProcessLabel": "", + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "AppArmorProfile": "", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 0, + "CpuPeriod": 0, + "CpusetCpus": "", + "CpusetMems": "", + "CpuQuota": 0, + "BlkioWeight": 0, + "OomKillDisable": false, + "Privileged": false, + "PortBindings": {}, + "Links": null, + "PublishAllPorts": false, + "Dns": null, + "DnsSearch": null, + "ExtraHosts": null, + "VolumesFrom": null, + "Devices": [], + "NetworkMode": "bridge", + "IpcMode": "", + "PidMode": "", + "UTSMode": "", + "CapAdd": null, + "CapDrop": null, + "RestartPolicy": { + "Name": "no", + "MaximumRetryCount": 0 + }, + "SecurityOpt": null, + "ReadonlyRootfs": false, + "Ulimits": null, + "LogConfig": { + "Type": "json-file", + "Config": {} + }, + "CgroupParent": "" + }, + "GraphDriver": { + "Name": "devicemapper", + "Data": { + "DeviceId": "5", + "DeviceName": "docker-253:1-2763198-d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47", + "DeviceSize": "171798691840" + } + }, + "Config": { + "Hostname": "d2cc496561d6", + "Domainname": "", + "User": "", + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "ExposedPorts": null, + "Tty": true, + "OpenStdin": true, + "StdinOnce": true, + "Env": null, + "Cmd": [ + "bash" + ], + "Image": "fedora", + "Volumes": null, + "VolumeDriver": "", + "WorkingDir": "", + "Entrypoint": null, + "NetworkDisabled": false, + "MacAddress": "", + "OnBuild": null, + "Labels": {}, + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 0, + "Cpuset": "" + } + } + ] +## Getting the IP address of a container instance + +To get the IP address of 
a container use: + + $ docker inspect --format='{{.NetworkSettings.IPAddress}}' d2cc496561d6 + 172.17.0.2 + +## Listing all port bindings + +One can loop over arrays and maps in the results to produce simple text +output: + + $ docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} \ + {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' d2cc496561d6 + 80/tcp -> 80 + +You can get more information about how to write a go template from: +http://golang.org/pkg/text/template/. + +## Getting information on an image + +Use an image's ID or name (e.g., repository/name[:tag]) to get information +on it. + + $ docker inspect ded7cd95e059 + [{ + "Id": "ded7cd95e059788f2586a51c275a4f151653779d6a7f4dad77c2bd34601d94e4", + "Parent": "48ecf305d2cf7046c1f5f8fcbcd4994403173441d4a7f125b1bb0ceead9de731", + "Comment": "", + "Created": "2015-05-27T16:58:22.937503085Z", + "Container": "76cf7f67d83a7a047454b33007d03e32a8f474ad332c3a03c94537edd22b312b", + "ContainerConfig": { + "Hostname": "76cf7f67d83a", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "ExposedPorts": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "/bin/sh", + "-c", + "#(nop) ADD file:4be46382bcf2b095fcb9fe8334206b584eff60bb3fad8178cbd97697fcb2ea83 in /" + ], + "Image": "48ecf305d2cf7046c1f5f8fcbcd4994403173441d4a7f125b1bb0ceead9de731", + "Volumes": null, + "VolumeDriver": "", + "WorkingDir": "", + "Entrypoint": null, + "NetworkDisabled": false, + "MacAddress": "", + "OnBuild": null, + "Labels": {} + }, + "DockerVersion": "1.6.0", + "Author": "Lokesh Mandvekar \u003clsm5@fedoraproject.org\u003e", + "Config": { + "Hostname": "76cf7f67d83a", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "ExposedPorts": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": null, + "Image": "48ecf305d2cf7046c1f5f8fcbcd4994403173441d4a7f125b1bb0ceead9de731", + "Volumes": null, + "VolumeDriver": "", + "WorkingDir": "", + "Entrypoint": null, + "NetworkDisabled": false, + "MacAddress": "", + "OnBuild": null, + "Labels": {} + }, + "Architecture": "amd64", + "Os": "linux", + "Size": 186507296, + "VirtualSize": 186507296, + "GraphDriver": { + "Name": "devicemapper", + "Data": { + "DeviceId": "3", + "DeviceName": "docker-253:1-2763198-ded7cd95e059788f2586a51c275a4f151653779d6a7f4dad77c2bd34601d94e4", + "DeviceSize": "171798691840" + } + } + } + ] + +# HISTORY +April 2014, originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +April 2015, updated by Qiang Huang diff --git a/man/docker-kill.1.md b/man/docker-kill.1.md new file mode 100644 index 00000000..cfab3f8e --- /dev/null +++ b/man/docker-kill.1.md @@ -0,0 +1,28 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-kill - Kill a running container using SIGKILL or a specified signal + +# SYNOPSIS +**docker kill** +[**--help**] +[**-s**|**--signal**[=*"KILL"*]] +CONTAINER [CONTAINER...] + +# DESCRIPTION + +The main process inside each container specified will be sent SIGKILL, + or any signal specified with option --signal. 
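+
+For example, a minimal sketch (assuming a running container named
+`mycontainer`, a hypothetical name) that sends SIGHUP instead of the
+default SIGKILL:
+
+    $ docker kill --signal="SIGHUP" mycontainer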
+ +# OPTIONS +**--help** + Print usage statement + +**-s**, **--signal**="KILL" + Signal to send to the container + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) + based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/man/docker-load.1.md b/man/docker-load.1.md new file mode 100644 index 00000000..1a43a5e0 --- /dev/null +++ b/man/docker-load.1.md @@ -0,0 +1,45 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-load - Load an image from a tar archive or STDIN + +# SYNOPSIS +**docker load** +[**--help**] +[**-i**|**--input**[=*INPUT*]] + + +# DESCRIPTION + +Loads a tarred repository from a file or the standard input stream. +Restores both images and tags. + +# OPTIONS +**--help** + Print usage statement + +**-i**, **--input**="" + Read from a tar archive file, instead of STDIN + +# EXAMPLES + + $ docker images + REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + busybox latest 769b9341d937 7 weeks ago 2.489 MB + $ docker load --input fedora.tar + $ docker images + REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + busybox latest 769b9341d937 7 weeks ago 2.489 MB + fedora rawhide 0d20aec6529d 7 weeks ago 387 MB + fedora 20 58394af37342 7 weeks ago 385.5 MB + fedora heisenbug 58394af37342 7 weeks ago 385.5 MB + fedora latest 58394af37342 7 weeks ago 385.5 MB + +# See also +**docker-save(1)** to save an image(s) to a tar archive (streamed to STDOUT by default). + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/man/docker-login.1.md b/man/docker-login.1.md new file mode 100644 index 00000000..b87e2c15 --- /dev/null +++ b/man/docker-login.1.md @@ -0,0 +1,51 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-login - Register or log in to a Docker registry. + +# SYNOPSIS +**docker login** +[**-e**|**--email**[=*EMAIL*]] +[**--help**] +[**-p**|**--password**[=*PASSWORD*]] +[**-u**|**--username**[=*USERNAME*]] +[SERVER] + +# DESCRIPTION +Register or log in to a Docker Registry located on the specified +`SERVER`. You can specify a URL or a `hostname` for the `SERVER` value. If you +do not specify a `SERVER`, the command uses Docker's public registry located at +`https://registry-1.docker.io/` by default. To get a username/password for Docker's public registry, create an account on Docker Hub. + +You can log into any public or private repository for which you have +credentials. When you log in, the command stores encoded credentials in +`$HOME/.docker/config.json` on Linux or `%USERPROFILE%/.docker/config.json` on Windows. + +# OPTIONS +**-e**, **--email**="" + Email + +**--help** + Print usage statement + +**-p**, **--password**="" + Password + +**-u**, **--username**="" + Username + +# EXAMPLES + +## Login to a registry on your localhost + + # docker login localhost:8080 + +# See also +**docker-logout(1)** to log out from a Docker registry. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. 
+June 2014, updated by Sven Dowideit +April 2015, updated by Mary Anthony for v2 diff --git a/man/docker-logout.1.md b/man/docker-logout.1.md new file mode 100644 index 00000000..d1169867 --- /dev/null +++ b/man/docker-logout.1.md @@ -0,0 +1,32 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-logout - Log out from a Docker registry. + +# SYNOPSIS +**docker logout** +[SERVER] + +# DESCRIPTION +Log out of a Docker Registry located on the specified `SERVER`. You can +specify a URL or a `hostname` for the `SERVER` value. If you do not specify a +`SERVER`, the command attempts to log you out of Docker's public registry +located at `https://registry-1.docker.io/` by default. + +# OPTIONS +There are no available options. + +# EXAMPLES + +## Log out from a registry on your localhost + + # docker logout localhost:8080 + +# See also +**docker-login(1)** to register or log in to a Docker registry server. + +# HISTORY +June 2014, Originally compiled by Daniel, Dao Quang Minh (daniel at nitrous dot io) +July 2014, updated by Sven Dowideit +April 2015, updated by Mary Anthony for v2 diff --git a/man/docker-logs.1.md b/man/docker-logs.1.md new file mode 100644 index 00000000..8ecc20df --- /dev/null +++ b/man/docker-logs.1.md @@ -0,0 +1,55 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-logs - Fetch the logs of a container + +# SYNOPSIS +**docker logs** +[**-f**|**--follow**[=*false*]] +[**--help**] +[**--since**[=*SINCE*]] +[**-t**|**--timestamps**[=*false*]] +[**--tail**[=*"all"*]] +CONTAINER + +# DESCRIPTION +The **docker logs** command batch-retrieves whatever logs are present for +a container at the time of execution. This does not guarantee execution +order when combined with a docker run (i.e., your run may not have generated +any logs at the time you execute docker logs). + +The **docker logs --follow** command combines commands **docker logs** and +**docker attach**. It will first return all logs from the beginning and +then continue streaming new output from the container’s stdout and stderr. + +**Warning**: This command works only for **json-file** logging driver. + +# OPTIONS +**--help** + Print usage statement + +**-f**, **--follow**=*true*|*false* + Follow log output. The default is *false*. + +**--since**="" + Show logs since timestamp + +**-t**, **--timestamps**=*true*|*false* + Show timestamps. The default is *false*. + +**--tail**="all" + Output the specified number of lines at the end of logs (defaults to all logs) + +The `--since` option shows only the container logs generated after +a given date. You can specify the date as an RFC 3339 date, a UNIX +timestamp, or a Go duration string (e.g. `1m30s`, `3h`). Docker computes +the date relative to the client machine’s time. You can combine +the `--since` option with either or both of the `--follow` or `--tail` options. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +July 2014, updated by Sven Dowideit +April 2015, updated by Ahmet Alp Balkan diff --git a/man/docker-pause.1.md b/man/docker-pause.1.md new file mode 100644 index 00000000..5d2267af --- /dev/null +++ b/man/docker-pause.1.md @@ -0,0 +1,30 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-pause - Pause all processes within a container + +# SYNOPSIS +**docker pause** +CONTAINER [CONTAINER...] 
+
+# DESCRIPTION
+
+The `docker pause` command uses the cgroups freezer to suspend all processes in
+a container. Traditionally, when suspending a process the `SIGSTOP` signal is
+used, which is observable by the process being suspended. With the cgroups freezer
+the process is unaware, and unable to capture, that it is being suspended,
+and subsequently resumed.
+
+See the
+[cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroups/freezer-subsystem.txt)
+for further details.
+
+# OPTIONS
+There are no available options.
+
+# See also
+**docker-unpause(1)** to unpause all processes within a container.
+
+# HISTORY
+June 2014, updated by Sven Dowideit
diff --git a/man/docker-port.1.md b/man/docker-port.1.md
new file mode 100644
index 00000000..83e9cf93
--- /dev/null
+++ b/man/docker-port.1.md
@@ -0,0 +1,47 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-port - List port mappings for the CONTAINER, or look up the public-facing port that is NAT-ed to the PRIVATE_PORT
+
+# SYNOPSIS
+**docker port**
+[**--help**]
+CONTAINER [PRIVATE_PORT[/PROTO]]
+
+# DESCRIPTION
+List port mappings for the CONTAINER, or look up the public-facing port that is NAT-ed to the PRIVATE_PORT
+
+# OPTIONS
+**--help**
+   Print usage statement
+
+# EXAMPLES
+
+    # docker ps
+    CONTAINER ID   IMAGE            COMMAND   CREATED          STATUS          PORTS                                            NAMES
+    b650456536c7   busybox:latest   top       54 minutes ago   Up 54 minutes   0.0.0.0:1234->9876/tcp, 0.0.0.0:4321->7890/tcp   test
+
+## Find out all the ports mapped
+
+    # docker port test
+    7890/tcp -> 0.0.0.0:4321
+    9876/tcp -> 0.0.0.0:1234
+
+## Find out a specific mapping
+
+    # docker port test 7890/tcp
+    0.0.0.0:4321
+
+    # docker port test 7890
+    0.0.0.0:4321
+
+## An example showing error for non-existent mapping
+
+    # docker port test 7890/udp
+    2014/06/24 11:53:36 Error: No public port '7890/udp' published for test
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+June 2014, updated by Sven Dowideit
+November 2014, updated by Sven Dowideit
diff --git a/man/docker-ps.1.md b/man/docker-ps.1.md
new file mode 100644
index 00000000..0fdf7ccc
--- /dev/null
+++ b/man/docker-ps.1.md
@@ -0,0 +1,132 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% FEBRUARY 2015
+# NAME
+docker-ps - List containers
+
+# SYNOPSIS
+**docker ps**
+[**-a**|**--all**[=*false*]]
+[**--before**[=*BEFORE*]]
+[**--help**]
+[**-f**|**--filter**[=*[]*]]
+[**-l**|**--latest**[=*false*]]
+[**-n**[=*-1*]]
+[**--no-trunc**[=*false*]]
+[**-q**|**--quiet**[=*false*]]
+[**-s**|**--size**[=*false*]]
+[**--since**[=*SINCE*]]
+[**--format**=*"TEMPLATE"*]
+
+
+# DESCRIPTION
+
+List the containers in the local repository. By default this shows only
+the running containers.
+
+# OPTIONS
+**-a**, **--all**=*true*|*false*
+   Show all containers. Only running containers are shown by default. The default is *false*.
+
+**--before**=""
+   Show only containers created before Id or Name, including non-running containers.
+
+**--help**
+   Print usage statement
+
+**-f**, **--filter**=[]
+   Provide filter values. Valid filters:
+   exited=<int> - containers with exit code of <int>
+   label=<key> or label=<key>=<value>
+   status=(created|restarting|running|paused|exited)
+   name=<string> - container's name
+   id=<ID> - container's ID
+
+**-l**, **--latest**=*true*|*false*
+   Show only the latest created container, including non-running ones. The default is *false*.
+
+**-n**=-1
+   Show the n last created containers, including non-running ones.
+
+**--no-trunc**=*true*|*false*
+   Don't truncate output.
The default is *false*.
+
+**-q**, **--quiet**=*true*|*false*
+   Only display numeric IDs. The default is *false*.
+
+**-s**, **--size**=*true*|*false*
+   Display total file sizes. The default is *false*.
+
+**--since**=""
+   Show only containers created since Id or Name, including non-running ones.
+
+**--format**=*"TEMPLATE"*
+   Pretty-print containers using a Go template.
+   Valid placeholders:
+      .ID - Container ID
+      .Image - Image ID
+      .Command - Quoted command
+      .CreatedAt - Time when the container was created.
+      .RunningFor - Elapsed time since the container was started.
+      .Ports - Exposed ports.
+      .Status - Container status.
+      .Size - Container disk size.
+      .Labels - All labels assigned to the container.
+      .Label - Value of a specific label for this container. For example `{{.Label "com.docker.swarm.cpu"}}`
+
+# EXAMPLES
+# Display all containers, including non-running
+
+    # docker ps -a
+    CONTAINER ID   IMAGE                  COMMAND                CREATED          STATUS   PORTS   NAMES
+    a87ecb4f327c   fedora:20              /bin/sh -c #(nop) MA   20 minutes ago   Exit 0           desperate_brattain
+    01946d9d34d8   vpavlin/rhel7:latest   /bin/sh -c #(nop) MA   33 minutes ago   Exit 0           thirsty_bell
+    c1d3b0166030   acffc0358b9e           /bin/sh -c yum -y up   2 weeks ago      Exit 1           determined_torvalds
+    41d50ecd2f57   fedora:20              /bin/sh -c #(nop) MA   2 weeks ago      Exit 0           drunk_pike
+
+# Display only IDs of all containers, including non-running
+
+    # docker ps -a -q
+    a87ecb4f327c
+    01946d9d34d8
+    c1d3b0166030
+    41d50ecd2f57
+
+# Display only IDs of all containers that have the name `determined_torvalds`
+
+    # docker ps -a -q --filter=name=determined_torvalds
+    c1d3b0166030
+
+# Display containers with their commands
+
+    # docker ps --format "{{.ID}}: {{.Command}}"
+    a87ecb4f327c: /bin/sh -c #(nop) MA
+    01946d9d34d8: /bin/sh -c #(nop) MA
+    c1d3b0166030: /bin/sh -c yum -y up
+    41d50ecd2f57: /bin/sh -c #(nop) MA
+
+# Display containers with their labels in a table
+
+    # docker ps --format "table {{.ID}}\t{{.Labels}}"
+    CONTAINER ID   LABELS
+    a87ecb4f327c   com.docker.swarm.node=ubuntu,com.docker.swarm.storage=ssd
+    01946d9d34d8
+    c1d3b0166030   com.docker.swarm.node=debian,com.docker.swarm.cpu=6
+    41d50ecd2f57   com.docker.swarm.node=fedora,com.docker.swarm.cpu=3,com.docker.swarm.storage=ssd
+
+# Display containers with their node label in a table
+
+    # docker ps --format 'table {{.ID}}\t{{(.Label "com.docker.swarm.node")}}'
+    CONTAINER ID   NODE
+    a87ecb4f327c   ubuntu
+    01946d9d34d8
+    c1d3b0166030   debian
+    41d50ecd2f57   fedora
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
+August 2014, updated by Sven Dowideit
+November 2014, updated by Sven Dowideit
+February 2015, updated by André Martins
diff --git a/man/docker-pull.1.md b/man/docker-pull.1.md
new file mode 100644
index 00000000..5a2cd83e
--- /dev/null
+++ b/man/docker-pull.1.md
@@ -0,0 +1,74 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-pull - Pull an image or a repository from a registry
+
+# SYNOPSIS
+**docker pull**
+[**-a**|**--all-tags**[=*false*]]
+[**--help**]
+NAME[:TAG] | [REGISTRY_HOST[:REGISTRY_PORT]/]NAME[:TAG]
+
+# DESCRIPTION
+
+This command pulls down an image or a repository from a registry. If
+there is more than one image for a repository (e.g., fedora) then all
+images for that repository name are pulled down, including any tags.
+
+If you do not specify a `REGISTRY_HOST`, the command uses Docker's public
+registry located at `registry-1.docker.io` by default.
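+
+For example, a minimal sketch (assuming a hypothetical private registry
+reachable at `myregistry.example.com:5000`) that pulls a tagged image from
+that host instead of the public registry:
+
+    $ docker pull myregistry.example.com:5000/fedora:20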
+ +# OPTIONS +**-a**, **--all-tags**=*true*|*false* + Download all tagged images in the repository. The default is *false*. + +**--help** + Print usage statement + +# EXAMPLE + +# Pull a repository with multiple images +# Note that if the image is previously downloaded then the status would be +# 'Status: Image is up to date for fedora' + + $ docker pull fedora + Pulling repository fedora + ad57ef8d78d7: Download complete + 105182bb5e8b: Download complete + 511136ea3c5a: Download complete + 73bd853d2ea5: Download complete + + Status: Downloaded newer image for fedora + + $ docker images + REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + fedora rawhide ad57ef8d78d7 5 days ago 359.3 MB + fedora 20 105182bb5e8b 5 days ago 372.7 MB + fedora heisenbug 105182bb5e8b 5 days ago 372.7 MB + fedora latest 105182bb5e8b 5 days ago 372.7 MB + +# Pull an image, manually specifying path to Docker's public registry and tag +# Note that if the image is previously downloaded then the status would be +# 'Status: Image is up to date for registry.hub.docker.com/fedora:20' + + $ docker pull registry.hub.docker.com/fedora:20 + Pulling repository fedora + 3f2fed40e4b0: Download complete + 511136ea3c5a: Download complete + fd241224e9cf: Download complete + + Status: Downloaded newer image for registry.hub.docker.com/fedora:20 + + $ docker images + REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + fedora 20 3f2fed40e4b0 4 days ago 372.7 MB + + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +August 2014, updated by Sven Dowideit +April 2015, updated by John Willis +April 2015, updated by Mary Anthony for v2 diff --git a/man/docker-push.1.md b/man/docker-push.1.md new file mode 100644 index 00000000..cf4bc255 --- /dev/null +++ b/man/docker-push.1.md @@ -0,0 +1,52 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-push - Push an image or a repository to a registry + +# SYNOPSIS +**docker push** +[**--help**] +NAME[:TAG] | [REGISTRY_HOST[:REGISTRY_PORT]/]NAME[:TAG] + +# DESCRIPTION + +This command pushes an image or a repository to a registry. If you do not +specify a `REGISTRY_HOST`, the command uses Docker's public registry located at +`registry-1.docker.io` by default. + +# OPTIONS +**--help** + Print usage statement + +# EXAMPLES + +# Pushing a new image to a registry + +First save the new image by finding the container ID (using **docker ps**) +and then committing it to a new image name. Note that only a-z0-9-_. are +allowed when naming images: + + # docker commit c16378f943fe rhel-httpd + +Now, push the image to the registry using the image ID. In this example the +registry is on host named `registry-host` and listening on port `5000`. To do +this, tag the image with the host name or IP address, and the port of the +registry: + + # docker tag rhel-httpd registry-host:5000/myadmin/rhel-httpd + # docker push registry-host:5000/myadmin/rhel-httpd + +Check that this worked by running: + + # docker images + +You should see both `rhel-httpd` and `registry-host:5000/myadmin/rhel-httpd` +listed. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. 
+June 2014, updated by Sven Dowideit
+April 2015, updated by Mary Anthony for v2
+June 2015, updated by Sally O'Malley
diff --git a/man/docker-rename.1.md b/man/docker-rename.1.md
new file mode 100644
index 00000000..f741a15b
--- /dev/null
+++ b/man/docker-rename.1.md
@@ -0,0 +1,13 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% OCTOBER 2014
+# NAME
+docker-rename - Rename a container
+
+# SYNOPSIS
+**docker rename**
+OLD_NAME NEW_NAME
+
+# OPTIONS
+There are no available options.
+
diff --git a/man/docker-restart.1.md b/man/docker-restart.1.md
new file mode 100644
index 00000000..77f99d51
--- /dev/null
+++ b/man/docker-restart.1.md
@@ -0,0 +1,26 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-restart - Restart a running container
+
+# SYNOPSIS
+**docker restart**
+[**--help**]
+[**-t**|**--time**[=*10*]]
+CONTAINER [CONTAINER...]
+
+# DESCRIPTION
+Restart each container listed.
+
+# OPTIONS
+**--help**
+   Print usage statement
+
+**-t**, **--time**=10
+   Number of seconds to wait for the container to stop before killing it. Once killed it will then be restarted. Default is 10 seconds.
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
diff --git a/man/docker-rm.1.md b/man/docker-rm.1.md
new file mode 100644
index 00000000..82850a39
--- /dev/null
+++ b/man/docker-rm.1.md
@@ -0,0 +1,56 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-rm - Remove one or more containers
+
+# SYNOPSIS
+**docker rm**
+[**-f**|**--force**[=*false*]]
+[**-l**|**--link**[=*false*]]
+[**-v**|**--volumes**[=*false*]]
+CONTAINER [CONTAINER...]
+
+# DESCRIPTION
+
+**docker rm** will remove one or more containers from the host node. The
+container name or ID can be used. This does not remove images. You cannot
+remove a running container unless you use the \fB-f\fR option. To see all
+containers on a host use the **docker ps -a** command.
+
+# OPTIONS
+**--help**
+   Print usage statement
+
+**-f**, **--force**=*true*|*false*
+   Force the removal of a running container (uses SIGKILL). The default is *false*.
+
+**-l**, **--link**=*true*|*false*
+   Remove the specified link and not the underlying container. The default is *false*.
+
+**-v**, **--volumes**=*true*|*false*
+   Remove the volumes associated with the container. The default is *false*.
+
+# EXAMPLES
+
+## Removing a container using its ID
+
+To remove a container using its ID, find it either from the **docker ps -a**
+command, use the ID returned from the **docker run** command, or retrieve
+it from a file used to store it via the **docker run --cidfile** option:
+
+    docker rm abebf7571666
+
+## Removing a container using the container name
+
+The name of the container can be found using the **docker ps -a**
+command. Then use that name as follows:
+
+    docker rm hopeful_morse
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit +July 2014, updated by Sven Dowideit +August 2014, updated by Sven Dowideit diff --git a/man/docker-rmi.1.md b/man/docker-rmi.1.md new file mode 100644 index 00000000..01dc64f5 --- /dev/null +++ b/man/docker-rmi.1.md @@ -0,0 +1,42 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-rmi - Remove one or more images + +# SYNOPSIS +**docker rmi** +[**-f**|**--force**[=*false*]] +[**--help**] +[**--no-prune**[=*false*]] +IMAGE [IMAGE...] + +# DESCRIPTION + +Removes one or more images from the host node. This does not remove images from +a registry. You cannot remove an image of a running container unless you use the +**-f** option. To see all images on a host use the **docker images** command. + +# OPTIONS +**-f**, **--force**=*true*|*false* + Force removal of the image. The default is *false*. + +**--help** + Print usage statement + +**--no-prune**=*true*|*false* + Do not delete untagged parents. The default is *false*. + +# EXAMPLES + +## Removing an image + +Here is an example of removing an image: + + docker rmi fedora/httpd + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +April 2015, updated by Mary Anthony for v2 diff --git a/man/docker-run.1.md b/man/docker-run.1.md new file mode 100644 index 00000000..d48e4125 --- /dev/null +++ b/man/docker-run.1.md @@ -0,0 +1,670 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-run - Run a command in a new container + +# SYNOPSIS +**docker run** +[**-a**|**--attach**[=*[]*]] +[**--add-host**[=*[]*]] +[**--blkio-weight**[=*[BLKIO-WEIGHT]*]] +[**-c**|**--cpu-shares**[=*0*]] +[**--cap-add**[=*[]*]] +[**--cap-drop**[=*[]*]] +[**--cgroup-parent**[=*CGROUP-PATH*]] +[**--cidfile**[=*CIDFILE*]] +[**--cpu-period**[=*0*]] +[**--cpu-quota**[=*0*]] +[**--cpuset-cpus**[=*CPUSET-CPUS*]] +[**--cpuset-mems**[=*CPUSET-MEMS*]] +[**-d**|**--detach**[=*false*]] +[**--device**[=*[]*]] +[**--dns**[=*[]*]] +[**--dns-search**[=*[]*]] +[**-e**|**--env**[=*[]*]] +[**--entrypoint**[=*ENTRYPOINT*]] +[**--env-file**[=*[]*]] +[**--expose**[=*[]*]] +[**--group-add**[=*[]*]] +[**-h**|**--hostname**[=*HOSTNAME*]] +[**--help**] +[**-i**|**--interactive**[=*false*]] +[**--ipc**[=*IPC*]] +[**-l**|**--label**[=*[]*]] +[**--label-file**[=*[]*]] +[**--link**[=*[]*]] +[**--log-driver**[=*[]*]] +[**--log-opt**[=*[]*]] +[**--lxc-conf**[=*[]*]] +[**-m**|**--memory**[=*MEMORY*]] +[**--mac-address**[=*MAC-ADDRESS*]] +[**--memory-swap**[=*MEMORY-SWAP*]] +[**--memory-swappiness**[=*MEMORY-SWAPPINESS*]] +[**--name**[=*NAME*]] +[**--net**[=*"bridge"*]] +[**--oom-kill-disable**[=*false*]] +[**-P**|**--publish-all**[=*false*]] +[**-p**|**--publish**[=*[]*]] +[**--pid**[=*[]*]] +[**--privileged**[=*false*]] +[**--read-only**[=*false*]] +[**--restart**[=*RESTART*]] +[**--rm**[=*false*]] +[**--security-opt**[=*[]*]] +[**--sig-proxy**[=*true*]] +[**-t**|**--tty**[=*false*]] +[**-u**|**--user**[=*USER*]] +[**-v**|**--volume**[=*[]*]] +[**--ulimit**[=*[]*]] +[**--uts**[=*[]*]] +[**--volumes-from**[=*[]*]] +[**-w**|**--workdir**[=*WORKDIR*]] +IMAGE [COMMAND] [ARG...] + +# DESCRIPTION + +Run a process in a new container. **docker run** starts a process with its own +file system, its own networking, and its own isolated process tree. 
The IMAGE +which starts the process may define defaults related to the process that will be +run in the container, the networking to expose, and more, but **docker run** +gives final control to the operator or administrator who starts the container +from the image. For that reason **docker run** has more options than any other +Docker command. + +If the IMAGE is not already loaded then **docker run** will pull the IMAGE, and +all image dependencies, from the repository in the same way running **docker +pull** IMAGE, before it starts the container from that image. + +# OPTIONS +**-a**, **--attach**=[] + Attach to STDIN, STDOUT or STDERR. + + In foreground mode (the default when **-d** +is not specified), **docker run** can start the process in the container +and attach the console to the process’s standard input, output, and standard +error. It can even pretend to be a TTY (this is what most commandline +executables expect) and pass along signals. The **-a** option can be set for +each of stdin, stdout, and stderr. + +**--add-host**=[] + Add a custom host-to-IP mapping (host:ip) + + Add a line to /etc/hosts. The format is hostname:ip. The **--add-host** +option can be set multiple times. + +**--blkio-weight**=0 + Block IO weight (relative weight) accepts a weight value between 10 and 1000. + +**-c**, **--cpu-shares**=0 + CPU shares (relative weight) + + By default, all containers get the same proportion of CPU cycles. This proportion +can be modified by changing the container's CPU share weighting relative +to the weighting of all other running containers. + +To modify the proportion from the default of 1024, use the **-c** or **--cpu-shares** +flag to set the weighting to 2 or higher. + +The proportion will only apply when CPU-intensive processes are running. +When tasks in one container are idle, other containers can use the +left-over CPU time. The actual amount of CPU time will vary depending on +the number of containers running on the system. + +For example, consider three containers, one has a cpu-share of 1024 and +two others have a cpu-share setting of 512. When processes in all three +containers attempt to use 100% of CPU, the first container would receive +50% of the total CPU time. If you add a fourth container with a cpu-share +of 1024, the first container only gets 33% of the CPU. The remaining containers +receive 16.5%, 16.5% and 33% of the CPU. + +On a multi-core system, the shares of CPU time are distributed over all CPU +cores. Even if a container is limited to less than 100% of CPU time, it can +use 100% of each individual CPU core. + +For example, consider a system with more than three cores. If you start one +container **{C0}** with **-c=512** running one process, and another container +**{C1}** with **-c=1024** running two processes, this can result in the following +division of CPU shares: + + PID container CPU CPU share + 100 {C0} 0 100% of CPU0 + 101 {C1} 1 100% of CPU1 + 102 {C1} 2 100% of CPU2 + +**--cap-add**=[] + Add Linux capabilities + +**--cap-drop**=[] + Drop Linux capabilities + +**--cgroup-parent**="" + Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist. + +**--cidfile**="" + Write the container ID to the file + +**--cpu-period**=0 + Limit the CPU CFS (Completely Fair Scheduler) period + + Limit the container's CPU usage. 
This flag tells the kernel to restrict the container's CPU usage to the period
+you specify.
+
+**--cpuset-cpus**=""
+   CPUs in which to allow execution (0-3, 0,1)
+
+**--cpuset-mems**=""
+   Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on
+NUMA systems.
+
+   If you have four memory nodes on your system (0-3) and use `--cpuset-mems=0,1`,
+then processes in your Docker container will only use memory from the first
+two memory nodes.
+
+**--cpu-quota**=0
+   Limit the CPU CFS (Completely Fair Scheduler) quota
+
+   Limit the container's CPU usage. By default, containers run with the full
+CPU resource. This flag tells the kernel to restrict the container's CPU usage
+to the quota you specify.
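+
+   For example, the following invocation (an illustrative sketch reusing the
+fedora image from the examples below) pins a container to the first two CPUs
+and, with the default 100ms scheduling period, caps it at half of one CPU:
+
+    # docker run --cpuset-cpus=0,1 --cpu-quota=50000 -i -t fedora /bin/bash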
+
+**-d**, **--detach**=*true*|*false*
+   Detached mode: run the container in the background and print the new
+container ID. The default is *false*.
+
+   At any time you can run **docker ps** in
+the other shell to view a list of the running containers. You can reattach to a
+detached container with **docker attach**. If you choose to run a container in
+the detached mode, then you cannot use the **--rm** option.
+
+   When attached in the tty mode, you can detach from a running container without
+stopping the process by pressing the keys CTRL-P CTRL-Q.
+
+**--device**=[]
+   Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)
+
+**--dns-search**=[]
+   Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain)
+
+**--dns**=[]
+   Set custom DNS servers
+
+   This option can be used to override the DNS
+configuration passed to the container. Typically this is necessary when the
+host DNS configuration is invalid for the container (e.g., 127.0.0.1). When this
+is the case the **--dns** flag is necessary for every run.
+
+**-e**, **--env**=[]
+   Set environment variables
+
+   This option allows you to specify arbitrary
+environment variables that are available for the process that will be launched
+inside of the container.
+
+**--entrypoint**=""
+   Overwrite the default ENTRYPOINT of the image
+
+   This option allows you to overwrite the default entrypoint of the image that
+is set in the Dockerfile. The ENTRYPOINT of an image is similar to a COMMAND
+because it specifies what executable to run when the container starts, but it is
+(purposely) more difficult to override. The ENTRYPOINT gives a container its
+default nature or behavior, so that when you set an ENTRYPOINT you can run the
+container as if it were that binary, complete with default options, and you can
+pass in more options via the COMMAND. But, sometimes an operator may want to run
+something else inside the container, so you can override the default ENTRYPOINT
+at runtime by using the **--entrypoint** option and a string to specify the new
+ENTRYPOINT.
+
+**--env-file**=[]
+   Read in a line delimited file of environment variables
+
+**--expose**=[]
+   Expose a port, or a range of ports (e.g. --expose=3300-3310), from the container without publishing it to your host
+
+**--group-add**=[]
+   Add additional groups to run as
+
+**-h**, **--hostname**=""
+   Container host name
+
+   Sets the container host name that is available inside the container.
+
+**--help**
+  Print usage statement
+
+**-i**, **--interactive**=*true*|*false*
+   Keep STDIN open even if not attached. The default is *false*.
+
+**--ipc**=""
+   Default is to create a private IPC namespace (POSIX SysV IPC) for the container
+     'container:<name|id>': reuses another container's shared memory, semaphores and message queues
+     'host': use the host's shared memory, semaphores and message queues inside the container. Note: the host mode gives the container full access to local shared memory and is therefore considered insecure.
+
+**-l**, **--label**=[]
+   Set metadata on the container (e.g., --label com.example.key=value)
+
+**--label-file**=[]
+   Read in a line delimited file of labels
+
+**--link**=[]
+   Add link to another container in the form of <name or id>:alias, or just
+<name or id>, in which case the alias will match the name
+
+   If the operator
+uses **--link** when starting the new client container, then the client
+container can access the exposed port via a private networking interface. Docker
+will set some environment variables in the client container to help indicate
+which interface and port to use.
+
+**--lxc-conf**=[]
+   (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
+
+**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*none*"
+  Logging driver for container. Default is defined by daemon `--log-driver` flag.
+  **Warning**: `docker logs` command works only for `json-file` logging driver.
+
+**--log-opt**=[]
+  Logging driver specific options.
+
+**-m**, **--memory**=""
+   Memory limit (format: <number><optional unit>, where unit = b, k, m or g)
+
+   Allows you to constrain the memory available to a container. If the host
+supports swap memory, then the **-m** memory setting can be larger than physical
+RAM. If a limit of 0 is specified (not using **-m**), the container's memory is
+not limited; in that case the limit reported by the kernel is a very large
+number (millions of trillions), rounded up to a multiple of the operating
+system's page size.
+
+**--memory-swap**=""
+   Total memory limit (memory + swap)
+
+   Set `-1` to disable swap (format: <number><optional unit>, where unit = b, k, m or g).
+This value should always be larger than **-m**, so you should always use this with **-m**.
+
+**--mac-address**=""
+   Container MAC address (e.g. 92:d0:c6:0a:29:33)
+
+   Remember that the MAC address in an Ethernet network must be unique.
+The IPv6 link-local address will be based on the device's MAC address
+according to RFC4862.
+
+**--name**=""
+   Assign a name to the container
+
+   The operator can identify a container in three ways:
+     UUID long identifier (“f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778”)
+     UUID short identifier (“f78375b1c487”)
+     Name (“jonah”)
+
+   The UUID identifiers come from the Docker daemon, and if a name is not assigned
+to the container with **--name** then the daemon will also generate a random
+string name. The name is useful when defining links (see **--link**) (or any
+other place you need to identify a container). This works for both background
+and foreground Docker containers.
+
+**--net**="bridge"
+   Set the Network mode for the container
+     'bridge': creates a new network stack for the container on the docker bridge
+     'none': no networking for this container
+     'container:<name|id>': reuses another container's network stack
+     'host': use the host network stack inside the container. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure.
+
+**--oom-kill-disable**=*true*|*false*
+   Whether to disable OOM Killer for the container or not.
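+
+   For example, the following sketch (the fedora image and the sizes are
+illustrative) caps a container at 512 MB of memory plus 512 MB of swap while
+exempting its processes from the OOM killer:
+
+    # docker run -m 512m --memory-swap 1g --oom-kill-disable=true -i -t fedora /bin/bash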
+ +**-P**, **--publish-all**=*true*|*false* + Publish all exposed ports to random ports on the host interfaces. The default is *false*. + + When set to true publish all exposed ports to the host interfaces. The +default is false. If the operator uses -P (or -p) then Docker will make the +exposed port accessible on the host and the ports will be available to any +client that can reach the host. When using -P, Docker will bind any exposed +port to a random port on the host within an *ephemeral port range* defined by +`/proc/sys/net/ipv4/ip_local_port_range`. To find the mapping between the host +ports and the exposed ports, use `docker port`. + +**-p**, **--publish**=[] + Publish a container's port, or range of ports, to the host. + format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort + Both hostPort and containerPort can be specified as a range of ports. + When specifying ranges for both, the number of container ports in the range must match the number of host ports in the range. (e.g., `-p 1234-1236:1234-1236/tcp`) + (use 'docker port' to see the actual mapping) + +**--pid**=host + Set the PID mode for the container + **host**: use the host's PID namespace inside the container. + Note: the host mode gives the container full access to local PID and is therefore considered insecure. + +**--uts**=host + Set the UTS mode for the container + **host**: use the host's UTS namespace inside the container. + Note: the host mode gives the container access to changing the host's hostname and is therefore considered insecure. + +**--privileged**=*true*|*false* + Give extended privileges to this container. The default is *false*. + + By default, Docker containers are +“unprivileged” (=false) and cannot, for example, run a Docker daemon inside the +Docker container. This is because by default a container is not allowed to +access any devices. A “privileged” container is given access to all devices. + + When the operator executes **docker run --privileged**, Docker will enable access +to all devices on the host as well as set some configuration in AppArmor to +allow the container nearly all the same access to the host as processes running +outside of a container on the host. + +**--read-only**=*true*|*false* + Mount the container's root filesystem as read only. + + By default a container will have its root filesystem writable allowing processes +to write files anywhere. By specifying the `--read-only` flag the container will have +its root filesystem mounted as read only prohibiting any writes. + +**--restart**="no" + Restart policy to apply when a container exits (no, on-failure[:max-retry], always) + +**--rm**=*true*|*false* + Automatically remove the container when it exits (incompatible with -d). The default is *false*. + +**--security-opt**=[] + Security Options + + "label:user:USER" : Set the label user for the container + "label:role:ROLE" : Set the label role for the container + "label:type:TYPE" : Set the label type for the container + "label:level:LEVEL" : Set the label level for the container + "label:disable" : Turn off label confinement for the container + +**--sig-proxy**=*true*|*false* + Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied. The default is *true*. + +**--memory-swappiness**="" + Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. + +**-t**, **--tty**=*true*|*false* + Allocate a pseudo-TTY. The default is *false*. 
+
+   When set to true, Docker can allocate a pseudo-tty and attach to the standard
+input of any container. This can be used, for example, to run a throwaway
+interactive shell. The default value is false.
+
+The **-t** option is incompatible with a redirection of the docker client
+standard input.
+
+**-u**, **--user**=""
+   Sets the username or UID used and optionally the groupname or GID for the specified command.
+
+   The following examples are all valid:
+   --user [user | user:group | uid | uid:gid | user:gid | uid:group ]
+
+   Without this argument the command will be run as root in the container.
+
+**--ulimit**=[]
+   Ulimit options
+
+**-v**, **--volume**=[]
+   Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)
+
+   The **-v** option can be used one or
+more times to add one or more mounts to a container. These mounts can then be
+used in other containers using the **--volumes-from** option.
+
+   The volume may be optionally suffixed with :ro or :rw to mount the volumes in
+read-only or read-write mode, respectively. By default, the volumes are mounted
+read-write. See examples.
+
+Labeling systems like SELinux require that proper labels are placed on volume
+content mounted into a container. Without a label, the security system might
+prevent the processes running inside the container from using the content. By
+default, Docker does not change the labels set by the OS.
+
+To change a label in the container context, you can add either of two suffixes
+`:z` or `:Z` to the volume mount. These suffixes tell Docker to relabel file
+objects on the shared volumes. The `z` option tells Docker that two containers
+share the volume content. As a result, Docker labels the content with a shared
+content label. Shared volume labels allow all containers to read/write content.
+The `Z` option tells Docker to label the content with a private unshared label.
+Only the current container can use a private volume.
+
+Note: Multiple volume options can be added, separated by a ","
+
+**--volumes-from**=[]
+   Mount volumes from the specified container(s)
+
+   Mounts already mounted volumes from a source container onto another
+   container. You must supply the source's container-id. To share
+   a volume, use the **--volumes-from** option when running
+   the target container. You can share volumes even if the source container
+   is not running.
+
+   By default, Docker mounts the volumes in the same mode (read-write or
+   read-only) as it is mounted in the source container. Optionally, you
+   can change this by suffixing the container-id with either the `:ro` or
+   `:rw` keyword.
+
+   If the location of the volume from the source container overlaps with
+   data residing on a target container, then the volume hides
+   that data on the target.
+
+**-w**, **--workdir**=""
+   Working directory inside the container
+
+   The default working directory for
+running binaries within a container is the root directory (/). The developer can
+set a different default with the Dockerfile WORKDIR instruction. The operator
+can override the working directory by using the **-w** option.
+
+# EXAMPLES
+
+## Exposing log messages from the container to the host's log
+
+If you want messages that are logged in your container to show up in the host's
+syslog/journal then you should bind mount the /dev/log directory as follows.
+
+    # docker run -v /dev/log:/dev/log -i -t fedora /bin/bash
+
+From inside the container you can test this by sending a message to the log.
+
+    (bash)# logger "Hello from my container"
+
+Then exit and check the journal.
+
+    # exit
+
+    # journalctl -b | grep Hello
+
+This should list the message sent to logger.
+
+## Attaching to one or more of STDIN, STDOUT, STDERR
+
+If you do not specify -a, then Docker will attach everything (stdin, stdout,
+stderr). You can specify to which of the three standard streams (stdin, stdout,
+stderr) you’d like to connect instead, as in:
+
+    # docker run -a stdin -a stdout -i -t fedora /bin/bash
+
+## Sharing IPC between containers
+
+Using shm_server.c available here: https://www.cs.cf.ac.uk/Dave/C/node27.html
+
+Testing `--ipc=host` mode:
+
+The host shows a shared memory segment with 7 pids attached; it happens to be from httpd:
+
+```
+ $ sudo ipcs -m
+
+ ------ Shared Memory Segments --------
+ key        shmid      owner      perms      bytes      nattch     status
+ 0x01128e25 0          root       600        1000       7
+```
+
+Now run a regular container, and it correctly does NOT see the shared memory segment from the host:
+
+```
+ $ docker run -it shm ipcs -m
+
+ ------ Shared Memory Segments --------
+ key        shmid      owner      perms      bytes      nattch     status
+```
+
+Run a container with the new `--ipc=host` option, and it now sees the shared memory segment from the host httpd:
+
+```
+ $ docker run -it --ipc=host shm ipcs -m
+
+ ------ Shared Memory Segments --------
+ key        shmid      owner      perms      bytes      nattch     status
+ 0x01128e25 0          root       600        1000       7
+```
+
+Testing `--ipc=container:CONTAINERID` mode:
+
+Start a container with a program to create a shared memory segment:
+
+```
+ $ docker run -it shm bash
+ $ sudo shm/shm_server &
+ $ sudo ipcs -m
+
+ ------ Shared Memory Segments --------
+ key        shmid      owner      perms      bytes      nattch     status
+ 0x0000162e 0          root       666        27         1
+```
+
+A second container, created normally, correctly shows no shared memory segment
+from the first container:
+
+```
+ $ docker run shm ipcs -m
+
+ ------ Shared Memory Segments --------
+ key        shmid      owner      perms      bytes      nattch     status
+```
+
+A third container, created with the new --ipc=container:CONTAINERID option, now
+shows the shared memory segment from the first:
+
+```
+ $ docker run -it --ipc=container:ed735b2264ac shm ipcs -m
+ $ sudo ipcs -m
+
+ ------ Shared Memory Segments --------
+ key        shmid      owner      perms      bytes      nattch     status
+ 0x0000162e 0          root       666        27         1
+```
+
+## Linking Containers
+
+The link feature allows multiple containers to communicate with each other. For
+example, a container whose Dockerfile has exposed port 80 can be run and named
+as follows:
+
+    # docker run --name=link-test -d -i -t fedora/httpd
+
+A second container, in this case called linker, can communicate with the httpd
+container, named link-test, by running with the **--link**=*name*:*alias* option:
+
+    # docker run -t -i --link=link-test:lt --name=linker fedora /bin/bash
+
+Now the container linker is linked to container link-test with the alias lt.
+Running the **env** command in the linker container shows environment variables
+with the LT (alias) context (**LT_**):
+
+    # env
+    HOSTNAME=668231cb0978
+    TERM=xterm
+    LT_PORT_80_TCP=tcp://172.17.0.3:80
+    LT_PORT_80_TCP_PORT=80
+    LT_PORT_80_TCP_PROTO=tcp
+    LT_PORT=tcp://172.17.0.3:80
+    PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+    PWD=/
+    LT_NAME=/linker/lt
+    SHLVL=1
+    HOME=/
+    LT_PORT_80_TCP_ADDR=172.17.0.3
+    _=/usr/bin/env
+
+When linking two containers, Docker will use the exposed ports of the container
+to create a secure tunnel for the parent to access.
+
+## Mapping Ports for External Usage
+
+The exposed port of an application can be mapped to a host port using the **-p**
+flag.
For example, port 80 of an httpd container can be mapped to host port 8080
+using the following:
+
+    # docker run -p 8080:80 -d -i -t fedora/httpd
+
+## Creating and Mounting a Data Volume Container
+
+Many applications require the sharing of persistent data across several
+containers. Docker allows you to create a Data Volume Container that other
+containers can mount from. For example, create a named container that contains
+directories /var/volume1 and /tmp/volume2. The image will need to contain these
+directories, so a couple of RUN mkdir instructions might be required for your
+fedora-data image:
+
+    # docker run --name=data -v /var/volume1 -v /tmp/volume2 -i -t fedora-data true
+    # docker run --volumes-from=data --name=fedora-container1 -i -t fedora bash
+
+Multiple --volumes-from parameters will bring together multiple data volumes from
+multiple containers. And it's possible to mount the volumes that came from the
+DATA container in yet another container via the fedora-container1 intermediary
+container, allowing you to abstract the actual data source from users of that data:
+
+    # docker run --volumes-from=fedora-container1 --name=fedora-container2 -i -t fedora bash
+
+## Mounting External Volumes
+
+To mount a host directory as a container volume, specify the absolute path to
+the directory and the absolute path for the container directory separated by a
+colon:
+
+    # docker run -v /var/db:/data1 -i -t fedora bash
+
+When using SELinux, be aware that the host has no knowledge of container SELinux
+policy. Therefore, in the above example, if SELinux policy is enforced, the
+`/var/db` directory is not writable to the container. A "Permission Denied"
+message will occur, along with an avc: message in the host's syslog.
+
+To work around this, at time of writing this man page, the following command
+needs to be run in order for the proper SELinux policy type label to be attached
+to the host directory:
+
+    # chcon -Rt svirt_sandbox_file_t /var/db
+
+Now, writing to the /data1 volume in the container will be allowed and the
+changes will also be reflected on the host in /var/db.
+
+## Using alternative security labeling
+
+You can override the default labeling scheme for each container by specifying
+the `--security-opt` flag. For example, you can specify the MCS/MLS level, a
+requirement for MLS systems. Specifying the level in the following command
+allows you to share the same content between containers.
+
+    # docker run --security-opt label:level:s0:c100,c200 -i -t fedora bash
+
+An MLS example might be:
+
+    # docker run --security-opt label:level:TopSecret -i -t rhel7 bash
+
+To disable the security labeling for this container, rather than running with
+the `--permissive` flag, use the following command:
+
+    # docker run --security-opt label:disable -i -t fedora bash
+
+If you want a tighter security policy on the processes within a container,
+you can specify an alternate type for the container. You could run a container
+that is only allowed to listen on Apache ports by executing the following
+command:
+
+    # docker run --security-opt label:type:svirt_apache_t -i -t centos bash
+
+Note:
+
+You would have to write policy defining a `svirt_apache_t` type.
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit +July 2014, updated by Sven Dowideit diff --git a/man/docker-save.1.md b/man/docker-save.1.md new file mode 100644 index 00000000..5f336ffd --- /dev/null +++ b/man/docker-save.1.md @@ -0,0 +1,45 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-save - Save an image(s) to a tar archive (streamed to STDOUT by default) + +# SYNOPSIS +**docker save** +[**--help**] +[**-o**|**--output**[=*OUTPUT*]] +IMAGE [IMAGE...] + +# DESCRIPTION +Produces a tarred repository to the standard output stream. Contains all +parent layers, and all tags + versions, or specified repo:tag. + +Stream to a file instead of STDOUT by using **-o**. + +# OPTIONS +**--help** + Print usage statement + +**-o**, **--output**="" + Write to a file, instead of STDOUT + +# EXAMPLES + +Save all fedora repository images to a fedora-all.tar and save the latest +fedora image to a fedora-latest.tar: + + $ docker save fedora > fedora-all.tar + $ docker save --output=fedora-latest.tar fedora:latest + $ ls -sh fedora-all.tar + 721M fedora-all.tar + $ ls -sh fedora-latest.tar + 367M fedora-latest.tar + +# See also +**docker-load(1)** to load an image from a tar archive on STDIN. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +November 2014, updated by Sven Dowideit diff --git a/man/docker-search.1.md b/man/docker-search.1.md new file mode 100644 index 00000000..3eaefd06 --- /dev/null +++ b/man/docker-search.1.md @@ -0,0 +1,65 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-search - Search the Docker Hub for images + +# SYNOPSIS +**docker search** +[**--automated**[=*false*]] +[**--help**] +[**--no-trunc**[=*false*]] +[**-s**|**--stars**[=*0*]] +TERM + +# DESCRIPTION + +Search Docker Hub for images that match the specified `TERM`. The table +of images returned displays the name, description (truncated by default), number +of stars awarded, whether the image is official, and whether it is automated. + +*Note* - Search queries will only return up to 25 results + +# OPTIONS +**--automated**=*true*|*false* + Only show automated builds. The default is *false*. + +**--help** + Print usage statement + +**--no-trunc**=*true*|*false* + Don't truncate output. The default is *false*. + +**-s**, **--stars**=0 + Only displays with at least x stars + +# EXAMPLES + +## Search Docker Hub for ranked images + +Search a registry for the term 'fedora' and only display those images +ranked 3 or higher: + + $ docker search -s 3 fedora + NAME DESCRIPTION STARS OFFICIAL AUTOMATED + mattdm/fedora A basic Fedora image corresponding roughly... 50 + fedora (Semi) Official Fedora base image. 38 + mattdm/fedora-small A small Fedora image on which to build. Co... 8 + goldmann/wildfly A WildFly application server running on a ... 3 [OK] + +## Search Docker Hub for automated images + +Search Docker Hub for the term 'fedora' and only display automated images +ranked 1 or higher: + + $ docker search -s 1 fedora + NAME DESCRIPTION STARS OFFICIAL AUTOMATED + goldmann/wildfly A WildFly application server running on a ... 3 [OK] + tutum/fedora-20 Fedora 20 image with SSH access. For the r... 1 [OK] + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. 
+June 2014, updated by Sven Dowideit
+April 2015, updated by Mary Anthony for v2
+
diff --git a/man/docker-start.1.md b/man/docker-start.1.md
new file mode 100644
index 00000000..523b3155
--- /dev/null
+++ b/man/docker-start.1.md
@@ -0,0 +1,34 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-start - Start one or more stopped containers
+
+# SYNOPSIS
+**docker start**
+[**-a**|**--attach**[=*false*]]
+[**--help**]
+[**-i**|**--interactive**[=*false*]]
+CONTAINER [CONTAINER...]
+
+# DESCRIPTION
+
+Start one or more stopped containers.
+
+# OPTIONS
+**-a**, **--attach**=*true*|*false*
+   Attach container's STDOUT and STDERR and forward all signals to the process. The default is *false*.
+
+**--help**
+  Print usage statement
+
+**-i**, **--interactive**=*true*|*false*
+   Attach container's STDIN. The default is *false*.
+
+# See also
+**docker-stop(1)** to stop a running container.
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
diff --git a/man/docker-stats.1.md b/man/docker-stats.1.md
new file mode 100644
index 00000000..4b485885
--- /dev/null
+++ b/man/docker-stats.1.md
@@ -0,0 +1,31 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-stats - Display a live stream of one or more containers' resource usage statistics
+
+# SYNOPSIS
+**docker stats**
+[**--help**]
+[**--no-stream**[=*false*]]
+CONTAINER [CONTAINER...]
+
+# DESCRIPTION
+
+Display a live stream of one or more containers' resource usage statistics.
+
+# OPTIONS
+**--help**
+  Print usage statement
+
+**--no-stream**=*true*|*false*
+  Disable streaming stats and only pull the first result. The default is *false*.
+
+# EXAMPLES
+
+Run **docker stats** with multiple containers.
+
+    $ docker stats redis1 redis2
+    CONTAINER           CPU %               MEM USAGE/LIMIT     MEM %               NET I/O
+    redis1              0.07%               796 KB/64 MB        1.21%               788 B/648 B
+    redis2              0.07%               2.746 MB/64 MB      4.29%               1.266 KB/648 B
+
diff --git a/man/docker-stop.1.md b/man/docker-stop.1.md
new file mode 100644
index 00000000..9b882db4
--- /dev/null
+++ b/man/docker-stop.1.md
@@ -0,0 +1,30 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-stop - Stop a running container by sending SIGTERM and then SIGKILL after a grace period
+
+# SYNOPSIS
+**docker stop**
+[**--help**]
+[**-t**|**--time**[=*10*]]
+CONTAINER [CONTAINER...]
+
+# DESCRIPTION
+Stop a running container (send SIGTERM, and then SIGKILL after a
+grace period).
+
+# OPTIONS
+**--help**
+  Print usage statement
+
+**-t**, **--time**=10
+   Number of seconds to wait for the container to stop before killing it. Default is 10 seconds.
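+
+# EXAMPLES
+
+A minimal usage sketch (the container name is illustrative): give a container
+30 seconds to exit cleanly on SIGTERM before it is killed:
+
+    $ docker stop -t 30 my_container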
+
+# See also
+**docker-start(1)** to restart a stopped container.
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
diff --git a/man/docker-tag.1.md b/man/docker-tag.1.md
new file mode 100644
index 00000000..5bdc9d9b
--- /dev/null
+++ b/man/docker-tag.1.md
@@ -0,0 +1,66 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-tag - Tag an image into a repository
+
+# SYNOPSIS
+**docker tag**
+[**-f**|**--force**[=*false*]]
+[**--help**]
+IMAGE[:TAG] [REGISTRY_HOST/][USERNAME/]NAME[:TAG]
+
+# DESCRIPTION
+Assigns a new alias to an image in a registry. An alias refers to the
+entire image name including the optional `TAG` after the ':'.
+
+If you do not specify a `REGISTRY_HOST`, the command uses Docker's public
+registry located at `registry-1.docker.io` by default.
+
+# OPTIONS
+**-f**, **--force**=*true*|*false*
+   When set to true, force the alias. The default is *false*.
+
+**REGISTRY_HOST**
+   The hostname of the registry, if required. This may also include the port
+separated by a ':'.
+
+**USERNAME**
+   The username or other qualifying identifier for the image.
+
+**NAME**
+   The image name.
+
+**TAG**
+   The tag you are assigning to the image. Though this is arbitrary, it is
+recommended that you use it to record a version, so as to distinguish images
+with the same name. Also, for consistency, tags should only include the
+characters `a-z0-9-_.`.
+Note that here TAG is a part of the overall name or "tag".
+
+# EXAMPLES
+
+## Giving an image a new alias
+
+Here is an example of aliasing an image (e.g., 0e5574283393) as "httpd" and
+tagging it into the "fedora" repository with "version1.0":
+
+    docker tag 0e5574283393 fedora/httpd:version1.0
+
+## Tagging an image for a private repository
+
+To push an image to a private registry and not the central Docker
+registry you must tag it with the registry hostname and port (if needed).
+
+    docker tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
+July 2014, updated by Sven Dowideit
+April 2015, updated by Mary Anthony for v2
+June 2015, updated by Sally O'Malley
diff --git a/man/docker-top.1.md b/man/docker-top.1.md
new file mode 100644
index 00000000..9828d98b
--- /dev/null
+++ b/man/docker-top.1.md
@@ -0,0 +1,34 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-top - Display the running processes of a container
+
+# SYNOPSIS
+**docker top**
+[**--help**]
+CONTAINER [ps OPTIONS]
+
+# DESCRIPTION
+
+Display the running processes of the container. The ps OPTIONS can be any of
+the options you would pass to a Linux **ps** command.
+
+# OPTIONS
+**--help**
+  Print usage statement
+
+# EXAMPLES
+
+Run **docker top** with the ps option of -x:
+
+    $ docker top 8601afda2b -x
+    PID      TTY       STAT       TIME         COMMAND
+    16623    ?         Ss         0:00         sleep 99999
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
+June 2015, updated by Ma Shimiao
diff --git a/man/docker-unpause.1.md b/man/docker-unpause.1.md
new file mode 100644
index 00000000..466e1bb1
--- /dev/null
+++ b/man/docker-unpause.1.md
@@ -0,0 +1,27 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-unpause - Unpause all processes within a container
+
+# SYNOPSIS
+**docker unpause**
+CONTAINER [CONTAINER...]
+
+# DESCRIPTION
+
+The `docker unpause` command uses the cgroups freezer to un-suspend all
+processes in a container.
+
+See the [cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroups/freezer-subsystem.txt)
+for further details.
+
+# OPTIONS
+There are no available options.
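+
+# EXAMPLES
+
+A minimal usage sketch (the container name is illustrative), pairing
+**docker pause** with **docker unpause**:
+
+    $ docker pause my_container
+    $ docker unpause my_container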
+
+# See also
+**docker-pause(1)** to pause all processes within a container.
+
+# HISTORY
+June 2014, updated by Sven Dowideit
diff --git a/man/docker-version.1.md b/man/docker-version.1.md
new file mode 100644
index 00000000..04ae3464
--- /dev/null
+++ b/man/docker-version.1.md
@@ -0,0 +1,62 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2015
+# NAME
+docker-version - Show the Docker version information.
+
+# SYNOPSIS
+**docker version**
+[**--help**]
+[**-f**|**--format**[=*FORMAT*]]
+
+# DESCRIPTION
+This command displays version information for both the Docker client and
+daemon.
+
+# OPTIONS
+**--help**
+  Print usage statement
+
+**-f**, **--format**=""
+  Format the output using the given Go template.
+
+# EXAMPLES
+
+## Display Docker version information
+
+The default output:
+
+    $ docker version
+    Client:
+     Version:      1.8.0
+     API version:  1.20
+     Go version:   go1.4.2
+     Git commit:   f5bae0a
+     Built:        Tue Jun 23 17:56:00 UTC 2015
+     OS/Arch:      linux/amd64
+
+    Server:
+     Version:      1.8.0
+     API version:  1.20
+     Go version:   go1.4.2
+     Git commit:   f5bae0a
+     Built:        Tue Jun 23 17:56:00 UTC 2015
+     OS/Arch:      linux/amd64
+
+Get the server version:
+
+    $ docker version --format '{{.Server.Version}}'
+    1.8.0
+
+Dump raw data:
+
+To view all available fields, you can use the format `{{json .}}`.
+
+    $ docker version --format '{{json .}}'
+    {"Client":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"},"ServerOK":true,"Server":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","KernelVersion":"3.13.2-gentoo","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"}}
+
+# HISTORY
+June 2014, updated by Sven Dowideit
+June 2015, updated by John Howard
+June 2015, updated by Patrick Hemmer
diff --git a/man/docker.1.md b/man/docker.1.md
new file mode 100644
index 00000000..92bd82e3
--- /dev/null
+++ b/man/docker.1.md
@@ -0,0 +1,540 @@
+% DOCKER(1) Docker User Manuals
+% William Henry
+% APRIL 2014
+# NAME
+docker \- Docker image and container command line interface
+
+# SYNOPSIS
+**docker** [OPTIONS] COMMAND [arg...]
+
+# DESCRIPTION
+**docker** has two distinct functions. It is used for starting the Docker
+daemon and for running the CLI (i.e., for commanding the daemon to manage
+images, containers and so on). So **docker** is both a server, as a daemon,
+and a client to the daemon, through the CLI.
+
+To run the Docker daemon you do not specify any of the commands listed below but
+must specify the **-d** option. The other options listed below are for the
+daemon only.
+
+The Docker CLI has over 30 commands. The commands are listed below and each has
+its own man page which explains usage and arguments.
+
+To see the man page for a command, run **man docker-<command>**.
+
+# OPTIONS
+**--help**
+  Print usage statement
+
+**--api-cors-header**=""
+  Set CORS headers in the remote API. Default is cors disabled. Give URLs like "http://foo, http://bar, ...". Give "*" to allow all.
+
+**-b**, **--bridge**=""
+  Attach containers to a pre\-existing network bridge; use 'none' to disable container networking
+
+**--bip**=""
+  Use the provided CIDR notation address for the dynamically created bridge (docker0); Mutually exclusive of \-b
+
+**--config**=""
+  Specifies the location of the Docker client configuration files. The default is '~/.docker'.
+
+**-D**, **--debug**=*true*|*false*
+  Enable debug mode. Default is false.
+
+**-d**, **--daemon**=*true*|*false*
+  Enable daemon mode. Default is false.
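+
+  For example, a minimal invocation that runs the daemon in the foreground
+with debug output (both flags are described above):
+
+    # docker -d -D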
+
+**--default-gateway**=""
+  IPv4 address of the container default gateway; this address must be part of the bridge subnet (which is defined by \-b or \--bip)
+
+**--default-gateway-v6**=""
+  IPv6 address of the container default gateway
+
+**--default-ulimit**=[]
+  Set default ulimits for containers.
+
+**--disable-legacy-registry**=*true*|*false*
+  Do not contact legacy registries
+
+**--dns**=""
+  Force Docker to use specific DNS servers
+
+**--dns-search**=[]
+  DNS search domains to use.
+
+**-e**, **--exec-driver**=""
+  Force Docker to use specific exec driver. Default is `native`.
+
+**--exec-opt**=[]
+  Set exec driver options. See EXEC DRIVER OPTIONS.
+
+**--exec-root**=""
+  Path to use as the root of the Docker exec driver. Default is `/var/run/docker`.
+
+**--fixed-cidr**=""
+  IPv4 subnet for fixed IPs (e.g., 10.20.0.0/16); this subnet must be nested in the bridge subnet (which is defined by \-b or \-\-bip)
+
+**--fixed-cidr-v6**=""
+  IPv6 subnet for global IPv6 addresses (e.g., 2a00:1450::/64)
+
+**-G**, **--group**=""
+  Group to assign the unix socket specified by -H when running in daemon mode.
+  Use '' (the empty string) to disable setting of a group. Default is `docker`.
+
+**-g**, **--graph**=""
+  Path to use as the root of the Docker runtime. Default is `/var/lib/docker`.
+
+**-H**, **--host**=[unix:///var/run/docker.sock]: tcp://[host:port] to bind or
+unix://[/path/to/socket] to use.
+  The socket(s) to bind to in daemon mode specified using one or more
+  tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.
+
+**--icc**=*true*|*false*
+  Allow unrestricted inter\-container and Docker daemon host communication. If disabled, containers can still be linked together using the **--link** option (see **docker-run(1)**). Default is true.
+
+**--insecure-registry**=[]
+  Enable insecure registry communication.
+
+**--ip**=""
+  Default IP address to use when binding container ports. Default is `0.0.0.0`.
+
+**--ip-forward**=*true*|*false*
+  Enables IP forwarding on the Docker host. The default is `true`. This flag interacts with the IP forwarding setting on your host system's kernel. If your system has IP forwarding disabled, this setting enables it. If your system has IP forwarding enabled, setting this flag to `--ip-forward=false` has no effect.
+
+  This setting will also enable IPv6 forwarding if you have both `--ip-forward=true` and `--fixed-cidr-v6` set. Note that this may reject Router Advertisements and interfere with the host's existing IPv6 configuration. For more information, please consult the documentation about "Advanced Networking - IPv6".
+
+**--ip-masq**=*true*|*false*
+  Enable IP masquerading for the bridge's IP range. Default is true.
+
+**--iptables**=*true*|*false*
+  Enable Docker's addition of iptables rules. Default is true.
+
+**--ipv6**=*true*|*false*
+  Enable IPv6 support. Default is false. Docker will create an IPv6-enabled bridge with address fe80::1 which will allow you to create IPv6-enabled containers. Use together with `--fixed-cidr-v6` to provide globally routable IPv6 addresses. IPv6 forwarding will be enabled if not used with `--ip-forward=false`. This may collide with your host's current IPv6 settings. For more information please consult the documentation about "Advanced Networking - IPv6".
+
+**-l**, **--log-level**="*debug*|*info*|*warn*|*error*|*fatal*"
+  Set the logging level. Default is `info`.
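+
+  For example (one illustrative invocation among many), to have the daemon
+listen on both the default Unix socket and a local TCP port while logging at
+debug level:
+
+    # docker -d -H unix:///var/run/docker.sock -H tcp://127.0.0.1:2375 -l debug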
+
+**--label**="[]"
+  Set key=value labels to the daemon (displayed in `docker info`)
+
+**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*none*"
+  Default driver for container logs. Default is `json-file`.
+  **Warning**: `docker logs` command works only for `json-file` logging driver.
+
+**--log-opt**=[]
+  Logging driver specific options.
+
+**--mtu**=VALUE
+  Set the container network MTU. Default is `0`.
+
+**-p**, **--pidfile**=""
+  Path to use for daemon PID file. Default is `/var/run/docker.pid`
+
+**--registry-mirror**=<scheme>://<host>
+  Prepend a registry mirror to be used for image pulls. May be specified multiple times.
+
+**-s**, **--storage-driver**=""
+  Force the Docker runtime to use a specific storage driver.
+
+**--selinux-enabled**=*true*|*false*
+  Enable selinux support. Default is false. SELinux does not presently support the BTRFS storage driver.
+
+**--storage-opt**=[]
+  Set storage driver options. See STORAGE DRIVER OPTIONS.
+
+**--tls**=*true*|*false*
+  Use TLS; implied by --tlsverify. Default is false.
+
+**--tlscacert**=~/.docker/ca.pem
+  Trust certs signed only by this CA.
+
+**--tlscert**=~/.docker/cert.pem
+  Path to TLS certificate file.
+
+**--tlskey**=~/.docker/key.pem
+  Path to TLS key file.
+
+**--tlsverify**=*true*|*false*
+  Use TLS and verify the remote (daemon: verify client, client: verify daemon).
+  Default is false.
+
+**--userland-proxy**=*true*|*false*
+  Rely on a userland proxy implementation for inter-container and outside-to-container loopback communications. Default is true.
+
+**-v**, **--version**=*true*|*false*
+  Print version information and quit. Default is false.
+
+# COMMANDS
+**attach**
+  Attach to a running container
+  See **docker-attach(1)** for full documentation on the **attach** command.
+
+**build**
+  Build an image from a Dockerfile
+  See **docker-build(1)** for full documentation on the **build** command.
+
+**commit**
+  Create a new image from a container's changes
+  See **docker-commit(1)** for full documentation on the **commit** command.
+
+**cp**
+  Copy files/folders from a container's filesystem to the host
+  See **docker-cp(1)** for full documentation on the **cp** command.
+
+**create**
+  Create a new container
+  See **docker-create(1)** for full documentation on the **create** command.
+
+**diff**
+  Inspect changes on a container's filesystem
+  See **docker-diff(1)** for full documentation on the **diff** command.
+
+**events**
+  Get real time events from the server
+  See **docker-events(1)** for full documentation on the **events** command.
+
+**exec**
+  Run a command in a running container
+  See **docker-exec(1)** for full documentation on the **exec** command.
+
+**export**
+  Stream the contents of a container as a tar archive
+  See **docker-export(1)** for full documentation on the **export** command.
+
+**history**
+  Show the history of an image
+  See **docker-history(1)** for full documentation on the **history** command.
+
+**images**
+  List images
+  See **docker-images(1)** for full documentation on the **images** command.
+
+**import**
+  Create a new filesystem image from the contents of a tarball
+  See **docker-import(1)** for full documentation on the **import** command.
+
+**info**
+  Display system-wide information
+  See **docker-info(1)** for full documentation on the **info** command.
+
+**inspect**
+  Return low-level information on a container or image
+  See **docker-inspect(1)** for full documentation on the **inspect** command.
+ +**kill** + Kill a running container (which includes the wrapper process and everything +inside it) + See **docker-kill(1)** for full documentation on the **kill** command. + +**load** + Load an image from a tar archive + See **docker-load(1)** for full documentation on the **load** command. + +**login** + Register or login to a Docker Registry + See **docker-login(1)** for full documentation on the **login** command. + +**logout** + Log the user out of a Docker Registry + See **docker-logout(1)** for full documentation on the **logout** command. + +**logs** + Fetch the logs of a container + See **docker-logs(1)** for full documentation on the **logs** command. + +**pause** + Pause all processes within a container + See **docker-pause(1)** for full documentation on the **pause** command. + +**port** + Lookup the public-facing port which is NAT-ed to PRIVATE_PORT + See **docker-port(1)** for full documentation on the **port** command. + +**ps** + List containers + See **docker-ps(1)** for full documentation on the **ps** command. + +**pull** + Pull an image or a repository from a Docker Registry + See **docker-pull(1)** for full documentation on the **pull** command. + +**push** + Push an image or a repository to a Docker Registry + See **docker-push(1)** for full documentation on the **push** command. + +**rename** + Rename a container. + See **docker-rename(1)** for full documentation on the **rename** command. + +**restart** + Restart a running container + See **docker-restart(1)** for full documentation on the **restart** command. + +**rm** + Remove one or more containers + See **docker-rm(1)** for full documentation on the **rm** command. + +**rmi** + Remove one or more images + See **docker-rmi(1)** for full documentation on the **rmi** command. + +**run** + Run a command in a new container + See **docker-run(1)** for full documentation on the **run** command. + +**save** + Save an image to a tar archive + See **docker-save(1)** for full documentation on the **save** command. + +**search** + Search for an image in the Docker index + See **docker-search(1)** for full documentation on the **search** command. + +**start** + Start a stopped container + See **docker-start(1)** for full documentation on the **start** command. + +**stats** + Display a live stream of one or more containers' resource usage statistics + See **docker-stats(1)** for full documentation on the **stats** command. + +**stop** + Stop a running container + See **docker-stop(1)** for full documentation on the **stop** command. + +**tag** + Tag an image into a repository + See **docker-tag(1)** for full documentation on the **tag** command. + +**top** + Lookup the running processes of a container + See **docker-top(1)** for full documentation on the **top** command. + +**unpause** + Unpause all processes within a container + See **docker-unpause(1)** for full documentation on the **unpause** command. + +**version** + Show the Docker version information + See **docker-version(1)** for full documentation on the **version** command. + +**wait** + Block until a container stops, then print its exit code + See **docker-wait(1)** for full documentation on the **wait** command. + +# STORAGE DRIVER OPTIONS + +Docker uses storage backends (known as "graphdrivers" in the Docker +internals) to create writable containers from images. Many of these +backends use operating system level technologies and can be +configured. + +Specify options to the storage backend with **--storage-opt** flags. 
The only +backend that currently takes options is *devicemapper*. Therefore use these +flags with **-s=**devicemapper. + +Specifically for devicemapper, the default is a "loopback" model which +requires no pre-configuration, but is extremely inefficient. Do not +use it in production. + +To make the best use of Docker with the devicemapper backend, you must +have a recent version of LVM. Use `lvm` to create a thin pool; for +more information see `man lvmthin`. Then, use `--storage-opt +dm.thinpooldev` to tell the Docker engine to use that pool for +allocating images and container snapshots. + +Here is the list of *devicemapper* options: + +#### dm.thinpooldev + +Specifies a custom block storage device to use for the thin pool. + +If using a block device for device mapper storage, it is best to use +`lvm` to create and manage the thin-pool volume. This volume is then +handed to Docker to create snapshot volumes needed for images and +containers. + +Managing the thin-pool outside of Docker makes for the most feature-rich method +of having Docker utilize device mapper thin provisioning as the backing storage +for Docker's containers. The highlights of the LVM-based thin-pool management +feature include: automatic or interactive thin-pool resize support, dynamically +changing thin-pool features, automatic thinp metadata checking when lvm activates +the thin-pool, etc. + +Example use: `docker -d --storage-opt dm.thinpooldev=/dev/mapper/thin-pool` + +#### dm.basesize + +Specifies the size to use when creating the base device, which limits +the size of images and containers. The default value is 100G. Note, +thin devices are inherently "sparse", so a 100G device which is mostly +empty doesn't use 100 GB of space on the pool. However, the filesystem +will use more space for base images the larger the device +is. + +This value affects the system-wide "base" empty filesystem that may already +be initialized and inherited by pulled images. Typically, a change to this +value requires additional steps to take effect: + + $ sudo service docker stop + $ sudo rm -rf /var/lib/docker + $ sudo service docker start + +Example use: `docker -d --storage-opt dm.basesize=20G` + +#### dm.fs + +Specifies the filesystem type to use for the base device. The +supported options are `ext4` and `xfs`. The default is `ext4`. + +Example use: `docker -d --storage-opt dm.fs=xfs` + +#### dm.mkfsarg + +Specifies extra mkfs arguments to be used when creating the base device. + +Example use: `docker -d --storage-opt "dm.mkfsarg=-O ^has_journal"` + +#### dm.mountopt + +Specifies extra mount options used when mounting the thin devices. + +Example use: `docker -d --storage-opt dm.mountopt=nodiscard` + +#### dm.use_deferred_removal + +Enables use of deferred device removal if `libdm` and the kernel driver +support the mechanism. + +Deferred device removal means that if device is busy when devices are +being removed/deactivated, then a deferred removal is scheduled on +device. And devices automatically go away when last user of the device +exits. + +For example, when a container exits, its associated thin device is removed. If +that device has leaked into some other mount namespace and can't be removed, +the container exit still succeeds and this option causes the system to schedule +the device for deferred removal. It does not wait in a loop trying to remove a busy +device. 
+ +Example use: `docker -d --storage-opt dm.use_deferred_removal=true` + +#### dm.loopdatasize + +**Note**: This option configures devicemapper loopback, which should not be used in production. + +Specifies the size to use when creating the loopback file for the +"data" device which is used for the thin pool. The default size is +100G. The file is sparse, so it will not initially take up +this much space. + +Example use: `docker -d --storage-opt dm.loopdatasize=200G` + +#### dm.loopmetadatasize + +**Note**: This option configures devicemapper loopback, which should not be used in production. + +Specifies the size to use when creating the loopback file for the +"metadata" device which is used for the thin pool. The default size +is 2G. The file is sparse, so it will not initially take up +this much space. + +Example use: `docker -d --storage-opt dm.loopmetadatasize=4G` + +#### dm.datadev + +(Deprecated, use `dm.thinpooldev`) + +Specifies a custom blockdevice to use for data for a +Docker-managed thin pool. It is better to use `dm.thinpooldev` - see +the documentation for it above for discussion of the advantages. + +#### dm.metadatadev + +(Deprecated, use `dm.thinpooldev`) + +Specifies a custom blockdevice to use for metadata for a +Docker-managed thin pool. See `dm.datadev` for why this is +deprecated. + +#### dm.blocksize + +Specifies a custom blocksize to use for the thin pool. The default +blocksize is 64K. + +Example use: `docker -d --storage-opt dm.blocksize=512K` + +#### dm.blkdiscard + +Enables or disables the use of `blkdiscard` when removing devicemapper +devices. This is disabled by default due to the additional latency, +but as a special case with loopback devices it will be enabled, in +order to re-sparsify the loopback file on image/container removal. + +Disabling this on loopback can lead to *much* faster container removal +times, but it also prevents the space used in `/var/lib/docker` directory +from being returned to the system for other use when containers are +removed. + +Example use: `docker -d --storage-opt dm.blkdiscard=false` + +#### dm.override_udev_sync_check + +By default, the devicemapper backend attempts to synchronize with the +`udev` device manager for the Linux kernel. This option allows +disabling that synchronization, to continue even though the +configuration may be buggy. + +To view the `udev` sync support of a Docker daemon that is using the +`devicemapper` driver, run: + + $ docker info + [...] + Udev Sync Supported: true + [...] + +When `udev` sync support is `true`, then `devicemapper` and `udev` can +coordinate the activation and deactivation of devices for containers. + +When `udev` sync support is `false`, a race condition occurs between +the `devicemapper` and `udev` during create and cleanup. The race +condition results in errors and failures. (For information on these +failures, see +[docker#4036](https://github.com/docker/docker/issues/4036)) + +To allow the `docker` daemon to start, regardless of whether `udev` sync is +`false`, set `dm.override_udev_sync_check` to true: + + $ docker -d --storage-opt dm.override_udev_sync_check=true + +When this value is `true`, the driver continues and simply warns you +the errors are happening. + +**Note**: The ideal is to pursue a `docker` daemon and environment +that does support synchronizing with `udev`. For further discussion on +this topic, see +[docker#4036](https://github.com/docker/docker/issues/4036). 
+Otherwise, set this flag for migrating existing Docker daemons to a +daemon with a supported environment. + +# EXEC DRIVER OPTIONS + +Use the **--exec-opt** flags to specify options to the exec-driver. The only +driver that accepts this flag is the *native* (libcontainer) driver. As a +result, you must also specify **-s=**native for this option to have effect. The +following is the only *native* option: + +#### native.cgroupdriver +Specifies the management of the container's `cgroups`. You can specify +`cgroupfs` or `systemd`. If you specify `systemd` and it is not available, the +system uses `cgroupfs`. + +#### Client +For specific client examples please see the man page for the specific Docker +command. For example: + + man docker-run + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. diff --git a/man/md2man-all.sh b/man/md2man-all.sh new file mode 100755 index 00000000..97c65c93 --- /dev/null +++ b/man/md2man-all.sh @@ -0,0 +1,22 @@ +#!/bin/bash +set -e + +# get into this script's directory +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +[ "$1" = '-q' ] || { + set -x + pwd +} + +for FILE in *.md; do + base="$(basename "$FILE")" + name="${base%.md}" + num="${name##*.}" + if [ -z "$num" -o "$name" = "$num" ]; then + # skip files that aren't of the format xxxx.N.md (like README.md) + continue + fi + mkdir -p "./man${num}" + go-md2man -in "$FILE" -out "./man${num}/${name}" +done diff --git a/opts/envfile.go b/opts/envfile.go new file mode 100644 index 00000000..b854227e --- /dev/null +++ b/opts/envfile.go @@ -0,0 +1,62 @@ +package opts + +import ( + "bufio" + "fmt" + "os" + "regexp" + "strings" +) + +var ( + // EnvironmentVariableRegexp A regexp to validate correct environment variables + // Environment variables set by the user must have a name consisting solely of + // alphabetics, numerics, and underscores - the first of which must not be numeric. + EnvironmentVariableRegexp = regexp.MustCompile("^[[:alpha:]_][[:alpha:][:digit:]_]*$") +) + +// ParseEnvFile Read in a line delimited file with environment variables enumerated +func ParseEnvFile(filename string) ([]string, error) { + fh, err := os.Open(filename) + if err != nil { + return []string{}, err + } + defer fh.Close() + + lines := []string{} + scanner := bufio.NewScanner(fh) + for scanner.Scan() { + line := scanner.Text() + // line is not empty, and not starting with '#' + if len(line) > 0 && !strings.HasPrefix(line, "#") { + data := strings.SplitN(line, "=", 2) + + // trim the front of a variable, but nothing else + variable := strings.TrimLeft(data[0], whiteSpaces) + + if !EnvironmentVariableRegexp.MatchString(variable) { + return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' is not a valid environment variable", variable)} + } + if len(data) > 1 { + + // pass the value through, no trimming + lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1])) + } else { + // if only a pass-through variable is given, clean it up. 
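+				// e.g. a bare "TERM" line becomes "TERM=xterm" when TERM is set
+				// to "xterm" in the environment the CLI runs in; variables that
+				// are unset there resolve to an empty value.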
+ lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), os.Getenv(line))) + } + } + } + return lines, scanner.Err() +} + +var whiteSpaces = " \t" + +// ErrBadEnvVariable typed error for bad environment variable +type ErrBadEnvVariable struct { + msg string +} + +func (e ErrBadEnvVariable) Error() string { + return fmt.Sprintf("poorly formatted environment: %s", e.msg) +} diff --git a/opts/envfile_test.go b/opts/envfile_test.go new file mode 100644 index 00000000..cd0ca8f3 --- /dev/null +++ b/opts/envfile_test.go @@ -0,0 +1,133 @@ +package opts + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "reflect" + "strings" + "testing" +) + +func tmpFileWithContent(content string, t *testing.T) string { + tmpFile, err := ioutil.TempFile("", "envfile-test") + if err != nil { + t.Fatal(err) + } + defer tmpFile.Close() + + tmpFile.WriteString(content) + return tmpFile.Name() +} + +// Test ParseEnvFile for a file with a few well formatted lines +func TestParseEnvFileGoodFile(t *testing.T) { + content := `foo=bar + baz=quux +# comment + +_foobar=foobaz +` + + tmpFile := tmpFileWithContent(content, t) + defer os.Remove(tmpFile) + + lines, err := ParseEnvFile(tmpFile) + if err != nil { + t.Fatal(err) + } + + expectedLines := []string{ + "foo=bar", + "baz=quux", + "_foobar=foobaz", + } + + if !reflect.DeepEqual(lines, expectedLines) { + t.Fatal("lines not equal to expected_lines") + } +} + +// Test ParseEnvFile for an empty file +func TestParseEnvFileEmptyFile(t *testing.T) { + tmpFile := tmpFileWithContent("", t) + defer os.Remove(tmpFile) + + lines, err := ParseEnvFile(tmpFile) + if err != nil { + t.Fatal(err) + } + + if len(lines) != 0 { + t.Fatal("lines not empty; expected empty") + } +} + +// Test ParseEnvFile for a non existent file +func TestParseEnvFileNonExistentFile(t *testing.T) { + _, err := ParseEnvFile("foo_bar_baz") + if err == nil { + t.Fatal("ParseEnvFile succeeded; expected failure") + } + if _, ok := err.(*os.PathError); !ok { + t.Fatalf("Expected a PathError, got [%v]", err) + } +} + +// Test ParseEnvFile for a badly formatted file +func TestParseEnvFileBadlyFormattedFile(t *testing.T) { + content := `foo=bar + f =quux +` + + tmpFile := tmpFileWithContent(content, t) + defer os.Remove(tmpFile) + + _, err := ParseEnvFile(tmpFile) + if err == nil { + t.Fatalf("Expected a ErrBadEnvVariable, got nothing") + } + if _, ok := err.(ErrBadEnvVariable); !ok { + t.Fatalf("Expected a ErrBadEnvVariable, got [%v]", err) + } + expectedMessage := "poorly formatted environment: variable 'f ' is not a valid environment variable" + if err.Error() != expectedMessage { + t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error()) + } +} + +// Test ParseEnvFile for a file with a line exeeding bufio.MaxScanTokenSize +func TestParseEnvFileLineTooLongFile(t *testing.T) { + content := strings.Repeat("a", bufio.MaxScanTokenSize+42) + content = fmt.Sprint("foo=", content) + + tmpFile := tmpFileWithContent(content, t) + defer os.Remove(tmpFile) + + _, err := ParseEnvFile(tmpFile) + if err == nil { + t.Fatal("ParseEnvFile succeeded; expected failure") + } +} + +// ParseEnvFile with a random file, pass through +func TestParseEnvFileRandomFile(t *testing.T) { + content := `first line +another invalid line` + tmpFile := tmpFileWithContent(content, t) + defer os.Remove(tmpFile) + + _, err := ParseEnvFile(tmpFile) + + if err == nil { + t.Fatalf("Expected a ErrBadEnvVariable, got nothing") + } + if _, ok := err.(ErrBadEnvVariable); !ok { + t.Fatalf("Expected a ErrBadEnvvariable, got [%v]", err) + } + 
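+	// ParseEnvFile stops at the first invalid line, so the error is expected
+	// to quote that line verbatim.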
+	expectedMessage := "poorly formatted environment: variable 'first line' is not a valid environment variable"
+	if err.Error() != expectedMessage {
+		t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error())
+	}
+}
diff --git a/opts/hosts_unix.go b/opts/hosts_unix.go
new file mode 100644
index 00000000..a29335e6
--- /dev/null
+++ b/opts/hosts_unix.go
@@ -0,0 +1,7 @@
+// +build !windows
+
+package opts
+
+import "fmt"
+
+var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket)
diff --git a/opts/hosts_windows.go b/opts/hosts_windows.go
new file mode 100644
index 00000000..55eac2aa
--- /dev/null
+++ b/opts/hosts_windows.go
@@ -0,0 +1,7 @@
+// +build windows
+
+package opts
+
+import "fmt"
+
+var DefaultHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
diff --git a/opts/ip.go b/opts/ip.go
new file mode 100644
index 00000000..b1f95875
--- /dev/null
+++ b/opts/ip.go
@@ -0,0 +1,35 @@
+package opts
+
+import (
+	"fmt"
+	"net"
+)
+
+// IpOpt is a type that holds an IP.
+type IpOpt struct {
+	*net.IP
+}
+
+func NewIpOpt(ref *net.IP, defaultVal string) *IpOpt {
+	o := &IpOpt{
+		IP: ref,
+	}
+	o.Set(defaultVal)
+	return o
+}
+
+func (o *IpOpt) Set(val string) error {
+	ip := net.ParseIP(val)
+	if ip == nil {
+		return fmt.Errorf("%s is not an ip address", val)
+	}
+	*o.IP = ip
+	return nil
+}
+
+func (o *IpOpt) String() string {
+	if *o.IP == nil {
+		return ""
+	}
+	return o.IP.String()
+}
diff --git a/opts/ip_test.go b/opts/ip_test.go
new file mode 100644
index 00000000..b6b526a5
--- /dev/null
+++ b/opts/ip_test.go
@@ -0,0 +1,54 @@
+package opts
+
+import (
+	"net"
+	"testing"
+)
+
+func TestIpOptString(t *testing.T) {
+	addresses := []string{"", "0.0.0.0"}
+	var ip net.IP
+
+	for _, address := range addresses {
+		stringAddress := NewIpOpt(&ip, address).String()
+		if stringAddress != address {
+			t.Fatalf("IpOpt string should be `%s`, not `%s`", address, stringAddress)
+		}
+	}
+}
+
+func TestNewIpOptInvalidDefaultVal(t *testing.T) {
+	ip := net.IPv4(127, 0, 0, 1)
+	defaultVal := "Not an ip"
+
+	ipOpt := NewIpOpt(&ip, defaultVal)
+
+	expected := "127.0.0.1"
+	if ipOpt.String() != expected {
+		t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String())
+	}
+}
+
+func TestNewIpOptValidDefaultVal(t *testing.T) {
+	ip := net.IPv4(127, 0, 0, 1)
+	defaultVal := "192.168.1.1"
+
+	ipOpt := NewIpOpt(&ip, defaultVal)
+
+	expected := "192.168.1.1"
+	if ipOpt.String() != expected {
+		t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String())
+	}
+}
+
+func TestIpOptSetInvalidVal(t *testing.T) {
+	ip := net.IPv4(127, 0, 0, 1)
+	ipOpt := &IpOpt{IP: &ip}
+
+	invalidIp := "invalid ip"
+	expectedError := "invalid ip is not an ip address"
+	err := ipOpt.Set(invalidIp)
+	if err == nil || err.Error() != expectedError {
+		t.Fatalf("Expected an Error with [%v], got [%v]", expectedError, err.Error())
+	}
+}
diff --git a/opts/opts.go b/opts/opts.go
new file mode 100644
index 00000000..6d1b2f7b
--- /dev/null
+++ b/opts/opts.go
@@ -0,0 +1,379 @@
+package opts
+
+import (
+	"fmt"
+	"net"
+	"os"
+	"path"
+	"regexp"
+	"strings"
+
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/ulimit"
+	"github.com/docker/docker/volume"
+)
+
+var (
+	alphaRegexp  = regexp.MustCompile(`[a-zA-Z]`)
+	domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
+	// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag, e.g. docker -d -H tcp://:8080
+	DefaultHTTPHost = "127.0.0.1"
+	// DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag, e.g. docker -d -H tcp://
+	// TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter
+	// is not supplied. A better longer-term solution would be to use a named
+	// pipe as the default on the Windows daemon.
+	DefaultHTTPPort = 2375 // Default HTTP Port
+	// DefaultUnixSocket Path for the unix socket.
+	// Docker daemon by default always listens on the default unix socket
+	DefaultUnixSocket = "/var/run/docker.sock"
+)
+
+// ListVar defines a flag with the specified names and usage, and puts the
+// value list into a ListOpts that will hold the values.
+func ListVar(values *[]string, names []string, usage string) {
+	flag.Var(NewListOptsRef(values, nil), names, usage)
+}
+
+// MapVar defines a flag with the specified names and usage, and puts the
+// value map into a MapOpts that will hold the values (key,value).
+func MapVar(values map[string]string, names []string, usage string) {
+	flag.Var(NewMapOpts(values, nil), names, usage)
+}
+
+// LogOptsVar defines a flag with the specified names and usage for --log-opts,
+// and puts the value map into a MapOpts that will hold the values (key,value).
+func LogOptsVar(values map[string]string, names []string, usage string) {
+	flag.Var(NewMapOpts(values, nil), names, usage)
+}
+
+// HostListVar defines a flag with the specified names and usage, and puts the
+// value into a ListOpts that will hold the values, validating the Host format.
+func HostListVar(values *[]string, names []string, usage string) {
+	flag.Var(NewListOptsRef(values, ValidateHost), names, usage)
+}
+
+// IPListVar defines a flag with the specified names and usage, and puts the
+// value into a ListOpts that will hold the values, validating the IP format.
+func IPListVar(values *[]string, names []string, usage string) {
+	flag.Var(NewListOptsRef(values, ValidateIPAddress), names, usage)
+}
+
+// DNSSearchListVar defines a flag with the specified names and usage, and puts
+// the value into a ListOpts that will hold the values, validating the DNS
+// search format.
+func DNSSearchListVar(values *[]string, names []string, usage string) {
+	flag.Var(NewListOptsRef(values, ValidateDNSSearch), names, usage)
+}
+
+// IPVar defines a flag with the specified names and usage for an IP, and will
+// use the specified defaultValue if the specified value is not valid.
+func IPVar(value *net.IP, names []string, defaultValue, usage string) {
+	flag.Var(NewIpOpt(value, defaultValue), names, usage)
+}
+
+// LabelListVar defines a flag with the specified names and usage, and puts the
+// value into a ListOpts that will hold the values, validating the label format.
+func LabelListVar(values *[]string, names []string, usage string) {
+	flag.Var(NewListOptsRef(values, ValidateLabel), names, usage)
+}
+
+// UlimitMapVar defines a flag with the specified names and usage for --ulimit,
+// and puts the value map into a UlimitOpt that will hold the values.
+func UlimitMapVar(values *map[string]*ulimit.Ulimit, names []string, usage string) {
+	flag.Var(NewUlimitOpt(values), names, usage)
+}
+
+// ListOpts is a type that holds a list of values and a validation function.
+type ListOpts struct {
+	values    *[]string
+	validator ValidatorFctType
+}
+
+// NewListOpts creates a new ListOpts with the specified validator.
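+// A minimal usage sketch (the values are illustrative, not from this file):
+//
+//	envs := NewListOpts(ValidateEnv)
+//	_ = envs.Set("MODE=production") // kept as-is by ValidateEnv
+//	_ = envs.Set("PATH")            // becomes "PATH=<current value>" via os.Getenv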
+func NewListOpts(validator ValidatorFctType) ListOpts {
+	var values []string
+	return *NewListOptsRef(&values, validator)
+}
+
+// NewListOptsRef creates a new ListOpts backed by the given values slice,
+// with the specified validator.
+func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
+	return &ListOpts{
+		values:    values,
+		validator: validator,
+	}
+}
+
+func (opts *ListOpts) String() string {
+	return fmt.Sprintf("%v", []string((*opts.values)))
+}
+
+// Set validates the input value if a validator is set, then appends it to
+// the internal slice.
+func (opts *ListOpts) Set(value string) error {
+	if opts.validator != nil {
+		v, err := opts.validator(value)
+		if err != nil {
+			return err
+		}
+		value = v
+	}
+	(*opts.values) = append((*opts.values), value)
+	return nil
+}
+
+// Delete removes the given element from the slice.
+func (opts *ListOpts) Delete(key string) {
+	for i, k := range *opts.values {
+		if k == key {
+			(*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...)
+			return
+		}
+	}
+}
+
+// GetMap returns the values as a map, in order to avoid duplicates.
+// FIXME: can we remove this?
+func (opts *ListOpts) GetMap() map[string]struct{} {
+	ret := make(map[string]struct{})
+	for _, k := range *opts.values {
+		ret[k] = struct{}{}
+	}
+	return ret
+}
+
+// GetAll returns the values slice.
+// FIXME: Can we remove this?
+func (opts *ListOpts) GetAll() []string {
+	return (*opts.values)
+}
+
+// Get checks the existence of the given key.
+func (opts *ListOpts) Get(key string) bool {
+	for _, k := range *opts.values {
+		if k == key {
+			return true
+		}
+	}
+	return false
+}
+
+// Len returns the number of elements in the slice.
+func (opts *ListOpts) Len() int {
+	return len((*opts.values))
+}
+
+// MapOpts is a type that holds a map of values and a validation function.
+type MapOpts struct {
+	values    map[string]string
+	validator ValidatorFctType
+}
+
+// Set validates the input value if a validator is set, then adds it to the
+// internal map, splitting on '='.
+func (opts *MapOpts) Set(value string) error {
+	if opts.validator != nil {
+		v, err := opts.validator(value)
+		if err != nil {
+			return err
+		}
+		value = v
+	}
+	vals := strings.SplitN(value, "=", 2)
+	if len(vals) == 1 {
+		(opts.values)[vals[0]] = ""
+	} else {
+		(opts.values)[vals[0]] = vals[1]
+	}
+	return nil
+}
+
+func (opts *MapOpts) String() string {
+	return fmt.Sprintf("%v", map[string]string((opts.values)))
+}
+
+func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
+	if values == nil {
+		values = make(map[string]string)
+	}
+	return &MapOpts{
+		values:    values,
+		validator: validator,
+	}
+}
+
+// ValidatorFctType is a validation function that returns a validated string
+// and/or an error.
+type ValidatorFctType func(val string) (string, error)
+
+// ValidatorFctListType is a validation function that returns a validated
+// list of strings and/or an error.
+type ValidatorFctListType func(val string) ([]string, error)
+
+// ValidateAttach validates that the specified string is a valid attach option.
+func ValidateAttach(val string) (string, error) {
+	s := strings.ToLower(val)
+	for _, str := range []string{"stdin", "stdout", "stderr"} {
+		if s == str {
+			return s, nil
+		}
+	}
+	return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR")
+}
+
+// ValidateLink validates that the specified string has a valid link format (containerName:alias).
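+// For example, "db:database" would link the container "db" under the alias
+// "database"; per the package tests, a bare container name is accepted as well.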
+func ValidateLink(val string) (string, error) { + if _, _, err := parsers.ParseLink(val); err != nil { + return val, err + } + return val, nil +} + +// ValidateDevice Validate a path for devices +// It will make sure 'val' is in the form: +// [host-dir:]container-path[:mode] +func ValidateDevice(val string) (string, error) { + return validatePath(val, false) +} + +// ValidatePath Validate a path for volumes +// It will make sure 'val' is in the form: +// [host-dir:]container-path[:rw|ro] +// It will also validate the mount mode. +func ValidatePath(val string) (string, error) { + return validatePath(val, true) +} + +func validatePath(val string, validateMountMode bool) (string, error) { + var containerPath string + var mode string + + if strings.Count(val, ":") > 2 { + return val, fmt.Errorf("bad format for volumes: %s", val) + } + + splited := strings.SplitN(val, ":", 3) + if splited[0] == "" { + return val, fmt.Errorf("bad format for volumes: %s", val) + } + switch len(splited) { + case 1: + containerPath = splited[0] + val = path.Clean(containerPath) + case 2: + if isValid, _ := volume.ValidateMountMode(splited[1]); validateMountMode && isValid { + containerPath = splited[0] + mode = splited[1] + val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode) + } else { + containerPath = splited[1] + val = fmt.Sprintf("%s:%s", splited[0], path.Clean(containerPath)) + } + case 3: + containerPath = splited[1] + mode = splited[2] + if isValid, _ := volume.ValidateMountMode(splited[2]); validateMountMode && !isValid { + return val, fmt.Errorf("bad mount mode specified : %s", mode) + } + val = fmt.Sprintf("%s:%s:%s", splited[0], containerPath, mode) + } + + if !path.IsAbs(containerPath) { + return val, fmt.Errorf("%s is not an absolute path", containerPath) + } + return val, nil +} + +// ValidateEnv Validate an environment variable and returns it +// It will use EnvironmentVariableRegexp to ensure the name of the environment variable is valid. +// If no value is specified, it returns the current value using os.Getenv. +func ValidateEnv(val string) (string, error) { + arr := strings.Split(val, "=") + if len(arr) > 1 { + return val, nil + } + if !EnvironmentVariableRegexp.MatchString(arr[0]) { + return val, ErrBadEnvVariable{fmt.Sprintf("variable '%s' is not a valid environment variable", val)} + } + if !doesEnvExist(val) { + return val, nil + } + return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil +} + +// ValidateIPAddress Validates an Ip address +func ValidateIPAddress(val string) (string, error) { + var ip = net.ParseIP(strings.TrimSpace(val)) + if ip != nil { + return ip.String(), nil + } + return "", fmt.Errorf("%s is not an ip address", val) +} + +// ValidateMACAddress Validates a MAC address +func ValidateMACAddress(val string) (string, error) { + _, err := net.ParseMAC(strings.TrimSpace(val)) + if err != nil { + return "", err + } + return val, nil +} + +// ValidateDNSSearch Validates domain for resolvconf search configuration. +// A zero length domain is represented by . +func ValidateDNSSearch(val string) (string, error) { + if val = strings.Trim(val, " "); val == "." 
{ + return val, nil + } + return validateDomain(val) +} + +func validateDomain(val string) (string, error) { + if alphaRegexp.FindString(val) == "" { + return "", fmt.Errorf("%s is not a valid domain", val) + } + ns := domainRegexp.FindSubmatch([]byte(val)) + if len(ns) > 0 && len(ns[1]) < 255 { + return string(ns[1]), nil + } + return "", fmt.Errorf("%s is not a valid domain", val) +} + +// ValidateExtraHost Validate that the given string is a valid extrahost and returns it +// ExtraHost are in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6) +func ValidateExtraHost(val string) (string, error) { + // allow for IPv6 addresses in extra hosts by only splitting on first ":" + arr := strings.SplitN(val, ":", 2) + if len(arr) != 2 || len(arr[0]) == 0 { + return "", fmt.Errorf("bad format for add-host: %q", val) + } + if _, err := ValidateIPAddress(arr[1]); err != nil { + return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) + } + return val, nil +} + +// ValidateLabel Validate that the given string is a valid label, and returns it +// Labels are in the form on key=value +func ValidateLabel(val string) (string, error) { + if strings.Count(val, "=") < 1 { + return "", fmt.Errorf("bad attribute format: %s", val) + } + return val, nil +} + +// ValidateHost Validate that the given string is a valid host and returns it +func ValidateHost(val string) (string, error) { + host, err := parsers.ParseHost(DefaultHTTPHost, DefaultUnixSocket, val) + if err != nil { + return val, err + } + return host, nil +} + +func doesEnvExist(name string) bool { + for _, entry := range os.Environ() { + parts := strings.SplitN(entry, "=", 2) + if parts[0] == name { + return true + } + } + return false +} diff --git a/opts/opts_test.go b/opts/opts_test.go new file mode 100644 index 00000000..f08df30b --- /dev/null +++ b/opts/opts_test.go @@ -0,0 +1,479 @@ +package opts + +import ( + "fmt" + "os" + "strings" + "testing" +) + +func TestValidateIPAddress(t *testing.T) { + if ret, err := ValidateIPAddress(`1.2.3.4`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`1.2.3.4`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`127.0.0.1`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`127.0.0.1`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`::1`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`::1`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`127`); err == nil || ret != "" { + t.Fatalf("ValidateIPAddress(`127`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`random invalid string`); err == nil || ret != "" { + t.Fatalf("ValidateIPAddress(`random invalid string`) got %s %s", ret, err) + } + +} + +func TestMapOpts(t *testing.T) { + tmpMap := make(map[string]string) + o := NewMapOpts(tmpMap, logOptsValidator) + o.Set("max-size=1") + if o.String() != "map[max-size:1]" { + t.Errorf("%s != [map[max-size:1]", o.String()) + } + + o.Set("max-file=2") + if len(tmpMap) != 2 { + t.Errorf("map length %d != 2", len(tmpMap)) + } + + if tmpMap["max-file"] != "2" { + t.Errorf("max-file = %s != 2", tmpMap["max-file"]) + } + + if tmpMap["max-size"] != "1" { + t.Errorf("max-size = %s != 1", tmpMap["max-size"]) + } + if o.Set("dummy-val=3") == nil { + t.Errorf("validator is not being called") + } +} + +func TestValidateMACAddress(t *testing.T) { + if _, err := ValidateMACAddress(`92:d0:c6:0a:29:33`); err != nil { + t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:29:33`) got %s", err) + } + + if _, err := 
ValidateMACAddress(`92:d0:c6:0a:33`); err == nil { + t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:33`) succeeded; expected failure on invalid MAC") + } + + if _, err := ValidateMACAddress(`random invalid string`); err == nil { + t.Fatalf("ValidateMACAddress(`random invalid string`) succeeded; expected failure on invalid MAC") + } +} + +func TestListOptsWithoutValidator(t *testing.T) { + o := NewListOpts(nil) + o.Set("foo") + if o.String() != "[foo]" { + t.Errorf("%s != [foo]", o.String()) + } + o.Set("bar") + if o.Len() != 2 { + t.Errorf("%d != 2", o.Len()) + } + o.Set("bar") + if o.Len() != 3 { + t.Errorf("%d != 3", o.Len()) + } + if !o.Get("bar") { + t.Error("o.Get(\"bar\") == false") + } + if o.Get("baz") { + t.Error("o.Get(\"baz\") == true") + } + o.Delete("foo") + if o.String() != "[bar bar]" { + t.Errorf("%s != [bar bar]", o.String()) + } + listOpts := o.GetAll() + if len(listOpts) != 2 || listOpts[0] != "bar" || listOpts[1] != "bar" { + t.Errorf("Expected [[bar bar]], got [%v]", listOpts) + } + mapListOpts := o.GetMap() + if len(mapListOpts) != 1 { + t.Errorf("Expected [map[bar:{}]], got [%v]", mapListOpts) + } + +} + +func TestListOptsWithValidator(t *testing.T) { + // Re-using logOptsvalidator (used by MapOpts) + o := NewListOpts(logOptsValidator) + o.Set("foo") + if o.String() != "[]" { + t.Errorf("%s != []", o.String()) + } + o.Set("foo=bar") + if o.String() != "[]" { + t.Errorf("%s != []", o.String()) + } + o.Set("max-file=2") + if o.Len() != 1 { + t.Errorf("%d != 1", o.Len()) + } + if !o.Get("max-file=2") { + t.Error("o.Get(\"max-file=2\") == false") + } + if o.Get("baz") { + t.Error("o.Get(\"baz\") == true") + } + o.Delete("max-file=2") + if o.String() != "[]" { + t.Errorf("%s != []", o.String()) + } +} + +func TestValidateDNSSearch(t *testing.T) { + valid := []string{ + `.`, + `a`, + `a.`, + `1.foo`, + `17.foo`, + `foo.bar`, + `foo.bar.baz`, + `foo.bar.`, + `foo.bar.baz`, + `foo1.bar2`, + `foo1.bar2.baz`, + `1foo.2bar.`, + `1foo.2bar.baz`, + `foo-1.bar-2`, + `foo-1.bar-2.baz`, + `foo-1.bar-2.`, + `foo-1.bar-2.baz`, + `1-foo.2-bar`, + `1-foo.2-bar.baz`, + `1-foo.2-bar.`, + `1-foo.2-bar.baz`, + } + + invalid := []string{ + ``, + ` `, + ` `, + `17`, + `17.`, + `.17`, + `17-.`, + `17-.foo`, + `.foo`, + `foo-.bar`, + `-foo.bar`, + `foo.bar-`, + `foo.bar-.baz`, + `foo.-bar`, + `foo.-bar.baz`, + `foo.bar.baz.this.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbe`, + } + + for _, domain := range valid { + if ret, err := ValidateDNSSearch(domain); err != nil || ret == "" { + t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) + } + } + + for _, domain := range invalid { + if ret, err := ValidateDNSSearch(domain); err == nil || ret != "" { + t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) + } + } +} + +func TestValidateExtraHosts(t *testing.T) { + valid := []string{ + `myhost:192.168.0.1`, + `thathost:10.0.2.1`, + `anipv6host:2003:ab34:e::1`, + `ipv6local:::1`, + } + + invalid := map[string]string{ + `myhost:192.notanipaddress.1`: `invalid IP`, + `thathost-nosemicolon10.0.0.1`: `bad format`, + `anipv6host:::::1`: `invalid IP`, + `ipv6local:::0::`: `invalid IP`, + } + + for _, extrahost := range valid { + if _, err := ValidateExtraHost(extrahost); err != nil { + t.Fatalf("ValidateExtraHost(`"+extrahost+"`) should succeed: error %v", err) + } + } + + for 
extraHost, expectedError := range invalid { + if _, err := ValidateExtraHost(extraHost); err == nil { + t.Fatalf("ValidateExtraHost(`%q`) should have failed validation", extraHost) + } else { + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("ValidateExtraHost(`%q`) error should contain %q", extraHost, expectedError) + } + } + } +} + +func TestValidateAttach(t *testing.T) { + valid := []string{ + "stdin", + "stdout", + "stderr", + "STDIN", + "STDOUT", + "STDERR", + } + if _, err := ValidateAttach("invalid"); err == nil { + t.Fatalf("Expected error with [valid streams are STDIN, STDOUT and STDERR], got nothing") + } + + for _, attach := range valid { + value, err := ValidateAttach(attach) + if err != nil { + t.Fatal(err) + } + if value != strings.ToLower(attach) { + t.Fatalf("Expected [%v], got [%v]", attach, value) + } + } +} + +func TestValidateLink(t *testing.T) { + valid := []string{ + "name", + "dcdfbe62ecd0:alias", + "7a67485460b7642516a4ad82ecefe7f57d0c4916f530561b71a50a3f9c4e33da", + "angry_torvalds:linus", + } + invalid := map[string]string{ + "": "empty string specified for links", + "too:much:of:it": "bad format for links: too:much:of:it", + } + + for _, link := range valid { + if _, err := ValidateLink(link); err != nil { + t.Fatalf("ValidateLink(`%q`) should succeed: error %q", link, err) + } + } + + for link, expectedError := range invalid { + if _, err := ValidateLink(link); err == nil { + t.Fatalf("ValidateLink(`%q`) should have failed validation", link) + } else { + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("ValidateLink(`%q`) error should contain %q", link, expectedError) + } + } + } +} + +func TestValidatePath(t *testing.T) { + valid := []string{ + "/home", + "/home:/home", + "/home:/something/else", + "/with space", + "/home:/with space", + "relative:/absolute-path", + "hostPath:/containerPath:ro", + "/hostPath:/containerPath:rw", + "/rw:/ro", + "/path:rw", + "/path:ro", + "/rw:rw", + } + invalid := map[string]string{ + "": "bad format for volumes: ", + "./": "./ is not an absolute path", + "../": "../ is not an absolute path", + "/:../": "../ is not an absolute path", + "/:path": "path is not an absolute path", + ":": "bad format for volumes: :", + "/tmp:": " is not an absolute path", + ":test": "bad format for volumes: :test", + ":/test": "bad format for volumes: :/test", + "tmp:": " is not an absolute path", + ":test:": "bad format for volumes: :test:", + "::": "bad format for volumes: ::", + ":::": "bad format for volumes: :::", + "/tmp:::": "bad format for volumes: /tmp:::", + ":/tmp::": "bad format for volumes: :/tmp::", + "path:ro": "path is not an absolute path", + "/path:/path:sw": "bad mount mode specified : sw", + "/path:/path:rwz": "bad mount mode specified : rwz", + } + + for _, path := range valid { + if _, err := ValidatePath(path); err != nil { + t.Fatalf("ValidatePath(`%q`) should succeed: error %q", path, err) + } + } + + for path, expectedError := range invalid { + if _, err := ValidatePath(path); err == nil { + t.Fatalf("ValidatePath(`%q`) should have failed validation", path) + } else { + if err.Error() != expectedError { + t.Fatalf("ValidatePath(`%q`) error should contain %q, got %q", path, expectedError, err.Error()) + } + } + } +} +func TestValidateDevice(t *testing.T) { + valid := []string{ + "/home", + "/home:/home", + "/home:/something/else", + "/with space", + "/home:/with space", + "relative:/absolute-path", + "hostPath:/containerPath:ro", + "/hostPath:/containerPath:rw", + "/hostPath:/containerPath:mrw", + } 
+ invalid := map[string]string{ + "": "bad format for volumes: ", + "./": "./ is not an absolute path", + "../": "../ is not an absolute path", + "/:../": "../ is not an absolute path", + "/:path": "path is not an absolute path", + ":": "bad format for volumes: :", + "/tmp:": " is not an absolute path", + ":test": "bad format for volumes: :test", + ":/test": "bad format for volumes: :/test", + "tmp:": " is not an absolute path", + ":test:": "bad format for volumes: :test:", + "::": "bad format for volumes: ::", + ":::": "bad format for volumes: :::", + "/tmp:::": "bad format for volumes: /tmp:::", + ":/tmp::": "bad format for volumes: :/tmp::", + "path:ro": "ro is not an absolute path", + } + + for _, path := range valid { + if _, err := ValidateDevice(path); err != nil { + t.Fatalf("ValidateDevice(`%q`) should succeed: error %q", path, err) + } + } + + for path, expectedError := range invalid { + if _, err := ValidateDevice(path); err == nil { + t.Fatalf("ValidateDevice(`%q`) should have failed validation", path) + } else { + if err.Error() != expectedError { + t.Fatalf("ValidateDevice(`%q`) error should contain %q, got %q", path, expectedError, err.Error()) + } + } + } +} + +func TestValidateEnv(t *testing.T) { + invalids := map[string]string{ + "some spaces": "poorly formatted environment: variable 'some spaces' is not a valid environment variable", + "asd!qwe": "poorly formatted environment: variable 'asd!qwe' is not a valid environment variable", + "1asd": "poorly formatted environment: variable '1asd' is not a valid environment variable", + "123": "poorly formatted environment: variable '123' is not a valid environment variable", + } + valids := map[string]string{ + "a": "a", + "something": "something", + "_=a": "_=a", + "env1=value1": "env1=value1", + "_env1=value1": "_env1=value1", + "env2=value2=value3": "env2=value2=value3", + "env3=abc!qwe": "env3=abc!qwe", + "env_4=value 4": "env_4=value 4", + "PATH": fmt.Sprintf("PATH=%v", os.Getenv("PATH")), + "PATH=something": "PATH=something", + } + for value, expectedError := range invalids { + _, err := ValidateEnv(value) + if err == nil { + t.Fatalf("Expected ErrBadEnvVariable, got nothing") + } + if _, ok := err.(ErrBadEnvVariable); !ok { + t.Fatalf("Expected ErrBadEnvVariable, got [%s]", err) + } + if err.Error() != expectedError { + t.Fatalf("Expected ErrBadEnvVariable with message [%s], got [%s]", expectedError, err.Error()) + } + } + for value, expected := range valids { + actual, err := ValidateEnv(value) + if err != nil { + t.Fatal(err) + } + if actual != expected { + t.Fatalf("Expected [%v], got [%v]", expected, actual) + } + } +} + +func TestValidateLabel(t *testing.T) { + if _, err := ValidateLabel("label"); err == nil || err.Error() != "bad attribute format: label" { + t.Fatalf("Expected an error [bad attribute format: label], go %v", err) + } + if actual, err := ValidateLabel("key1=value1"); err != nil || actual != "key1=value1" { + t.Fatalf("Expected [key1=value1], got [%v,%v]", actual, err) + } + // Validate it's working with more than one = + if actual, err := ValidateLabel("key1=value1=value2"); err != nil { + t.Fatalf("Expected [key1=value1=value2], got [%v,%v]", actual, err) + } + // Validate it's working with one more + if actual, err := ValidateLabel("key1=value1=value2=value3"); err != nil { + t.Fatalf("Expected [key1=value1=value2=value2], got [%v,%v]", actual, err) + } +} + +func TestValidateHost(t *testing.T) { + invalid := map[string]string{ + "anything": "Invalid bind address format: anything", + "something with 
spaces": "Invalid bind address format: something with spaces", + "://": "Invalid bind address format: ://", + "unknown://": "Invalid bind address format: unknown://", + "tcp://": "Invalid proto, expected tcp: ", + "tcp://:port": "Invalid bind address format: :port", + "tcp://invalid": "Invalid bind address format: invalid", + "tcp://invalid:port": "Invalid bind address format: invalid:port", + } + valid := map[string]string{ + "fd://": "fd://", + "fd://something": "fd://something", + "tcp://:2375": "tcp://127.0.0.1:2375", // default ip address + "tcp://:2376": "tcp://127.0.0.1:2376", // default ip address + "tcp://0.0.0.0:8080": "tcp://0.0.0.0:8080", + "tcp://192.168.0.0:12000": "tcp://192.168.0.0:12000", + "tcp://192.168:8080": "tcp://192.168:8080", + "tcp://0.0.0.0:1234567890": "tcp://0.0.0.0:1234567890", // yeah it's valid :P + "tcp://docker.com:2375": "tcp://docker.com:2375", + "unix://": "unix:///var/run/docker.sock", // default unix:// value + "unix://path/to/socket": "unix://path/to/socket", + } + + for value, errorMessage := range invalid { + if _, err := ValidateHost(value); err == nil || err.Error() != errorMessage { + t.Fatalf("Expected an error for %v with [%v], got [%v]", value, errorMessage, err) + } + } + for value, expected := range valid { + if actual, err := ValidateHost(value); err != nil || actual != expected { + t.Fatalf("Expected for %v [%v], got [%v, %v]", value, expected, actual, err) + } + } +} + +func logOptsValidator(val string) (string, error) { + allowedKeys := map[string]string{"max-size": "1", "max-file": "2"} + vals := strings.Split(val, "=") + if allowedKeys[vals[0]] != "" { + return val, nil + } + return "", fmt.Errorf("invalid key %s", vals[0]) +} diff --git a/opts/ulimit.go b/opts/ulimit.go new file mode 100644 index 00000000..f8d34365 --- /dev/null +++ b/opts/ulimit.go @@ -0,0 +1,47 @@ +package opts + +import ( + "fmt" + + "github.com/docker/docker/pkg/ulimit" +) + +type UlimitOpt struct { + values *map[string]*ulimit.Ulimit +} + +func NewUlimitOpt(ref *map[string]*ulimit.Ulimit) *UlimitOpt { + if ref == nil { + ref = &map[string]*ulimit.Ulimit{} + } + return &UlimitOpt{ref} +} + +func (o *UlimitOpt) Set(val string) error { + l, err := ulimit.Parse(val) + if err != nil { + return err + } + + (*o.values)[l.Name] = l + + return nil +} + +func (o *UlimitOpt) String() string { + var out []string + for _, v := range *o.values { + out = append(out, v.String()) + } + + return fmt.Sprintf("%v", out) +} + +func (o *UlimitOpt) GetList() []*ulimit.Ulimit { + var ulimits []*ulimit.Ulimit + for _, v := range *o.values { + ulimits = append(ulimits, v) + } + + return ulimits +} diff --git a/opts/ulimit_test.go b/opts/ulimit_test.go new file mode 100644 index 00000000..3845d1ec --- /dev/null +++ b/opts/ulimit_test.go @@ -0,0 +1,42 @@ +package opts + +import ( + "testing" + + "github.com/docker/docker/pkg/ulimit" +) + +func TestUlimitOpt(t *testing.T) { + ulimitMap := map[string]*ulimit.Ulimit{ + "nofile": {"nofile", 1024, 512}, + } + + ulimitOpt := NewUlimitOpt(&ulimitMap) + + expected := "[nofile=512:1024]" + if ulimitOpt.String() != expected { + t.Fatalf("Expected %v, got %v", expected, ulimitOpt) + } + + // Valid ulimit append to opts + if err := ulimitOpt.Set("core=1024:1024"); err != nil { + t.Fatal(err) + } + + // Invalid ulimit type returns an error and do not append to opts + if err := ulimitOpt.Set("notavalidtype=1024:1024"); err == nil { + t.Fatalf("Expected error on invalid ulimit type") + } + expected = "[nofile=512:1024 core=1024:1024]" + expected2 := 
"[core=1024:1024 nofile=512:1024]" + result := ulimitOpt.String() + if result != expected && result != expected2 { + t.Fatalf("Expected %v or %v, got %v", expected, expected2, ulimitOpt) + } + + // And test GetList + ulimits := ulimitOpt.GetList() + if len(ulimits) != 2 { + t.Fatalf("Expected a ulimit list of 2, got %v", ulimits) + } +} diff --git a/pkg/README.md b/pkg/README.md new file mode 100644 index 00000000..c4b78a8a --- /dev/null +++ b/pkg/README.md @@ -0,0 +1,11 @@ +pkg/ is a collection of utility packages used by the Docker project without being specific to its internals. + +Utility packages are kept separate from the docker core codebase to keep it as small and concise as possible. +If some utilities grow larger and their APIs stabilize, they may be moved to their own repository under the +Docker organization, to facilitate re-use by other projects. However that is not the priority. + +The directory `pkg` is named after the same directory in the camlistore project. Since Brad is a core +Go maintainer, we thought it made sense to copy his methods for organizing Go code :) Thanks Brad! + +Because utility packages are small and neatly separated from the rest of the codebase, they are a good +place to start for aspiring maintainers and contributors. Get in touch if you want to help maintain them! diff --git a/pkg/ansiescape/split.go b/pkg/ansiescape/split.go new file mode 100644 index 00000000..b097b25f --- /dev/null +++ b/pkg/ansiescape/split.go @@ -0,0 +1,89 @@ +package ansiescape + +import "bytes" + +// dropCR drops a leading or terminal \r from the data. +func dropCR(data []byte) []byte { + if len(data) > 0 && data[len(data)-1] == '\r' { + data = data[0 : len(data)-1] + } + if len(data) > 0 && data[0] == '\r' { + data = data[1:] + } + return data +} + +// escapeSequenceLength calculates the length of an ANSI escape sequence +// If there is not enough characters to match a sequence, -1 is returned, +// if there is no valid sequence 0 is returned, otherwise the number +// of bytes in the sequence is returned. Only returns length for +// line moving sequences. +func escapeSequenceLength(data []byte) int { + next := 0 + if len(data) <= next { + return -1 + } + if data[next] != '[' { + return 0 + } + for { + next = next + 1 + if len(data) <= next { + return -1 + } + if (data[next] > '9' || data[next] < '0') && data[next] != ';' { + break + } + } + if len(data) <= next { + return -1 + } + // Only match line moving codes + switch data[next] { + case 'A', 'B', 'E', 'F', 'H', 'h': + return next + 1 + } + + return 0 +} + +// ScanANSILines is a scanner function which splits the +// input based on ANSI escape codes and new lines. +func ScanANSILines(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, nil + } + + // Look for line moving escape sequence + if i := bytes.IndexByte(data, '\x1b'); i >= 0 { + last := 0 + for i >= 0 { + last = last + i + + // get length of ANSI escape sequence + sl := escapeSequenceLength(data[last+1:]) + if sl == -1 { + return 0, nil, nil + } + if sl == 0 { + // If no relevant sequence was found, skip + last = last + 1 + i = bytes.IndexByte(data[last:], '\x1b') + continue + } + + return last + 1 + sl, dropCR(data[0:(last)]), nil + } + } + if i := bytes.IndexByte(data, '\n'); i >= 0 { + // No escape sequence, check for new line + return i + 1, dropCR(data[0:i]), nil + } + + // If we're at EOF, we have a final, non-terminated line. Return it. 
+ if atEOF { + return len(data), dropCR(data), nil + } + // Request more data. + return 0, nil, nil +} diff --git a/pkg/ansiescape/split_test.go b/pkg/ansiescape/split_test.go new file mode 100644 index 00000000..ecb24b93 --- /dev/null +++ b/pkg/ansiescape/split_test.go @@ -0,0 +1,53 @@ +package ansiescape + +import ( + "bufio" + "strings" + "testing" +) + +func TestSplit(t *testing.T) { + lines := []string{ + "test line 1", + "another test line", + "some test line", + "line with non-cursor moving sequence \x1b[1T", // Scroll Down + "line with \x1b[31;1mcolor\x1b[0m then reset", // "color" in Bold Red + "cursor forward \x1b[1C and backward \x1b[1D", + "invalid sequence \x1babcd", + "", + "after empty", + } + splitSequences := []string{ + "\x1b[1A", // Cursor up + "\x1b[1B", // Cursor down + "\x1b[1E", // Cursor next line + "\x1b[1F", // Cursor previous line + "\x1b[1;1H", // Move cursor to position + "\x1b[1;1h", // Move cursor to position + "\n", + "\r\n", + "\n\r", + "\x1b[1A\r", + "\r\x1b[1A", + } + + for _, sequence := range splitSequences { + scanner := bufio.NewScanner(strings.NewReader(strings.Join(lines, sequence))) + scanner.Split(ScanANSILines) + i := 0 + for scanner.Scan() { + if i >= len(lines) { + t.Fatalf("Too many scanned lines") + } + scanned := scanner.Text() + if scanned != lines[i] { + t.Fatalf("Wrong line scanned with sequence %q\n\tExpected: %q\n\tActual: %q", sequence, lines[i], scanned) + } + i++ + } + if i < len(lines) { + t.Errorf("Wrong number of lines for sequence %q: %d, expected %d", sequence, i, len(lines)) + } + } +} diff --git a/pkg/archive/README.md b/pkg/archive/README.md new file mode 100644 index 00000000..7307d969 --- /dev/null +++ b/pkg/archive/README.md @@ -0,0 +1 @@ +This code provides helper functions for dealing with archive files. diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go new file mode 100644 index 00000000..3f3c819a --- /dev/null +++ b/pkg/archive/archive.go @@ -0,0 +1,908 @@ +package archive + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/bzip2" + "compress/gzip" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/system" +) + +type ( + Archive io.ReadCloser + ArchiveReader io.Reader + Compression int + TarChownOptions struct { + UID, GID int + } + TarOptions struct { + IncludeFiles []string + ExcludePatterns []string + Compression Compression + NoLchown bool + ChownOpts *TarChownOptions + IncludeSourceDir bool + // When unpacking, specifies whether overwriting a directory with a + // non-directory is allowed and vice versa. + NoOverwriteDirNonDir bool + // For each include when creating an archive, the included name will be + // replaced with the matching name from this map. + RebaseNames map[string]string + } + + // Archiver allows the reuse of most utility functions of this package + // with a pluggable Untar function. + Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + } + + // breakoutError is used to differentiate errors related to breaking out + // When testing archive breakout in the unit tests, this error is expected + // in order for the test to pass. 
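+	// An illustrative check (not in the original source): callers can tell a
+	// breakout apart from an ordinary failure with a type assertion,
+	// e.g. if _, ok := err.(breakoutError); ok { ... }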
+ breakoutError error +) + +var ( + ErrNotImplemented = errors.New("Function not implemented") + defaultArchiver = &Archiver{Untar} +) + +const ( + Uncompressed Compression = iota + Bzip2 + Gzip + Xz +) + +func IsArchive(header []byte) bool { + compression := DetectCompression(header) + if compression != Uncompressed { + return true + } + r := tar.NewReader(bytes.NewBuffer(header)) + _, err := r.Next() + return err == nil +} + +func DetectCompression(source []byte) Compression { + for compression, m := range map[Compression][]byte{ + Bzip2: {0x42, 0x5A, 0x68}, + Gzip: {0x1F, 0x8B, 0x08}, + Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, + } { + if len(source) < len(m) { + logrus.Debugf("Len too short") + continue + } + if bytes.Compare(m, source[:len(m)]) == 0 { + return compression + } + } + return Uncompressed +} + +func xzDecompress(archive io.Reader) (io.ReadCloser, error) { + args := []string{"xz", "-d", "-c", "-q"} + + return CmdStream(exec.Command(args[0], args[1:]...), archive) +} + +func DecompressStream(archive io.Reader) (io.ReadCloser, error) { + p := pools.BufioReader32KPool + buf := p.Get(archive) + bs, err := buf.Peek(10) + if err != nil { + return nil, err + } + + compression := DetectCompression(bs) + switch compression { + case Uncompressed: + readBufWrapper := p.NewReadCloserWrapper(buf, buf) + return readBufWrapper, nil + case Gzip: + gzReader, err := gzip.NewReader(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) + return readBufWrapper, nil + case Bzip2: + bz2Reader := bzip2.NewReader(buf) + readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) + return readBufWrapper, nil + case Xz: + xzReader, err := xzDecompress(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) + return readBufWrapper, nil + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) { + p := pools.BufioWriter32KPool + buf := p.Get(dest) + switch compression { + case Uncompressed: + writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) + return writeBufWrapper, nil + case Gzip: + gzWriter := gzip.NewWriter(dest) + writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) + return writeBufWrapper, nil + case Bzip2, Xz: + // archive/bzip2 does not support writing, and there is no xz support at all + // However, this is not a problem as docker only currently generates gzipped tars + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +func (compression *Compression) Extension() string { + switch *compression { + case Uncompressed: + return "tar" + case Bzip2: + return "tar.bz2" + case Gzip: + return "tar.gz" + case Xz: + return "tar.xz" + } + return "" +} + +type tarAppender struct { + TarWriter *tar.Writer + Buffer *bufio.Writer + + // for hardlink mapping + SeenFiles map[uint64]string +} + +// canonicalTarName provides a platform-independent and consistent posix-style +//path for files and directories to be archived regardless of the platform. 
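+// (An assumed illustration: on Windows, CanonicalTarNameForPath would rewrite
+// a path like `dir\sub` to `dir/sub`, and the isDir rule below then appends
+// the trailing slash, giving `dir/sub/`.)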
+func canonicalTarName(name string, isDir bool) (string, error) { + name, err := CanonicalTarNameForPath(name) + if err != nil { + return "", err + } + + // suffix with '/' for directories + if isDir && !strings.HasSuffix(name, "/") { + name += "/" + } + return name, nil +} + +func (ta *tarAppender) addTarFile(path, name string) error { + fi, err := os.Lstat(path) + if err != nil { + return err + } + + link := "" + if fi.Mode()&os.ModeSymlink != 0 { + if link, err = os.Readlink(path); err != nil { + return err + } + } + + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return err + } + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + name, err = canonicalTarName(name, fi.IsDir()) + if err != nil { + return fmt.Errorf("tar: cannot canonicalize path: %v", err) + } + hdr.Name = name + + nlink, inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys()) + if err != nil { + return err + } + + // if it's a regular file and has more than 1 link, + // it's hardlinked, so set the type flag accordingly + if fi.Mode().IsRegular() && nlink > 1 { + // a link should have a name that it links too + // and that linked name should be first in the tar archive + if oldpath, ok := ta.SeenFiles[inode]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 // This Must be here for the writer math to add up! + } else { + ta.SeenFiles[inode] = name + } + } + + capability, _ := system.Lgetxattr(path, "security.capability") + if capability != nil { + hdr.Xattrs = make(map[string]string) + hdr.Xattrs["security.capability"] = string(capability) + } + + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + + if hdr.Typeflag == tar.TypeReg { + file, err := os.Open(path) + if err != nil { + return err + } + + ta.Buffer.Reset(ta.TarWriter) + defer ta.Buffer.Reset(nil) + _, err = io.Copy(ta.Buffer, file) + file.Close() + if err != nil { + return err + } + err = ta.Buffer.Flush() + if err != nil { + return err + } + } + + return nil +} + +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions) error { + // hdr.Mode is in linux format, which we can use for sycalls, + // but for os.Foo() calls we need the mode converted to os.FileMode, + // so use hdrInfo.Mode() (they differ for e.g. setuid bits) + hdrInfo := hdr.FileInfo() + + switch hdr.Typeflag { + case tar.TypeDir: + // Create directory unless it exists as a directory already. + // In that case we just want to merge the two + if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { + if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { + return err + } + } + + case tar.TypeReg, tar.TypeRegA: + // Source is regular file + file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) + if err != nil { + return err + } + if _, err := io.Copy(file, reader); err != nil { + file.Close() + return err + } + file.Close() + + case tar.TypeBlock, tar.TypeChar, tar.TypeFifo: + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeLink: + targetPath := filepath.Join(extractDir, hdr.Linkname) + // check for hardlink breakout + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) + } + if err := os.Link(targetPath, path); err != nil { + return err + } + + case tar.TypeSymlink: + // path -> hdr.Linkname = targetPath + // e.g. 
/extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) + + // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because + // that symlink would first have to be created, which would be caught earlier, at this very check: + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) + } + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + + case tar.TypeXGlobalHeader: + logrus.Debugf("PAX Global Extended Headers found and ignored") + return nil + + default: + return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) + } + + // Lchown is not supported on Windows. + if Lchown && runtime.GOOS != "windows" { + if chownOpts == nil { + chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid} + } + if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { + return err + } + } + + for key, value := range hdr.Xattrs { + if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { + return err + } + } + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if err := handleLChmod(hdr, path, hdrInfo); err != nil { + return err + } + + ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} + // syscall.UtimesNano doesn't support a NOFOLLOW flag atm + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } else { + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. +func Tar(path string, compression Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: compression}) +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + + patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns) + + if err != nil { + return nil, err + } + + pipeReader, pipeWriter := io.Pipe() + + compressWriter, err := CompressStream(pipeWriter, options.Compression) + if err != nil { + return nil, err + } + + go func() { + ta := &tarAppender{ + TarWriter: tar.NewWriter(compressWriter), + Buffer: pools.BufioWriter32KPool.Get(nil), + SeenFiles: make(map[uint64]string), + } + + defer func() { + // Make sure to check the error on Close. 
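+			// (tar.Writer.Close flushes the archive's trailing blocks, so an
+			// unchecked error here could silently yield a truncated tar stream.)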
+ if err := ta.TarWriter.Close(); err != nil { + logrus.Debugf("Can't close tar writer: %s", err) + } + if err := compressWriter.Close(); err != nil { + logrus.Debugf("Can't close compress writer: %s", err) + } + if err := pipeWriter.Close(); err != nil { + logrus.Debugf("Can't close pipe writer: %s", err) + } + }() + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + + stat, err := os.Lstat(srcPath) + if err != nil { + return + } + + if !stat.IsDir() { + // We can't later join a non-dir with any includes because the + // 'walk' will error if "file/." is stat-ed and "file" is not a + // directory. So, we must split the source path and use the + // basename as the include. + if len(options.IncludeFiles) > 0 { + logrus.Warn("Tar: Can't archive a file with includes") + } + + dir, base := SplitPathDirEntry(srcPath) + srcPath = dir + options.IncludeFiles = []string{base} + } + + if len(options.IncludeFiles) == 0 { + options.IncludeFiles = []string{"."} + } + + seen := make(map[string]bool) + + for _, include := range options.IncludeFiles { + rebaseName := options.RebaseNames[include] + + // We can't use filepath.Join(srcPath, include) because this will + // clean away a trailing "." or "/" which may be important. + walkRoot := strings.Join([]string{srcPath, include}, string(filepath.Separator)) + filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { + if err != nil { + logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err) + return nil + } + + relFilePath, err := filepath.Rel(srcPath, filePath) + if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { + // Error getting relative path OR we are looking + // at the source directory path. Skip in both situations. + return nil + } + + if options.IncludeSourceDir && include == "." && relFilePath != "." { + relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) + } + + skip := false + + // If "include" is an exact match for the current file + // then even if there's an "excludePatterns" pattern that + // matches it, don't skip it. IOW, assume an explicit 'include' + // is asking for that file no matter what - which is true + // for some files, like .dockerignore and Dockerfile (sometimes) + if include != relFilePath { + skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs) + if err != nil { + logrus.Debugf("Error matching %s: %v", relFilePath, err) + return err + } + } + + if skip { + if !exceptions && f.IsDir() { + return filepath.SkipDir + } + return nil + } + + if seen[relFilePath] { + return nil + } + seen[relFilePath] = true + + // Rename the base resource. + if rebaseName != "" { + var replacement string + if rebaseName != string(filepath.Separator) { + // Special case the root directory to replace with an + // empty string instead so that we don't end up with + // double slashes in the paths. 
+ replacement = rebaseName + } + + relFilePath = strings.Replace(relFilePath, include, replacement, 1) + } + + if err := ta.addTarFile(filePath, relFilePath); err != nil { + logrus.Debugf("Can't add file %s to tar: %s", filePath, err) + } + return nil + }) + } + }() + + return pipeReader, nil +} + +func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { + tr := tar.NewReader(decompressedArchive) + trBuf := pools.BufioReader32KPool.Get(nil) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + + // Iterate through the files in the archive. +loop: + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return err + } + + // Normalize name, for safety and for a simple is-root check + // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: + // This keeps "..\" as-is, but normalizes "\..\" to "\". + hdr.Name = filepath.Clean(hdr.Name) + + for _, exclude := range options.ExcludePatterns { + if strings.HasPrefix(hdr.Name, exclude) { + continue loop + } + } + + // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in + // the filepath format for the OS on which the daemon is running. Hence + // the check for a slash-suffix MUST be done in an OS-agnostic way. + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = system.MkdirAll(parentPath, 0777) + if err != nil { + return err + } + } + } + + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return err + } + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + + // If path exits we almost always just want to remove and replace it + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing directory with a non-directory from the archive. + return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) + } + + if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing non-directory with a directory from the archive. + return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) + } + + if fi.IsDir() && hdr.Name == "." 
{ + continue + } + + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return err + } + } + } + trBuf.Reset(tr) + + if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil { + return err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} + if err := syscall.UtimesNano(path, ts); err != nil { + return err + } + } + return nil +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +// FIXME: specify behavior when target path exists vs. doesn't exist. +func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. +func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + dest = filepath.Clean(dest) + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + var r io.Reader = tarArchive + if decompress { + decompressedArchive, err := DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return Unpack(r, dest, options) +} + +func (archiver *Archiver) TarUntar(src, dst string) error { + logrus.Debugf("TarUntar(%s %s)", src, dst) + archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) + if err != nil { + return err + } + defer archive.Close() + return archiver.Untar(archive, dst, nil) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func TarUntar(src, dst string) error { + return defaultArchiver.TarUntar(src, dst) +} + +func (archiver *Archiver) UntarPath(src, dst string) error { + archive, err := os.Open(src) + if err != nil { + return err + } + defer archive.Close() + if err := archiver.Untar(archive, dst, nil); err != nil { + return err + } + return nil +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. 
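+// A hypothetical call site (both paths are placeholders):
+//
+//	err := UntarPath("/tmp/app.tar", "/opt/app")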
+func UntarPath(src, dst string) error {
+	return defaultArchiver.UntarPath(src, dst)
+}
+
+func (archiver *Archiver) CopyWithTar(src, dst string) error {
+	srcSt, err := os.Stat(src)
+	if err != nil {
+		return err
+	}
+	if !srcSt.IsDir() {
+		return archiver.CopyFileWithTar(src, dst)
+	}
+	// Create dst, then copy src's contents into it.
+	logrus.Debugf("Creating dest directory: %s", dst)
+	if err := system.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) {
+		return err
+	}
+	logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
+	return archiver.TarUntar(src, dst)
+}
+
+// CopyWithTar creates a tar archive of filesystem path `src`, and
+// unpacks it at filesystem path `dst`.
+// The archive is streamed directly with fixed buffering and no
+// intermediary disk IO.
+func CopyWithTar(src, dst string) error {
+	return defaultArchiver.CopyWithTar(src, dst)
+}
+
+func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
+	logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
+	srcSt, err := os.Stat(src)
+	if err != nil {
+		return err
+	}
+
+	if srcSt.IsDir() {
+		return fmt.Errorf("Can't copy a directory")
+	}
+
+	// Clean up the trailing slash. This must be done in an
+	// operating-system-specific manner.
+	if dst[len(dst)-1] == os.PathSeparator {
+		dst = filepath.Join(dst, filepath.Base(src))
+	}
+	// Create the holding directory if necessary
+	if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) {
+		return err
+	}
+
+	r, w := io.Pipe()
+	errC := promise.Go(func() error {
+		defer w.Close()
+
+		srcF, err := os.Open(src)
+		if err != nil {
+			return err
+		}
+		defer srcF.Close()
+
+		hdr, err := tar.FileInfoHeader(srcSt, "")
+		if err != nil {
+			return err
+		}
+		hdr.Name = filepath.Base(dst)
+		hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+
+		tw := tar.NewWriter(w)
+		defer tw.Close()
+		if err := tw.WriteHeader(hdr); err != nil {
+			return err
+		}
+		if _, err := io.Copy(tw, srcF); err != nil {
+			return err
+		}
+		return nil
+	})
+	defer func() {
+		// Propagate any error from the writer goroutine.
+		if er := <-errC; er != nil {
+			err = er
+		}
+	}()
+	return archiver.Untar(r, filepath.Dir(dst), nil)
+}
+
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
+//
+// Destination handling is operating-system specific, depending on
+// where the daemon is running. If `dst` ends with a trailing slash
+// the final destination path will be `dst/base(src)` (Linux) or
+// `dst\base(src)` (Windows).
+func CopyFileWithTar(src, dst string) (err error) {
+	return defaultArchiver.CopyFileWithTar(src, dst)
+}
+
+// CmdStream executes a command, and returns its stdout as a stream.
+// If the command fails to run or doesn't complete successfully, an error
+// will be returned, including anything written on stderr.
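+//
+// A usage sketch (an editor's illustration, not part of the original
+// source; it mirrors how this package drives external decompressors):
+//
+//	out, err := CmdStream(exec.Command("xz", "-d", "-c", "-q"), compressed)
+//	if err != nil {
+//		return nil, err
+//	}
+//	// out must be drained and closed by the caller.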
+func CmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { + if input != nil { + stdin, err := cmd.StdinPipe() + if err != nil { + return nil, err + } + // Write stdin if any + go func() { + io.Copy(stdin, input) + stdin.Close() + }() + } + stdout, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + stderr, err := cmd.StderrPipe() + if err != nil { + return nil, err + } + pipeR, pipeW := io.Pipe() + errChan := make(chan []byte) + // Collect stderr, we will use it in case of an error + go func() { + errText, e := ioutil.ReadAll(stderr) + if e != nil { + errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")") + } + errChan <- errText + }() + // Copy stdout to the returned pipe + go func() { + _, err := io.Copy(pipeW, stdout) + if err != nil { + pipeW.CloseWithError(err) + } + errText := <-errChan + if err := cmd.Wait(); err != nil { + pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText)) + } else { + pipeW.Close() + } + }() + // Run the command and return the pipe + if err := cmd.Start(); err != nil { + return nil, err + } + return pipeR, nil +} + +// NewTempArchive reads the content of src into a temporary file, and returns the contents +// of that file as an archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +func NewTempArchive(src Archive, dir string) (*TempArchive, error) { + f, err := ioutil.TempFile(dir, "") + if err != nil { + return nil, err + } + if _, err := io.Copy(f, src); err != nil { + return nil, err + } + if _, err := f.Seek(0, 0); err != nil { + return nil, err + } + st, err := f.Stat() + if err != nil { + return nil, err + } + size := st.Size() + return &TempArchive{File: f, Size: size}, nil +} + +type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. 
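+//
+// An editor's sketch of the intended lifecycle (not part of the original
+// source). Read already closes and deletes the file once Size bytes have
+// been consumed, so Close here is only a safety net:
+//
+//	tmp, err := NewTempArchive(src, "")
+//	if err != nil {
+//		return err
+//	}
+//	defer tmp.Close() // harmless even after Read has auto-closed
+//	_, err = io.Copy(dst, tmp)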
+func (archive *TempArchive) Close() error {
+	if archive.closed {
+		return nil
+	}
+
+	archive.closed = true
+
+	return archive.File.Close()
+}
+
+func (archive *TempArchive) Read(data []byte) (int, error) {
+	n, err := archive.File.Read(data)
+	archive.read += int64(n)
+	if err != nil || archive.read == archive.Size {
+		archive.Close()
+		os.Remove(archive.File.Name())
+	}
+	return n, err
+}
diff --git a/pkg/archive/archive_test.go b/pkg/archive/archive_test.go
new file mode 100644
index 00000000..b9bfc239
--- /dev/null
+++ b/pkg/archive/archive_test.go
@@ -0,0 +1,1204 @@
+package archive
+
+import (
+	"archive/tar"
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"strings"
+	"syscall"
+	"testing"
+	"time"
+
+	"github.com/docker/docker/pkg/system"
+)
+
+func TestIsArchiveNilHeader(t *testing.T) {
+	out := IsArchive(nil)
+	if out {
+		t.Fatalf("isArchive should return false as nil is not a valid archive header")
+	}
+}
+
+func TestIsArchiveInvalidHeader(t *testing.T) {
+	header := []byte{0x00, 0x01, 0x02}
+	out := IsArchive(header)
+	if out {
+		t.Fatalf("isArchive should return false as %x is not a valid archive header", header)
+	}
+}
+
+func TestIsArchiveBzip2(t *testing.T) {
+	header := []byte{0x42, 0x5A, 0x68}
+	out := IsArchive(header)
+	if !out {
+		t.Fatalf("isArchive should return true as %x is a bz2 header", header)
+	}
+}
+
+func TestIsArchiveZip(t *testing.T) {
+	header := []byte{0x50, 0x4b, 0x03, 0x04}
+	out := IsArchive(header)
+	if out {
+		t.Fatalf("isArchive should return false as %x is a zip header and zip is not a supported archive format", header)
+	}
+}
+
+func TestDecompressStreamGzip(t *testing.T) {
+	cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && gzip -f /tmp/archive")
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		t.Fatalf("Fail to create an archive file for test : %s.", output)
+	}
+	archive, err := os.Open("/tmp/archive.gz")
+	if err != nil {
+		t.Fatalf("Fail to open the archive file: %s.", err)
+	}
+	_, err = DecompressStream(archive)
+	if err != nil {
+		t.Fatalf("Failed to decompress a gzip file.")
+	}
+}
+
+func TestDecompressStreamBzip2(t *testing.T) {
+	cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && bzip2 -f /tmp/archive")
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		t.Fatalf("Fail to create an archive file for test : %s.", output)
+	}
+	archive, err := os.Open("/tmp/archive.bz2")
+	if err != nil {
+		t.Fatalf("Fail to open the archive file: %s.", err)
+	}
+	_, err = DecompressStream(archive)
+	if err != nil {
+		t.Fatalf("Failed to decompress a bzip2 file.")
+	}
+}
+
+func TestDecompressStreamXz(t *testing.T) {
+	cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && xz -f /tmp/archive")
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		t.Fatalf("Fail to create an archive file for test : %s.", output)
+	}
+	archive, err := os.Open("/tmp/archive.xz")
+	if err != nil {
+		t.Fatalf("Fail to open the archive file: %s.", err)
+	}
+	_, err = DecompressStream(archive)
+	if err != nil {
+		t.Fatalf("Failed to decompress a xz file.")
+	}
+}
+
+func TestCompressStreamXzUnsupported(t *testing.T) {
+	dest, err := os.Create("/tmp/dest")
+	if err != nil {
+		t.Fatalf("Fail to create the destination file")
+	}
+	_, err = CompressStream(dest, Xz)
+	if err == nil {
+		t.Fatalf("Should fail as xz is unsupported as a compression format.")
+	}
+}
+
+func TestCompressStreamBzip2Unsupported(t *testing.T) {
+	dest, err := os.Create("/tmp/dest")
+	if err != nil {
+		t.Fatalf("Fail to create the destination file")
+	}
+	_, err = CompressStream(dest, Bzip2)
+	if err == nil {
+		t.Fatalf("Should fail as bzip2 is unsupported as a compression format.")
+	}
+}
+
+func TestCompressStreamInvalid(t *testing.T) {
+	dest, err := os.Create("/tmp/dest")
+	if err != nil {
+		t.Fatalf("Fail to create the destination file")
+	}
+	_, err = CompressStream(dest, -1)
+	if err == nil {
+		t.Fatalf("Should fail as the compression format is invalid.")
+	}
+}
+
+func TestExtensionInvalid(t *testing.T) {
+	compression := Compression(-1)
+	output := compression.Extension()
+	if output != "" {
+		t.Fatalf("The extension of an invalid compression should be an empty string.")
+	}
+}
+
+func TestExtensionUncompressed(t *testing.T) {
+	compression := Uncompressed
+	output := compression.Extension()
+	if output != "tar" {
+		t.Fatalf("The extension of an uncompressed archive should be 'tar'.")
+	}
+}
+
+func TestExtensionBzip2(t *testing.T) {
+	compression := Bzip2
+	output := compression.Extension()
+	if output != "tar.bz2" {
+		t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'")
+	}
+}
+
+func TestExtensionGzip(t *testing.T) {
+	compression := Gzip
+	output := compression.Extension()
+	if output != "tar.gz" {
+		t.Fatalf("The extension of a gzip archive should be 'tar.gz'")
+	}
+}
+
+func TestExtensionXz(t *testing.T) {
+	compression := Xz
+	output := compression.Extension()
+	if output != "tar.xz" {
+		t.Fatalf("The extension of a xz archive should be 'tar.xz'")
+	}
+}
+
+func TestCmdStreamLargeStderr(t *testing.T) {
+	cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello")
+	out, err := CmdStream(cmd, nil)
+	if err != nil {
+		t.Fatalf("Failed to start command: %s", err)
+	}
+	errCh := make(chan error)
+	go func() {
+		_, err := io.Copy(ioutil.Discard, out)
+		errCh <- err
+	}()
+	select {
+	case err := <-errCh:
+		if err != nil {
+			t.Fatalf("Command should not have failed (err=%.100s...)", err)
+		}
+	case <-time.After(5 * time.Second):
+		t.Fatalf("Command did not complete in 5 seconds; probable deadlock")
+	}
+}
+
+func TestCmdStreamBad(t *testing.T) {
+	badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1")
+	out, err := CmdStream(badCmd, nil)
+	if err != nil {
+		t.Fatalf("Failed to start command: %s", err)
+	}
+	if output, err := ioutil.ReadAll(out); err == nil {
+		t.Fatalf("Command should have failed")
+	} else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" {
+		t.Fatalf("Wrong error value (%s)", err)
+	} else if s := string(output); s != "hello\n" {
+		t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
+	}
+}
+
+func TestCmdStreamGood(t *testing.T) {
+	cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0")
+	out, err := CmdStream(cmd, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if output, err := ioutil.ReadAll(out); err != nil {
+		t.Fatalf("Command should not have failed (err=%s)", err)
+	} else if s := string(output); s != "hello\n" {
+		t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
+	}
+}
+
+func TestUntarPathWithInvalidDest(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tempFolder)
+	invalidDestFolder := path.Join(tempFolder, "invalidDest")
+	// Create a src file
+	srcFile := path.Join(tempFolder, "src")
+	_, err = os.Create(srcFile)
+	if err != nil {
+		t.Fatalf("Fail to create the source file")
+	}
+	err = UntarPath(srcFile, invalidDestFolder)
+	if err == nil {
+		t.Fatalf("UntarPath with invalid destination path should throw an error.")
+	}
+}
+
+func TestUntarPathWithInvalidSrc(t *testing.T) {
+	dest, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
t.Fatalf("Fail to create the destination file") + } + defer os.RemoveAll(dest) + err = UntarPath("/invalid/path", dest) + if err == nil { + t.Fatalf("UntarPath with invalid src path should throw an error.") + } +} + +func TestUntarPath(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + srcFile := path.Join(tmpFolder, "src") + tarFile := path.Join(tmpFolder, "src.tar") + os.Create(path.Join(tmpFolder, "src")) + cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + destFolder := path.Join(tmpFolder, "dest") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatalf("Fail to create the destination file") + } + err = UntarPath(tarFile, destFolder) + if err != nil { + t.Fatalf("UntarPath shouldn't throw an error, %s.", err) + } + expectedFile := path.Join(destFolder, srcFile) + _, err = os.Stat(expectedFile) + if err != nil { + t.Fatalf("Destination folder should contain the source file but did not.") + } +} + +// Do the same test as above but with the destination as file, it should fail +func TestUntarPathWithDestinationFile(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + srcFile := path.Join(tmpFolder, "src") + tarFile := path.Join(tmpFolder, "src.tar") + os.Create(path.Join(tmpFolder, "src")) + cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + destFile := path.Join(tmpFolder, "dest") + _, err = os.Create(destFile) + if err != nil { + t.Fatalf("Fail to create the destination file") + } + err = UntarPath(tarFile, destFile) + if err == nil { + t.Fatalf("UntarPath should throw an error if the destination if a file") + } +} + +// Do the same test as above but with the destination folder already exists +// and the destination file is a directory +// It's working, see https://github.com/docker/docker/issues/10040 +func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + srcFile := path.Join(tmpFolder, "src") + tarFile := path.Join(tmpFolder, "src.tar") + os.Create(srcFile) + cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + destFolder := path.Join(tmpFolder, "dest") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatalf("Fail to create the destination folder") + } + // Let's create a folder that will has the same path as the extracted file (from tar) + destSrcFileAsFolder := path.Join(destFolder, srcFile) + err = os.MkdirAll(destSrcFileAsFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = UntarPath(tarFile, destFolder) + if err != nil { + t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") + } +} + +func TestCopyWithTarInvalidSrc(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(nil) + } + destFolder := path.Join(tempFolder, "dest") + invalidSrc := path.Join(tempFolder, "doesnotexists") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = CopyWithTar(invalidSrc, destFolder) + if err == nil { + t.Fatalf("archiver.CopyWithTar with invalid src 
+
+func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	srcFolder := path.Join(tempFolder, "src")
+	inexistentDestFolder := path.Join(tempFolder, "doesnotexists")
+	err = os.MkdirAll(srcFolder, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = CopyWithTar(srcFolder, inexistentDestFolder)
+	if err != nil {
+		t.Fatalf("CopyWithTar with a nonexistent folder shouldn't fail.")
+	}
+	_, err = os.Stat(inexistentDestFolder)
+	if err != nil {
+		t.Fatalf("CopyWithTar with a nonexistent folder should create it.")
+	}
+}
+
+// Test CopyWithTar with a file as src
+func TestCopyWithTarSrcFile(t *testing.T) {
+	folder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(folder)
+	dest := path.Join(folder, "dest")
+	srcFolder := path.Join(folder, "src")
+	src := path.Join(folder, path.Join("src", "src"))
+	err = os.MkdirAll(srcFolder, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = os.MkdirAll(dest, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(src, []byte("content"), 0777); err != nil {
+		t.Fatal(err)
+	}
+	err = CopyWithTar(src, dest)
+	if err != nil {
+		t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err)
+	}
+	_, err = os.Stat(dest)
+	// FIXME Check the content
+	if err != nil {
+		t.Fatalf("Destination file should be the same as the source.")
+	}
+}
+
+// Test CopyWithTar with a folder as src
+func TestCopyWithTarSrcFolder(t *testing.T) {
+	folder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(folder)
+	dest := path.Join(folder, "dest")
+	src := path.Join(folder, path.Join("src", "folder"))
+	err = os.MkdirAll(src, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = os.MkdirAll(dest, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(path.Join(src, "file"), []byte("content"), 0777); err != nil {
+		t.Fatal(err)
+	}
+	err = CopyWithTar(src, dest)
+	if err != nil {
+		t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err)
+	}
+	_, err = os.Stat(dest)
+	// FIXME Check the content (the file inside)
+	if err != nil {
+		t.Fatalf("Destination folder should contain the source file but did not.")
+	}
+}
+
+func TestCopyFileWithTarInvalidSrc(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tempFolder)
+	destFolder := path.Join(tempFolder, "dest")
+	err = os.MkdirAll(destFolder, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	invalidFile := path.Join(tempFolder, "doesnotexists")
+	err = CopyFileWithTar(invalidFile, destFolder)
+	if err == nil {
+		t.Fatalf("archiver.CopyFileWithTar with invalid src path should throw an error.")
+	}
+}
+
+func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tempFolder)
+	srcFile := path.Join(tempFolder, "src")
+	inexistentDestFolder := path.Join(tempFolder, "doesnotexists")
+	_, err = os.Create(srcFile)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = CopyFileWithTar(srcFile, inexistentDestFolder)
+	if err != nil {
+		t.Fatalf("CopyFileWithTar with a nonexistent folder shouldn't fail.")
+	}
+	_, err = os.Stat(inexistentDestFolder)
+	if err != nil {
+		t.Fatalf("CopyFileWithTar with a nonexistent folder should create it.")
+	}
+	// FIXME Test the src file and content
+}
+
+func TestCopyFileWithTarSrcFolder(t *testing.T) {
+	folder, err := ioutil.TempDir("", "docker-archive-copyfilewithtar-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(folder)
+	dest := path.Join(folder, "dest")
+	src := path.Join(folder, "srcfolder")
+	err = os.MkdirAll(src, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = os.MkdirAll(dest, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = CopyFileWithTar(src, dest)
+	if err == nil {
+		t.Fatalf("CopyFileWithTar should throw an error with a folder.")
+	}
+}
+
+func TestCopyFileWithTarSrcFile(t *testing.T) {
+	folder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(folder)
+	dest := path.Join(folder, "dest")
+	srcFolder := path.Join(folder, "src")
+	src := path.Join(folder, path.Join("src", "src"))
+	err = os.MkdirAll(srcFolder, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = os.MkdirAll(dest, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(src, []byte("content"), 0777); err != nil {
+		t.Fatal(err)
+	}
+	err = CopyFileWithTar(src, dest+"/")
+	if err != nil {
+		t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err)
+	}
+	_, err = os.Stat(dest)
+	if err != nil {
+		t.Fatalf("Destination folder should contain the source file but did not.")
+	}
+}
+
+func TestTarFiles(t *testing.T) {
+	// try without hardlinks
+	if err := checkNoChanges(1000, false); err != nil {
+		t.Fatal(err)
+	}
+	// try with hardlinks
+	if err := checkNoChanges(1000, true); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func checkNoChanges(fileNum int, hardlinks bool) error {
+	srcDir, err := ioutil.TempDir("", "docker-test-srcDir")
+	if err != nil {
+		return err
+	}
+	defer os.RemoveAll(srcDir)
+
+	destDir, err := ioutil.TempDir("", "docker-test-destDir")
+	if err != nil {
+		return err
+	}
+	defer os.RemoveAll(destDir)
+
+	_, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks)
+	if err != nil {
+		return err
+	}
+
+	err = TarUntar(srcDir, destDir)
+	if err != nil {
+		return err
+	}
+
+	changes, err := ChangesDirs(destDir, srcDir)
+	if err != nil {
+		return err
+	}
+	if len(changes) > 0 {
+		return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes))
+	}
+	return nil
+}
+
+func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) {
+	archive, err := TarWithOptions(origin, options)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer archive.Close()
+
+	buf := make([]byte, 10)
+	if _, err := archive.Read(buf); err != nil {
+		return nil, err
+	}
+	wrap := io.MultiReader(bytes.NewReader(buf), archive)
+
+	detectedCompression := DetectCompression(buf)
+	compression := options.Compression
+	if detectedCompression.Extension() != compression.Extension() {
+		return nil, fmt.Errorf("Wrong compression detected. Expected %s, found %s", compression.Extension(), detectedCompression.Extension())
+	}
+
+	tmp, err := ioutil.TempDir("", "docker-test-untar")
+	if err != nil {
+		return nil, err
+	}
+	defer os.RemoveAll(tmp)
+	if err := Untar(wrap, tmp, nil); err != nil {
+		return nil, err
+	}
+	if _, err := os.Stat(tmp); err != nil {
+		return nil, err
+	}
+
+	return ChangesDirs(origin, tmp)
+}
+
+func TestTarUntar(t *testing.T) {
+	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(origin)
+	if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil {
+		t.Fatal(err)
+	}
+
+	for _, c := range []Compression{
+		Uncompressed,
+		Gzip,
+	} {
+		changes, err := tarUntar(t, origin, &TarOptions{
+			Compression:     c,
+			ExcludePatterns: []string{"3"},
+		})
+
+		if err != nil {
+			t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err)
+		}
+
+		if len(changes) != 1 || changes[0].Path != "/3" {
+			t.Fatalf("Unexpected differences after tarUntar: %v", changes)
+		}
+	}
+}
+
+func TestTarUntarWithXattr(t *testing.T) {
+	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(origin)
+	if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := system.Lsetxattr(path.Join(origin, "2"), "security.capability", []byte{0x00}, 0); err != nil {
+		t.Fatal(err)
+	}
+
+	for _, c := range []Compression{
+		Uncompressed,
+		Gzip,
+	} {
+		changes, err := tarUntar(t, origin, &TarOptions{
+			Compression:     c,
+			ExcludePatterns: []string{"3"},
+		})
+
+		if err != nil {
+			t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err)
+		}
+
+		if len(changes) != 1 || changes[0].Path != "/3" {
+			t.Fatalf("Unexpected differences after tarUntar: %v", changes)
+		}
+		capability, _ := system.Lgetxattr(path.Join(origin, "2"), "security.capability")
+		if capability == nil || capability[0] != 0x00 {
+			t.Fatalf("Untar should have kept the 'security.capability' xattr.")
+		}
+	}
+}
+
+func TestTarWithOptions(t *testing.T) {
+	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ioutil.TempDir(origin, "folder"); err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(origin)
+	if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
+		t.Fatal(err)
+	}
+
+	cases := []struct {
+		opts       *TarOptions
+		numChanges int
+	}{
+		{&TarOptions{IncludeFiles: []string{"1"}}, 2},
+		{&TarOptions{ExcludePatterns: []string{"2"}}, 1},
+		{&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2},
+		{&TarOptions{IncludeFiles: []string{"1", "1"}}, 2},
+		{&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: map[string]string{"1": "test"}}, 4},
+	}
+	for _, testCase := range cases {
+		changes, err := tarUntar(t, origin, testCase.opts)
+		if err != nil {
+			t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err)
+		}
+		if len(changes) != testCase.numChanges {
+			t.Errorf("Expected %d changes, got %d for %+v:",
+				testCase.numChanges, len(changes), testCase.opts)
+		}
+	}
+}
+
+// Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz
+// use PAX Global Extended Headers.
+// If this fails, such archives cannot be decompressed during ADD.
+func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) {
+	hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader}
+	tmpDir, err := ioutil.TempDir("", "docker-test-archive-pax-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+	err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+// Some tar archives have both GNU-specific (huge uid) and Ustar-specific
+// (long name) entries. This is not supposed to happen (PAX should be used
+// instead of Ustar for long names), but it does happen in the wild, and it
+// should still work.
+func TestUntarUstarGnuConflict(t *testing.T) {
+	f, err := os.Open("testdata/broken.tar")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer f.Close()
+	found := false
+	tr := tar.NewReader(f)
+	// Iterate through the files in the archive.
+	for {
+		hdr, err := tr.Next()
+		if err == io.EOF {
+			// end of tar archive
+			break
+		}
+		if err != nil {
+			t.Fatal(err)
+		}
+		if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" {
+			found = true
+			break
+		}
+	}
+	if !found {
+		t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm")
+	}
+}
+
+func TestTarWithBlockCharFifo(t *testing.T) {
+	origin, err := ioutil.TempDir("", "docker-test-tar-hardlink")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(origin)
+	if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := system.Mknod(path.Join(origin, "2"), syscall.S_IFBLK, int(system.Mkdev(int64(12), int64(5)))); err != nil {
+		t.Fatal(err)
+	}
+	if err := system.Mknod(path.Join(origin, "3"), syscall.S_IFCHR, int(system.Mkdev(int64(12), int64(5)))); err != nil {
+		t.Fatal(err)
+	}
+	if err := system.Mknod(path.Join(origin, "4"), syscall.S_IFIFO, int(system.Mkdev(int64(12), int64(5)))); err != nil {
+		t.Fatal(err)
+	}
+
+	dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(dest)
+
+	// we'll do this in two steps to separate failure
+	fh, err := Tar(origin, Uncompressed)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// ensure we can read the whole thing with no error, before writing back out
+	buf, err := ioutil.ReadAll(fh)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	bRdr := bytes.NewReader(buf)
+	err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	changes, err := ChangesDirs(origin, dest)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(changes) > 0 {
+		t.Fatalf("Tar with special devices (block, char, fifo) should keep them (recreate them when untarring): %v", changes)
+	}
+}
+
+func TestTarWithHardLink(t *testing.T) {
+	origin, err := ioutil.TempDir("", "docker-test-tar-hardlink")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(origin)
+	if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := os.Link(path.Join(origin, "1"), path.Join(origin, "2")); err != nil {
+		t.Fatal(err)
+	}
+
+	var i1, i2 uint64
+	if i1, err = getNlink(path.Join(origin, "1")); err != nil {
+		t.Fatal(err)
+	}
+	// sanity check that we can hardlink
+	if i1 != 2 {
+		t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1)
+	}
+
+	dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(dest)
+
+	// we'll do this in two steps to separate failure
+	fh, err := Tar(origin, Uncompressed)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// ensure we can read the whole thing with no error, before writing back out
+	buf, err := ioutil.ReadAll(fh)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	bRdr := bytes.NewReader(buf)
+	err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if i1, err = getInode(path.Join(dest, "1")); err != nil {
+		t.Fatal(err)
+	}
+	if i2, err = getInode(path.Join(dest, "2")); err != nil {
+		t.Fatal(err)
+	}
+
+	if i1 != i2 {
+		t.Errorf("expected matching inodes, but got %d and %d", i1, i2)
+	}
+}
+
+func getNlink(path string) (uint64, error) {
+	stat, err := os.Stat(path)
+	if err != nil {
+		return 0, err
+	}
+	statT, ok := stat.Sys().(*syscall.Stat_t)
+	if !ok {
+		return 0, fmt.Errorf("expected type *syscall.Stat_t, got %T", stat.Sys())
+	}
+	// We need this conversion on ARM64
+	return uint64(statT.Nlink), nil
+}
+
+func getInode(path string) (uint64, error) {
+	stat, err := os.Stat(path)
+	if err != nil {
+		return 0, err
+	}
+	statT, ok := stat.Sys().(*syscall.Stat_t)
+	if !ok {
+		return 0, fmt.Errorf("expected type *syscall.Stat_t, got %T", stat.Sys())
+	}
+	return statT.Ino, nil
+}
+
+func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
+	fileData := []byte("fooo")
+	for n := 0; n < numberOfFiles; n++ {
+		fileName := fmt.Sprintf("file-%d", n)
+		if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
+			return 0, err
+		}
+		if makeLinks {
+			if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
+				return 0, err
+			}
+		}
+	}
+	totalSize := numberOfFiles * len(fileData)
+	return totalSize, nil
+}
+
+func BenchmarkTarUntar(b *testing.B) {
+	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+	if err != nil {
+		b.Fatal(err)
+	}
+	tempDir, err := ioutil.TempDir("", "docker-test-untar-destination")
+	if err != nil {
+		b.Fatal(err)
+	}
+	target := path.Join(tempDir, "dest")
+	n, err := prepareUntarSourceDirectory(100, origin, false)
+	if err != nil {
+		b.Fatal(err)
+	}
+	defer os.RemoveAll(origin)
+	defer os.RemoveAll(tempDir)
+
+	b.ResetTimer()
+	b.SetBytes(int64(n))
+	for n := 0; n < b.N; n++ {
+		err := TarUntar(origin, target)
+		if err != nil {
+			b.Fatal(err)
+		}
+		os.RemoveAll(target)
+	}
+}
+
+func BenchmarkTarUntarWithLinks(b *testing.B) {
+	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+	if err != nil {
+		b.Fatal(err)
+	}
+	tempDir, err := ioutil.TempDir("", "docker-test-untar-destination")
+	if err != nil {
+		b.Fatal(err)
+	}
+	target := path.Join(tempDir, "dest")
+	n, err := prepareUntarSourceDirectory(100, origin, true)
+	if err != nil {
+		b.Fatal(err)
+	}
+	defer os.RemoveAll(origin)
+	defer os.RemoveAll(tempDir)
+
+	b.ResetTimer()
+	b.SetBytes(int64(n))
+	for n := 0; n < b.N; n++ {
+		err := TarUntar(origin, target)
+		if err != nil {
+			b.Fatal(err)
+		}
+		os.RemoveAll(target)
+	}
+}
+
+func TestUntarInvalidFilenames(t *testing.T) {
+	for i, headers := range [][]*tar.Header{
+		{
+			{
+				Name:     "../victim/dotdot",
"../victim/dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { + { + // Note the leading slash + Name: "/../victim/slash-dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestUntarHardlinkToSymlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { + { + Name: "symlink1", + Typeflag: tar.TypeSymlink, + Linkname: "regfile", + Mode: 0644, + }, + { + Name: "symlink2", + Typeflag: tar.TypeLink, + Linkname: "symlink1", + Mode: 0644, + }, + { + Name: "regfile", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestUntarInvalidHardlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeLink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeLink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (hardlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try reading victim/hello (hardlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try removing victim directory (hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} + +func TestUntarInvalidSymlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeSymlink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeSymlink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try removing victim directory (symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try writing to victim/newdir/newfile with a symlink in the path + { + // this header needs to be before the next one, or else there is an error + Name: "dir/loophole", + Typeflag: tar.TypeSymlink, + Linkname: "../../victim", + Mode: 0755, + }, + { + Name: "dir/loophole/newdir/newfile", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestTempArchiveCloseMultipleTimes(t *testing.T) { + reader := ioutil.NopCloser(strings.NewReader("hello")) + tempArchive, err := NewTempArchive(reader, "") + buf := make([]byte, 10) + n, err := tempArchive.Read(buf) + if n != 5 { + t.Fatalf("Expected to read 5 bytes. Read %d instead", n) + } + for i := 0; i < 3; i++ { + if err = tempArchive.Close(); err != nil { + t.Fatalf("i=%d. Unexpected error closing temp archive: %v", i, err) + } + } +} diff --git a/pkg/archive/archive_unix.go b/pkg/archive/archive_unix.go new file mode 100644 index 00000000..9e1dfad2 --- /dev/null +++ b/pkg/archive/archive_unix.go @@ -0,0 +1,89 @@ +// +build !windows + +package archive + +import ( + "archive/tar" + "errors" + "os" + "syscall" + + "github.com/docker/docker/pkg/system" +) + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + return p, nil // already unix-style +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. 
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+	return perm // noop for unix as golang APIs provide perm bits correctly
+}
+
+func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) {
+	s, ok := stat.(*syscall.Stat_t)
+
+	if !ok {
+		err = errors.New("cannot convert stat value to syscall.Stat_t")
+		return
+	}
+
+	nlink = uint32(s.Nlink)
+	inode = uint64(s.Ino)
+
+	// Currently Go does not fill in the major/minor numbers.
+	if s.Mode&syscall.S_IFBLK != 0 ||
+		s.Mode&syscall.S_IFCHR != 0 {
+		hdr.Devmajor = int64(major(uint64(s.Rdev)))
+		hdr.Devminor = int64(minor(uint64(s.Rdev)))
+	}
+
+	return
+}
+
+func major(device uint64) uint64 {
+	return (device >> 8) & 0xfff
+}
+
+func minor(device uint64) uint64 {
+	return (device & 0xff) | ((device >> 12) & 0xfff00)
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+	mode := uint32(hdr.Mode & 07777)
+	switch hdr.Typeflag {
+	case tar.TypeBlock:
+		mode |= syscall.S_IFBLK
+	case tar.TypeChar:
+		mode |= syscall.S_IFCHR
+	case tar.TypeFifo:
+		mode |= syscall.S_IFIFO
+	}
+
+	if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
+		return err
+	}
+	return nil
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+	if hdr.Typeflag == tar.TypeLink {
+		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+			if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+				return err
+			}
+		}
+	} else if hdr.Typeflag != tar.TypeSymlink {
+		if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/pkg/archive/archive_unix_test.go b/pkg/archive/archive_unix_test.go
new file mode 100644
index 00000000..18f45c48
--- /dev/null
+++ b/pkg/archive/archive_unix_test.go
@@ -0,0 +1,60 @@
+// +build !windows
+
+package archive
+
+import (
+	"os"
+	"testing"
+)
+
+func TestCanonicalTarNameForPath(t *testing.T) {
+	cases := []struct{ in, expected string }{
+		{"foo", "foo"},
+		{"foo/bar", "foo/bar"},
+		{"foo/dir/", "foo/dir/"},
+	}
+	for _, v := range cases {
+		if out, err := CanonicalTarNameForPath(v.in); err != nil {
+			t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err)
+		} else if out != v.expected {
+			t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out)
+		}
+	}
+}
+
+func TestCanonicalTarName(t *testing.T) {
+	cases := []struct {
+		in       string
+		isDir    bool
+		expected string
+	}{
+		{"foo", false, "foo"},
+		{"foo", true, "foo/"},
+		{"foo/bar", false, "foo/bar"},
+		{"foo/bar", true, "foo/bar/"},
+	}
+	for _, v := range cases {
+		if out, err := canonicalTarName(v.in, v.isDir); err != nil {
+			t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err)
+		} else if out != v.expected {
+			t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out)
+		}
+	}
+}
+
+func TestChmodTarEntry(t *testing.T) {
+	cases := []struct {
+		in, expected os.FileMode
+	}{
+		{0000, 0000},
+		{0777, 0777},
+		{0644, 0644},
+		{0755, 0755},
+		{0444, 0444},
+	}
+	for _, v := range cases {
+		if out := chmodTarEntry(v.in); out != v.expected {
+			t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out)
expected:%v got:%v", v.expected, out) + } + } +} diff --git a/pkg/archive/archive_windows.go b/pkg/archive/archive_windows.go new file mode 100644 index 00000000..10db4bd0 --- /dev/null +++ b/pkg/archive/archive_windows.go @@ -0,0 +1,50 @@ +// +build windows + +package archive + +import ( + "archive/tar" + "fmt" + "os" + "strings" +) + +// canonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + // windows: convert windows style relative path with backslashes + // into forward slashes. Since windows does not allow '/' or '\' + // in file names, it is mostly safe to replace however we must + // check just in case + if strings.Contains(p, "/") { + return "", fmt.Errorf("Windows path contains forward slash: %s", p) + } + return strings.Replace(p, string(os.PathSeparator), "/", -1), nil + +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. +func chmodTarEntry(perm os.FileMode) os.FileMode { + perm &= 0755 + // Add the x bit: make everything +x from windows + perm |= 0111 + + return perm +} + +func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) { + // do nothing. no notion of Rdev, Inode, Nlink in stat on Windows + return +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + return nil +} diff --git a/pkg/archive/archive_windows_test.go b/pkg/archive/archive_windows_test.go new file mode 100644 index 00000000..72bc71e0 --- /dev/null +++ b/pkg/archive/archive_windows_test.go @@ -0,0 +1,65 @@ +// +build windows + +package archive + +import ( + "os" + "testing" +) + +func TestCanonicalTarNameForPath(t *testing.T) { + cases := []struct { + in, expected string + shouldFail bool + }{ + {"foo", "foo", false}, + {"foo/bar", "___", true}, // unix-styled windows path must fail + {`foo\bar`, "foo/bar", false}, + } + for _, v := range cases { + if out, err := CanonicalTarNameForPath(v.in); err != nil && !v.shouldFail { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if v.shouldFail && err == nil { + t.Fatalf("canonical path call should have failed with error. in=%s out=%s", v.in, out) + } else if !v.shouldFail && out != v.expected { + t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) + } + } +} + +func TestCanonicalTarName(t *testing.T) { + cases := []struct { + in string + isDir bool + expected string + }{ + {"foo", false, "foo"}, + {"foo", true, "foo/"}, + {`foo\bar`, false, "foo/bar"}, + {`foo\bar`, true, "foo/bar/"}, + } + for _, v := range cases { + if out, err := canonicalTarName(v.in, v.isDir); err != nil { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if out != v.expected { + t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) + } + } +} + +func TestChmodTarEntry(t *testing.T) { + cases := []struct { + in, expected os.FileMode + }{ + {0000, 0111}, + {0777, 0755}, + {0644, 0755}, + {0755, 0755}, + {0444, 0555}, + } + for _, v := range cases { + if out := chmodTarEntry(v.in); out != v.expected { + t.Fatalf("wrong chmod. 
expected:%v got:%v", v.expected, out) + } + } +} diff --git a/pkg/archive/changes.go b/pkg/archive/changes.go new file mode 100644 index 00000000..689d9a21 --- /dev/null +++ b/pkg/archive/changes.go @@ -0,0 +1,383 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" +) + +type ChangeType int + +const ( + ChangeModify = iota + ChangeAdd + ChangeDelete +) + +type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + var kind string + switch change.Kind { + case ChangeModify: + kind = "C" + case ChangeAdd: + kind = "A" + case ChangeDelete: + kind = "D" + } + return fmt.Sprintf("%s %s", kind, change.Path) +} + +// for sort.Sort +type changesByPath []Change + +func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } +func (c changesByPath) Len() int { return len(c) } +func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } + +// Gnu tar and the go tar writer don't have sub-second mtime +// precision, which is problematic when we apply changes via tar +// files, we handle this by comparing for exact times, *or* same +// second count and either a or b having exactly 0 nanoseconds +func sameFsTime(a, b time.Time) bool { + return a == b || + (a.Unix() == b.Unix() && + (a.Nanosecond() == 0 || b.Nanosecond() == 0)) +} + +func sameFsTimeSpec(a, b syscall.Timespec) bool { + return a.Sec == b.Sec && + (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) +} + +// Changes walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func Changes(layers []string, rw string) ([]Change, error) { + var ( + changes []Change + changedDirs = make(map[string]struct{}) + ) + + err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(rw, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + // Skip AUFS metadata + if matched, err := filepath.Match(string(os.PathSeparator)+".wh..wh.*", path); err != nil || matched { + return err + } + + change := Change{ + Path: path, + } + + // Find out what kind of modification happened + file := filepath.Base(path) + // If there is a whiteout, then the file was removed + if strings.HasPrefix(file, ".wh.") { + originalFile := file[len(".wh."):] + change.Path = filepath.Join(filepath.Dir(path), originalFile) + change.Kind = ChangeDelete + } else { + // Otherwise, the file was added + change.Kind = ChangeAdd + + // ...Unless it already existed in a top layer, in which case, it's a modification + for _, layer := range layers { + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the top layer, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. 
+					// if you modify /foo/bar/baz, then /foo will be part of the changed
+					// files only because it's the parent of bar.
+					if stat.IsDir() && f.IsDir() {
+						if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
+							// Both directories are the same, don't record the change
+							return nil
+						}
+					}
+					change.Kind = ChangeModify
+					break
+				}
+			}
+		}
+
+		// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
+		// This block is here to ensure the change is recorded even if the
+		// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
+		// Check https://github.com/docker/docker/pull/13590 for details.
+		if f.IsDir() {
+			changedDirs[path] = struct{}{}
+		}
+		if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
+			parent := filepath.Dir(path)
+			if _, ok := changedDirs[parent]; !ok && parent != "/" {
+				changes = append(changes, Change{Path: parent, Kind: ChangeModify})
+				changedDirs[parent] = struct{}{}
+			}
+		}
+
+		// Record change
+		changes = append(changes, change)
+		return nil
+	})
+	if err != nil && !os.IsNotExist(err) {
+		return nil, err
+	}
+	return changes, nil
+}
+
+type FileInfo struct {
+	parent     *FileInfo
+	name       string
+	stat       *system.Stat_t
+	children   map[string]*FileInfo
+	capability []byte
+	added      bool
+}
+
+func (root *FileInfo) LookUp(path string) *FileInfo {
+	// As this runs on the daemon side, file paths are OS specific.
+	parent := root
+	if path == string(os.PathSeparator) {
+		return root
+	}
+
+	pathElements := strings.Split(path, string(os.PathSeparator))
+	for _, elem := range pathElements {
+		if elem != "" {
+			child := parent.children[elem]
+			if child == nil {
+				return nil
+			}
+			parent = child
+		}
+	}
+	return parent
+}
+
+func (info *FileInfo) path() string {
+	if info.parent == nil {
+		// As this runs on the daemon side, file paths are OS specific.
+		return string(os.PathSeparator)
+	}
+	return filepath.Join(info.parent.path(), info.name)
+}
+
+func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
+	sizeAtEntry := len(*changes)
+
+	if oldInfo == nil {
+		// add
+		change := Change{
+			Path: info.path(),
+			Kind: ChangeAdd,
+		}
+		*changes = append(*changes, change)
+		info.added = true
+	}
+
+	// We make a copy so we can modify it to detect additions
+	// also, we only recurse on the old dir if the new info is a directory
+	// otherwise any previous delete/change is considered recursive
+	oldChildren := make(map[string]*FileInfo)
+	if oldInfo != nil && info.isDir() {
+		for k, v := range oldInfo.children {
+			oldChildren[k] = v
+		}
+	}
+
+	for name, newChild := range info.children {
+		oldChild := oldChildren[name]
+		if oldChild != nil {
+			// change?
+			oldStat := oldChild.stat
+			newStat := newChild.stat
+			// Note: We can't compare inode or ctime or blocksize here, because these change
+			// when copying a file into a container. However, that is not generally a problem
+			// because any content change will change mtime, and any status change should
+			// be visible when actually comparing the stat fields. The only time this
+			// breaks down is if some code intentionally hides a change by setting
+			// back mtime
+			if statDifferent(oldStat, newStat) ||
+				!bytes.Equal(oldChild.capability, newChild.capability) {
+				change := Change{
+					Path: newChild.path(),
+					Kind: ChangeModify,
+				}
+				*changes = append(*changes, change)
+				newChild.added = true
+			}
+
+			// Remove from copy so we can detect deletions
+			delete(oldChildren, name)
+		}
+
+		newChild.addChanges(oldChild, changes)
+	}
+	for _, oldChild := range oldChildren {
+		// delete
+		change := Change{
+			Path: oldChild.path(),
+			Kind: ChangeDelete,
+		}
+		*changes = append(*changes, change)
+	}
+
+	// If there were changes inside this directory, we need to add it, even if the directory
+	// itself wasn't changed. This is needed to properly save and restore filesystem permissions.
+	// As this runs on the daemon side, file paths are OS specific.
+	if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
+		change := Change{
+			Path: info.path(),
+			Kind: ChangeModify,
+		}
+		// Let's insert the directory entry before the recently added entries located inside this dir
+		*changes = append(*changes, change) // just to resize the slice, will be overwritten
+		copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
+		(*changes)[sizeAtEntry] = change
+	}
+}
+
+func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
+	var changes []Change
+
+	info.addChanges(oldInfo, &changes)
+
+	return changes
+}
+
+func newRootFileInfo() *FileInfo {
+	// As this runs on the daemon side, file paths are OS specific.
+	root := &FileInfo{
+		name:     string(os.PathSeparator),
+		children: make(map[string]*FileInfo),
+	}
+	return root
+}
+
+// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
+// If oldDir is "", then all files in newDir will be Add-Changes.
+func ChangesDirs(newDir, oldDir string) ([]Change, error) {
+	var (
+		oldRoot, newRoot *FileInfo
+	)
+	if oldDir == "" {
+		emptyDir, err := ioutil.TempDir("", "empty")
+		if err != nil {
+			return nil, err
+		}
+		defer os.Remove(emptyDir)
+		oldDir = emptyDir
+	}
+	oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
+	if err != nil {
+		return nil, err
+	}
+
+	return newRoot.Changes(oldRoot), nil
+}
+
+// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
+func ChangesSize(newDir string, changes []Change) int64 {
+	var size int64
+	for _, change := range changes {
+		if change.Kind == ChangeModify || change.Kind == ChangeAdd {
+			file := filepath.Join(newDir, change.Path)
+			fileInfo, _ := os.Lstat(file)
+			if fileInfo != nil && !fileInfo.IsDir() {
+				size += fileInfo.Size()
+			}
+		}
+	}
+	return size
+}
+
+// ExportChanges produces an Archive from the provided changes, relative to dir.
+func ExportChanges(dir string, changes []Change) (Archive, error) {
+	reader, writer := io.Pipe()
+	go func() {
+		ta := &tarAppender{
+			TarWriter: tar.NewWriter(writer),
+			Buffer:    pools.BufioWriter32KPool.Get(nil),
+			SeenFiles: make(map[uint64]string),
+		}
+		// this buffer is needed for the duration of this piped stream
+		defer pools.BufioWriter32KPool.Put(ta.Buffer)
+
+		sort.Sort(changesByPath(changes))
+
+		// In general we log errors here but ignore them because
+		// during e.g. a diff operation the container can continue
+		// mutating the filesystem and we can see transient errors
+		// from this
+		for _, change := range changes {
+			if change.Kind == ChangeDelete {
+				whiteOutDir := filepath.Dir(change.Path)
+				whiteOutBase := filepath.Base(change.Path)
+				whiteOut := filepath.Join(whiteOutDir, ".wh."+whiteOutBase)
+				timestamp := time.Now()
+				hdr := &tar.Header{
+					Name:       whiteOut[1:],
+					Size:       0,
+					ModTime:    timestamp,
+					AccessTime: timestamp,
+					ChangeTime: timestamp,
+				}
+				if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+					logrus.Debugf("Can't write whiteout header: %s", err)
+				}
+			} else {
+				path := filepath.Join(dir, change.Path)
+				if err := ta.addTarFile(path, change.Path[1:]); err != nil {
+					logrus.Debugf("Can't add file %s to tar: %s", path, err)
+				}
+			}
+		}
+
+		// Make sure to check the error on Close.
+		if err := ta.TarWriter.Close(); err != nil {
+			logrus.Debugf("Can't close layer: %s", err)
+		}
+		if err := writer.Close(); err != nil {
+			logrus.Debugf("failed to close Changes writer: %s", err)
+		}
+	}()
+	return reader, nil
+}
diff --git a/pkg/archive/changes_linux.go b/pkg/archive/changes_linux.go
new file mode 100644
index 00000000..dee8b7c6
--- /dev/null
+++ b/pkg/archive/changes_linux.go
@@ -0,0 +1,285 @@
+package archive
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sort"
+	"syscall"
+	"unsafe"
+
+	"github.com/docker/docker/pkg/system"
+)
+
+// walker is used to implement collectFileInfoForChanges on linux. Where this
+// method in general returns the entire contents of two directory trees, we
+// optimize some FS calls out on linux. In particular, we take advantage of the
+// fact that getdents(2) returns the inode of each file in the directory being
+// walked, which, when walking two trees in parallel to generate a list of
+// changes, can be used to prune subtrees without ever having to lstat(2) them
+// directly. Eliminating stat calls in this way can save whole seconds on large
+// images.
+type walker struct {
+	dir1  string
+	dir2  string
+	root1 *FileInfo
+	root2 *FileInfo
+}
+
+// collectFileInfoForChanges returns a complete representation of the trees
+// rooted at dir1 and dir2, with one important exception: any subtree or
+// leaf where the inode and device numbers are an exact match between dir1
+// and dir2 will be pruned from the results. This method is *only* to be used
+// to generate a list of changes between the two directories, as it does not
+// reflect the full contents.
+func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
+	w := &walker{
+		dir1:  dir1,
+		dir2:  dir2,
+		root1: newRootFileInfo(),
+		root2: newRootFileInfo(),
+	}
+
+	i1, err := os.Lstat(w.dir1)
+	if err != nil {
+		return nil, nil, err
+	}
+	i2, err := os.Lstat(w.dir2)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if err := w.walk("/", i1, i2); err != nil {
+		return nil, nil, err
+	}
+
+	return w.root1, w.root2, nil
+}
+
+// Given a FileInfo, its path info, and a reference to the root of the tree
+// being constructed, register this file with the tree.
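+//
+// An editor's illustration (not part of the original source): for path
+// "/a/b", the lookup below resolves "/a" in the existing tree and the new
+// node is attached as
+//
+//	root.LookUp("/a").children["b"]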
+func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { + if fi == nil { + return nil + } + parent := root.LookUp(filepath.Dir(path)) + if parent == nil { + return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path) + } + info := &FileInfo{ + name: filepath.Base(path), + children: make(map[string]*FileInfo), + parent: parent, + } + cpath := filepath.Join(dir, path) + stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) + if err != nil { + return err + } + info.stat = stat + info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access + parent.children[info.name] = info + return nil +} + +// Walk a subtree rooted at the same path in both trees being iterated. For +// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d +func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { + // Register these nodes with the return trees, unless we're still at the + // (already-created) roots: + if path != "/" { + if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { + return err + } + if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { + return err + } + } + + is1Dir := i1 != nil && i1.IsDir() + is2Dir := i2 != nil && i2.IsDir() + + sameDevice := false + if i1 != nil && i2 != nil { + si1 := i1.Sys().(*syscall.Stat_t) + si2 := i2.Sys().(*syscall.Stat_t) + if si1.Dev == si2.Dev { + sameDevice = true + } + } + + // If these files are both non-existent, or leaves (non-dirs), we are done. + if !is1Dir && !is2Dir { + return nil + } + + // Fetch the names of all the files contained in both directories being walked: + var names1, names2 []nameIno + if is1Dir { + names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access + if err != nil { + return err + } + } + if is2Dir { + names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access + if err != nil { + return err + } + } + + // We have lists of the files contained in both parallel directories, sorted + // in the same order. Walk them in parallel, generating a unique merged list + // of all items present in either or both directories. 
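+	// For example, merging names1 = [a, b, d] with names2 = [b, c] yields
+	// [a, b, c, d]; "b" is emitted only if its inode differs between the two
+	// trees or the trees are on different devices.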
+ var names []string + ix1 := 0 + ix2 := 0 + + for { + if ix1 >= len(names1) { + break + } + if ix2 >= len(names2) { + break + } + + ni1 := names1[ix1] + ni2 := names2[ix2] + + switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { + case -1: // ni1 < ni2 -- advance ni1 + // we will not encounter ni1 in names2 + names = append(names, ni1.name) + ix1++ + case 0: // ni1 == ni2 + if ni1.ino != ni2.ino || !sameDevice { + names = append(names, ni1.name) + } + ix1++ + ix2++ + case 1: // ni1 > ni2 -- advance ni2 + // we will not encounter ni2 in names1 + names = append(names, ni2.name) + ix2++ + } + } + for ix1 < len(names1) { + names = append(names, names1[ix1].name) + ix1++ + } + for ix2 < len(names2) { + names = append(names, names2[ix2].name) + ix2++ + } + + // For each of the names present in either or both of the directories being + // iterated, stat the name under each root, and recurse the pair of them: + for _, name := range names { + fname := filepath.Join(path, name) + var cInfo1, cInfo2 os.FileInfo + if is1Dir { + cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if is2Dir { + cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if err = w.walk(fname, cInfo1, cInfo2); err != nil { + return err + } + } + return nil +} + +// {name,inode} pairs used to support the early-pruning logic of the walker type +type nameIno struct { + name string + ino uint64 +} + +type nameInoSlice []nameIno + +func (s nameInoSlice) Len() int { return len(s) } +func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } + +// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode +// numbers further up the stack when reading directory contents. Unlike +// os.Readdirnames, which returns a list of filenames, this function returns a +// list of {filename,inode} pairs. +func readdirnames(dirname string) (names []nameIno, err error) { + var ( + size = 100 + buf = make([]byte, 4096) + nbuf int + bufp int + nb int + ) + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + names = make([]nameIno, 0, size) // Empty with room to grow. + for { + // Refill the buffer if necessary + if bufp >= nbuf { + bufp = 0 + nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux + if nbuf < 0 { + nbuf = 0 + } + if err != nil { + return nil, os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + break // EOF + } + } + + // Drain the buffer + nb, names = parseDirent(buf[bufp:nbuf], names) + bufp += nb + } + + sl := nameInoSlice(names) + sort.Sort(sl) + return sl, nil +} + +// parseDirent is a minor modification of syscall.ParseDirent (linux version) +// which returns {name,inode} pairs instead of just names. +func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { + origlen := len(buf) + for len(buf) > 0 { + dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) + var name = string(bytes[0:clen(bytes[:])]) + if name == "." || name == ".." 
{ // Useless names + continue + } + names = append(names, nameIno{name, dirent.Ino}) + } + return origlen - len(buf), names +} + +func clen(n []byte) int { + for i := 0; i < len(n); i++ { + if n[i] == 0 { + return i + } + } + return len(n) +} diff --git a/pkg/archive/changes_other.go b/pkg/archive/changes_other.go new file mode 100644 index 00000000..da70ed37 --- /dev/null +++ b/pkg/archive/changes_other.go @@ -0,0 +1,97 @@ +// +build !linux + +package archive + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/pkg/system" +) + +func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + oldRoot, err1 = collectFileInfo(oldDir) + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir) + errs <- err2 + }() + + // block until both routines have returned + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + return nil, nil, err + } + } + + return oldRoot, newRoot, nil +} + +func collectFileInfo(sourceDir string) (*FileInfo, error) { + root := newRootFileInfo() + + err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + relPath = filepath.Join(string(os.PathSeparator), relPath) + + // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. + // Temporary workaround. If the returned path starts with two backslashes, + // trim it down to a single backslash. Only relevant on Windows. + if runtime.GOOS == "windows" { + if strings.HasPrefix(relPath, `\\`) { + relPath = relPath[1:] + } + } + + if relPath == string(os.PathSeparator) { + return nil + } + + parent := root.LookUp(filepath.Dir(relPath)) + if parent == nil { + return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) + } + + info := &FileInfo{ + name: filepath.Base(relPath), + children: make(map[string]*FileInfo), + parent: parent, + } + + s, err := system.Lstat(path) + if err != nil { + return err + } + info.stat = s + + info.capability, _ = system.Lgetxattr(path, "security.capability") + + parent.children[info.name] = info + + return nil + }) + if err != nil { + return nil, err + } + return root, nil +} diff --git a/pkg/archive/changes_posix_test.go b/pkg/archive/changes_posix_test.go new file mode 100644 index 00000000..9d528e61 --- /dev/null +++ b/pkg/archive/changes_posix_test.go @@ -0,0 +1,127 @@ +package archive + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "sort" + "testing" +) + +func TestHardLinkOrder(t *testing.T) { + names := []string{"file1.txt", "file2.txt", "file3.txt"} + msg := []byte("Hey y'all") + + // Create dir + src, err := ioutil.TempDir("", "docker-hardlink-test-src-") + if err != nil { + t.Fatal(err) + } + //defer os.RemoveAll(src) + for _, name := range names { + func() { + fh, err := os.Create(path.Join(src, name)) + if err != nil { + t.Fatal(err) + } + defer fh.Close() + if _, err = fh.Write(msg); err != nil { + t.Fatal(err) + } + }() + } + // Create dest, with changes that includes hardlinks + dest, err := ioutil.TempDir("", "docker-hardlink-test-dest-") + if err != nil { + t.Fatal(err) + } + os.RemoveAll(dest) // we just want the name, at first + if err := copyDir(src, dest); err != nil { + t.Fatal(err) + } + defer 
os.RemoveAll(dest) + for _, name := range names { + for i := 0; i < 5; i++ { + if err := os.Link(path.Join(dest, name), path.Join(dest, fmt.Sprintf("%s.link%d", name, i))); err != nil { + t.Fatal(err) + } + } + } + + // get changes + changes, err := ChangesDirs(dest, src) + if err != nil { + t.Fatal(err) + } + + // sort + sort.Sort(changesByPath(changes)) + + // ExportChanges + ar, err := ExportChanges(dest, changes) + if err != nil { + t.Fatal(err) + } + hdrs, err := walkHeaders(ar) + if err != nil { + t.Fatal(err) + } + + // reverse sort + sort.Sort(sort.Reverse(changesByPath(changes))) + // ExportChanges + arRev, err := ExportChanges(dest, changes) + if err != nil { + t.Fatal(err) + } + hdrsRev, err := walkHeaders(arRev) + if err != nil { + t.Fatal(err) + } + + // line up the two sets + sort.Sort(tarHeaders(hdrs)) + sort.Sort(tarHeaders(hdrsRev)) + + // compare Size and LinkName + for i := range hdrs { + if hdrs[i].Name != hdrsRev[i].Name { + t.Errorf("headers - expected name %q; but got %q", hdrs[i].Name, hdrsRev[i].Name) + } + if hdrs[i].Size != hdrsRev[i].Size { + t.Errorf("headers - %q expected size %d; but got %d", hdrs[i].Name, hdrs[i].Size, hdrsRev[i].Size) + } + if hdrs[i].Typeflag != hdrsRev[i].Typeflag { + t.Errorf("headers - %q expected type %d; but got %d", hdrs[i].Name, hdrs[i].Typeflag, hdrsRev[i].Typeflag) + } + if hdrs[i].Linkname != hdrsRev[i].Linkname { + t.Errorf("headers - %q expected linkname %q; but got %q", hdrs[i].Name, hdrs[i].Linkname, hdrsRev[i].Linkname) + } + } + +} + +type tarHeaders []tar.Header + +func (th tarHeaders) Len() int { return len(th) } +func (th tarHeaders) Swap(i, j int) { th[j], th[i] = th[i], th[j] } +func (th tarHeaders) Less(i, j int) bool { return th[i].Name < th[j].Name } + +func walkHeaders(r io.Reader) ([]tar.Header, error) { + t := tar.NewReader(r) + headers := []tar.Header{} + for { + hdr, err := t.Next() + if err != nil { + if err == io.EOF { + break + } + return headers, err + } + headers = append(headers, *hdr) + } + return headers, nil +} diff --git a/pkg/archive/changes_test.go b/pkg/archive/changes_test.go new file mode 100644 index 00000000..509bdb2e --- /dev/null +++ b/pkg/archive/changes_test.go @@ -0,0 +1,495 @@ +package archive + +import ( + "io/ioutil" + "os" + "os/exec" + "path" + "sort" + "testing" + "time" +) + +func max(x, y int) int { + if x >= y { + return x + } + return y +} + +func copyDir(src, dst string) error { + cmd := exec.Command("cp", "-a", src, dst) + if err := cmd.Run(); err != nil { + return err + } + return nil +} + +type FileType uint32 + +const ( + Regular FileType = iota + Dir + Symlink +) + +type FileData struct { + filetype FileType + path string + contents string + permissions os.FileMode +} + +func createSampleDir(t *testing.T, root string) { + files := []FileData{ + {Regular, "file1", "file1\n", 0600}, + {Regular, "file2", "file2\n", 0666}, + {Regular, "file3", "file3\n", 0404}, + {Regular, "file4", "file4\n", 0600}, + {Regular, "file5", "file5\n", 0600}, + {Regular, "file6", "file6\n", 0600}, + {Regular, "file7", "file7\n", 0600}, + {Dir, "dir1", "", 0740}, + {Regular, "dir1/file1-1", "file1-1\n", 01444}, + {Regular, "dir1/file1-2", "file1-2\n", 0666}, + {Dir, "dir2", "", 0700}, + {Regular, "dir2/file2-1", "file2-1\n", 0666}, + {Regular, "dir2/file2-2", "file2-2\n", 0666}, + {Dir, "dir3", "", 0700}, + {Regular, "dir3/file3-1", "file3-1\n", 0666}, + {Regular, "dir3/file3-2", "file3-2\n", 0666}, + {Dir, "dir4", "", 0700}, + {Regular, "dir4/file3-1", "file4-1\n", 0666}, + {Regular, "dir4/file3-2", 
"file4-2\n", 0666}, + {Symlink, "symlink1", "target1", 0666}, + {Symlink, "symlink2", "target2", 0666}, + } + + now := time.Now() + for _, info := range files { + p := path.Join(root, info.path) + if info.filetype == Dir { + if err := os.MkdirAll(p, info.permissions); err != nil { + t.Fatal(err) + } + } else if info.filetype == Regular { + if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil { + t.Fatal(err) + } + } else if info.filetype == Symlink { + if err := os.Symlink(info.contents, p); err != nil { + t.Fatal(err) + } + } + + if info.filetype != Symlink { + // Set a consistent ctime, atime for all files and dirs + if err := os.Chtimes(p, now, now); err != nil { + t.Fatal(err) + } + } + } +} + +func TestChangeString(t *testing.T) { + modifiyChange := Change{"change", ChangeModify} + toString := modifiyChange.String() + if toString != "C change" { + t.Fatalf("String() of a change with ChangeModifiy Kind should have been %s but was %s", "C change", toString) + } + addChange := Change{"change", ChangeAdd} + toString = addChange.String() + if toString != "A change" { + t.Fatalf("String() of a change with ChangeAdd Kind should have been %s but was %s", "A change", toString) + } + deleteChange := Change{"change", ChangeDelete} + toString = deleteChange.String() + if toString != "D change" { + t.Fatalf("String() of a change with ChangeDelete Kind should have been %s but was %s", "D change", toString) + } +} + +func TestChangesWithNoChanges(t *testing.T) { + rwLayer, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rwLayer) + layer, err := ioutil.TempDir("", "docker-changes-test-layer") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(layer) + createSampleDir(t, layer) + changes, err := Changes([]string{layer}, rwLayer) + if err != nil { + t.Fatal(err) + } + if len(changes) != 0 { + t.Fatalf("Changes with no difference should have detect no changes, but detected %d", len(changes)) + } +} + +func TestChangesWithChanges(t *testing.T) { + // Mock the readonly layer + layer, err := ioutil.TempDir("", "docker-changes-test-layer") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(layer) + createSampleDir(t, layer) + os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0740) + + // Mock the RW layer + rwLayer, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rwLayer) + + // Create a folder in RW layer + dir1 := path.Join(rwLayer, "dir1") + os.MkdirAll(dir1, 0740) + deletedFile := path.Join(dir1, ".wh.file1-2") + ioutil.WriteFile(deletedFile, []byte{}, 0600) + modifiedFile := path.Join(dir1, "file1-1") + ioutil.WriteFile(modifiedFile, []byte{0x00}, 01444) + // Let's add a subfolder for a newFile + subfolder := path.Join(dir1, "subfolder") + os.MkdirAll(subfolder, 0740) + newFile := path.Join(subfolder, "newFile") + ioutil.WriteFile(newFile, []byte{}, 0740) + + changes, err := Changes([]string{layer}, rwLayer) + if err != nil { + t.Fatal(err) + } + + expectedChanges := []Change{ + {"/dir1", ChangeModify}, + {"/dir1/file1-1", ChangeModify}, + {"/dir1/file1-2", ChangeDelete}, + {"/dir1/subfolder", ChangeModify}, + {"/dir1/subfolder/newFile", ChangeAdd}, + } + checkChanges(expectedChanges, changes, t) +} + +// See https://github.com/docker/docker/pull/13590 +func TestChangesWithChangesGH13590(t *testing.T) { + baseLayer, err := ioutil.TempDir("", "docker-changes-test.") + defer os.RemoveAll(baseLayer) + + dir3 := path.Join(baseLayer, 
"dir1/dir2/dir3") + os.MkdirAll(dir3, 07400) + + file := path.Join(dir3, "file.txt") + ioutil.WriteFile(file, []byte("hello"), 0666) + + layer, err := ioutil.TempDir("", "docker-changes-test2.") + defer os.RemoveAll(layer) + + // Test creating a new file + if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { + t.Fatalf("Cmd failed: %q", err) + } + + os.Remove(path.Join(layer, "dir1/dir2/dir3/file.txt")) + file = path.Join(layer, "dir1/dir2/dir3/file1.txt") + ioutil.WriteFile(file, []byte("bye"), 0666) + + changes, err := Changes([]string{baseLayer}, layer) + if err != nil { + t.Fatal(err) + } + + expectedChanges := []Change{ + {"/dir1/dir2/dir3", ChangeModify}, + {"/dir1/dir2/dir3/file1.txt", ChangeAdd}, + } + checkChanges(expectedChanges, changes, t) + + // Now test changing a file + layer, err = ioutil.TempDir("", "docker-changes-test3.") + defer os.RemoveAll(layer) + + if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { + t.Fatalf("Cmd failed: %q", err) + } + + file = path.Join(layer, "dir1/dir2/dir3/file.txt") + ioutil.WriteFile(file, []byte("bye"), 0666) + + changes, err = Changes([]string{baseLayer}, layer) + if err != nil { + t.Fatal(err) + } + + expectedChanges = []Change{ + {"/dir1/dir2/dir3/file.txt", ChangeModify}, + } + checkChanges(expectedChanges, changes, t) +} + +// Create an directory, copy it, make sure we report no changes between the two +func TestChangesDirsEmpty(t *testing.T) { + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + createSampleDir(t, src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dst) + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + if len(changes) != 0 { + t.Fatalf("Reported changes for identical dirs: %v", changes) + } + os.RemoveAll(src) + os.RemoveAll(dst) +} + +func mutateSampleDir(t *testing.T, root string) { + // Remove a regular file + if err := os.RemoveAll(path.Join(root, "file1")); err != nil { + t.Fatal(err) + } + + // Remove a directory + if err := os.RemoveAll(path.Join(root, "dir1")); err != nil { + t.Fatal(err) + } + + // Remove a symlink + if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil { + t.Fatal(err) + } + + // Rewrite a file + if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil { + t.Fatal(err) + } + + // Replace a file + if err := os.RemoveAll(path.Join(root, "file3")); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil { + t.Fatal(err) + } + + // Touch file + if err := os.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { + t.Fatal(err) + } + + // Replace file with dir + if err := os.RemoveAll(path.Join(root, "file5")); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil { + t.Fatal(err) + } + + // Create new file + if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil { + t.Fatal(err) + } + + // Create new dir + if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil { + t.Fatal(err) + } + + // Create a new symlink + if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil { + t.Fatal(err) + } + + // Change a symlink + if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil { + t.Fatal(err) + } + if err := os.Symlink("target2change", 
path.Join(root, "symlink2")); err != nil { + t.Fatal(err) + } + + // Replace dir with file + if err := os.RemoveAll(path.Join(root, "dir2")); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil { + t.Fatal(err) + } + + // Touch dir + if err := os.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { + t.Fatal(err) + } +} + +func TestChangesDirsMutated(t *testing.T) { + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + createSampleDir(t, src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + defer os.RemoveAll(dst) + + mutateSampleDir(t, dst) + + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + sort.Sort(changesByPath(changes)) + + expectedChanges := []Change{ + {"/dir1", ChangeDelete}, + {"/dir2", ChangeModify}, + {"/dirnew", ChangeAdd}, + {"/file1", ChangeDelete}, + {"/file2", ChangeModify}, + {"/file3", ChangeModify}, + {"/file4", ChangeModify}, + {"/file5", ChangeModify}, + {"/filenew", ChangeAdd}, + {"/symlink1", ChangeDelete}, + {"/symlink2", ChangeModify}, + {"/symlinknew", ChangeAdd}, + } + + for i := 0; i < max(len(changes), len(expectedChanges)); i++ { + if i >= len(expectedChanges) { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } + if i >= len(changes) { + t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) + } + if changes[i].Path == expectedChanges[i].Path { + if changes[i] != expectedChanges[i] { + t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) + } + } else if changes[i].Path < expectedChanges[i].Path { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } else { + t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) + } + } +} + +func TestApplyLayer(t *testing.T) { + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + createSampleDir(t, src) + defer os.RemoveAll(src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + mutateSampleDir(t, dst) + defer os.RemoveAll(dst) + + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + layer, err := ExportChanges(dst, changes) + if err != nil { + t.Fatal(err) + } + + layerCopy, err := NewTempArchive(layer, "") + if err != nil { + t.Fatal(err) + } + + if _, err := ApplyLayer(src, layerCopy); err != nil { + t.Fatal(err) + } + + changes2, err := ChangesDirs(src, dst) + if err != nil { + t.Fatal(err) + } + + if len(changes2) != 0 { + t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2) + } +} + +func TestChangesSizeWithNoChanges(t *testing.T) { + size := ChangesSize("/tmp", nil) + if size != 0 { + t.Fatalf("ChangesSizes with no changes should be 0, was %d", size) + } +} + +func TestChangesSizeWithOnlyDeleteChanges(t *testing.T) { + changes := []Change{ + {Path: "deletedPath", Kind: ChangeDelete}, + } + size := ChangesSize("/tmp", changes) + if size != 0 { + t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size) + } +} + +func TestChangesSize(t *testing.T) { + parentPath, err := ioutil.TempDir("", "docker-changes-test") + defer os.RemoveAll(parentPath) + addition := path.Join(parentPath, "addition") + if err := ioutil.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0744); err != 
nil { + t.Fatal(err) + } + modification := path.Join(parentPath, "modification") + if err = ioutil.WriteFile(modification, []byte{0x01, 0x01, 0x01}, 0744); err != nil { + t.Fatal(err) + } + changes := []Change{ + {Path: "addition", Kind: ChangeAdd}, + {Path: "modification", Kind: ChangeModify}, + } + size := ChangesSize(parentPath, changes) + if size != 6 { + t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size) + } +} + +func checkChanges(expectedChanges, changes []Change, t *testing.T) { + sort.Sort(changesByPath(expectedChanges)) + sort.Sort(changesByPath(changes)) + for i := 0; i < max(len(changes), len(expectedChanges)); i++ { + if i >= len(expectedChanges) { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } + if i >= len(changes) { + t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) + } + if changes[i].Path == expectedChanges[i].Path { + if changes[i] != expectedChanges[i] { + t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) + } + } else if changes[i].Path < expectedChanges[i].Path { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } else { + t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) + } + } +} diff --git a/pkg/archive/changes_unix.go b/pkg/archive/changes_unix.go new file mode 100644 index 00000000..d780f163 --- /dev/null +++ b/pkg/archive/changes_unix.go @@ -0,0 +1,27 @@ +// +build !windows + +package archive + +import ( + "syscall" + + "github.com/docker/docker/pkg/system" +) + +func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool { + // Don't look at size for dirs, its not a good measure of change + if oldStat.Mode() != newStat.Mode() || + oldStat.Uid() != newStat.Uid() || + oldStat.Gid() != newStat.Gid() || + oldStat.Rdev() != newStat.Rdev() || + // Don't look at size for dirs, its not a good measure of change + (oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR && + (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0 +} diff --git a/pkg/archive/changes_windows.go b/pkg/archive/changes_windows.go new file mode 100644 index 00000000..4809b7a5 --- /dev/null +++ b/pkg/archive/changes_windows.go @@ -0,0 +1,20 @@ +package archive + +import ( + "github.com/docker/docker/pkg/system" +) + +func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool { + + // Don't look at size for dirs, its not a good measure of change + if oldStat.ModTime() != newStat.ModTime() || + oldStat.Mode() != newStat.Mode() || + oldStat.Size() != newStat.Size() && !oldStat.IsDir() { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.IsDir() +} diff --git a/pkg/archive/copy.go b/pkg/archive/copy.go new file mode 100644 index 00000000..39bb4fd7 --- /dev/null +++ b/pkg/archive/copy.go @@ -0,0 +1,391 @@ +package archive + +import ( + "archive/tar" + "errors" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + log "github.com/Sirupsen/logrus" +) + +// Errors used or returned by this file. 
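+// For example, callers can compare a returned error against ErrCannotCopyDir
+// to detect an attempt to copy a directory over an existing file.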
+var ( + ErrNotDirectory = errors.New("not a directory") + ErrDirNotExists = errors.New("no such directory") + ErrCannotCopyDir = errors.New("cannot copy directory") + ErrInvalidCopySource = errors.New("invalid copy source content") +) + +// PreserveTrailingDotOrSeparator returns the given cleaned path (after +// processing using any utility functions from the path or filepath stdlib +// packages) and appends a trailing `/.` or `/` if its corresponding original +// path (from before being processed by utility functions from the path or +// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned +// path already ends in a `.` path segment, then another is not added. If the +// clean path already ends in a path separator, then another is not added. +func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { + if !SpecifiesCurrentDir(cleanedPath) && SpecifiesCurrentDir(originalPath) { + if !HasTrailingPathSeparator(cleanedPath) { + // Add a separator if it doesn't already end with one (a cleaned + // path would only end in a separator if it is the root). + cleanedPath += string(filepath.Separator) + } + cleanedPath += "." + } + + if !HasTrailingPathSeparator(cleanedPath) && HasTrailingPathSeparator(originalPath) { + cleanedPath += string(filepath.Separator) + } + + return cleanedPath +} + +// AssertsDirectory returns whether the given path is +// asserted to be a directory, i.e., the path ends with +// a trailing '/' or `/.`, assuming a path separator of `/`. +func AssertsDirectory(path string) bool { + return HasTrailingPathSeparator(path) || SpecifiesCurrentDir(path) +} + +// HasTrailingPathSeparator returns whether the given +// path ends with the system's path separator character. +func HasTrailingPathSeparator(path string) bool { + return len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) +} + +// SpecifiesCurrentDir returns whether the given path specifies +// a "current directory", i.e., the last path segment is `.`. +func SpecifiesCurrentDir(path string) bool { + return filepath.Base(path) == "." +} + +// SplitPathDirEntry splits the given path between its directory name and its +// basename by first cleaning the path but preserves a trailing "." if the +// original path specified the current directory. +func SplitPathDirEntry(path string) (dir, base string) { + cleanedPath := filepath.Clean(path) + + if SpecifiesCurrentDir(path) { + cleanedPath += string(filepath.Separator) + "." + } + + return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) +} + +// TarResource archives the resource described by the given CopyInfo to a Tar +// archive. A non-nil error is returned if sourcePath does not exist or is +// asserted to be a directory but exists as another type of file. +// +// This function acts as a convenient wrapper around TarWithOptions, which +// requires a directory as the source path. TarResource accepts either a +// directory or a file path and correctly sets the Tar options. +func TarResource(sourceInfo CopyInfo) (content Archive, err error) { + return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) +} + +// TarResourceRebase is like TarResource but renames the first path element of +// items in the resulting tar archive to match the given rebaseName if not "". 
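+// For example (hypothetical paths): TarResourceRebase("/tmp/dir/link", "data")
+// archives the entry "link" from "/tmp/dir" and renames it to "data" inside
+// the resulting tar stream.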
+func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err error) {
+	if _, err = os.Lstat(sourcePath); err != nil {
+		// Catches the case where the source does not exist or is not a
+		// directory if asserted to be a directory, as this also causes an
+		// error.
+		return
+	}
+
+	// Separate the source path into its directory and
+	// the entry in that directory which we are archiving.
+	sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
+
+	filter := []string{sourceBase}
+
+	log.Debugf("copying %q from %q", sourceBase, sourceDir)
+
+	return TarWithOptions(sourceDir, &TarOptions{
+		Compression:      Uncompressed,
+		IncludeFiles:     filter,
+		IncludeSourceDir: true,
+		RebaseNames: map[string]string{
+			sourceBase: rebaseName,
+		},
+	})
+}
+
+// CopyInfo holds basic info about the source
+// or destination path of a copy operation.
+type CopyInfo struct {
+	Path       string
+	Exists     bool
+	IsDir      bool
+	RebaseName string
+}
+
+// CopyInfoSourcePath stats the given path to create a CopyInfo
+// struct representing that resource for the source of an archive copy
+// operation. The given path should be an absolute local path. A source path
+// has all symlinks evaluated that appear before the last path separator ("/"
+// on Unix). As it is to be a copy source, the path must exist.
+func CopyInfoSourcePath(path string) (CopyInfo, error) {
+	// Split the given path into its Directory and Base components. We will
+	// evaluate symlinks in the directory component then append the base.
+	dirPath, basePath := filepath.Split(path)
+
+	resolvedDirPath, err := filepath.EvalSymlinks(dirPath)
+	if err != nil {
+		return CopyInfo{}, err
+	}
+
+	// resolvedDirPath will have been cleaned (no trailing path separators) so
+	// we can manually join it with the base path element.
+	resolvedPath := resolvedDirPath + string(filepath.Separator) + basePath
+
+	var rebaseName string
+	if HasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
+		// In the case where the path had a trailing separator and a symlink
+		// evaluation has changed the last path component, we will need to
+		// rebase the name in the archive that is being copied to match the
+		// originally requested name.
+		rebaseName = filepath.Base(path)
+	}
+
+	stat, err := os.Lstat(resolvedPath)
+	if err != nil {
+		return CopyInfo{}, err
+	}
+
+	return CopyInfo{
+		Path:       resolvedPath,
+		Exists:     true,
+		IsDir:      stat.IsDir(),
+		RebaseName: rebaseName,
+	}, nil
+}
+
+// CopyInfoDestinationPath stats the given path to create a CopyInfo
+// struct representing that resource for the destination of an archive copy
+// operation. The given path should be an absolute local path.
+func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
+	maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
+	originalPath := path
+
+	stat, err := os.Lstat(path)
+
+	if err == nil && stat.Mode()&os.ModeSymlink == 0 {
+		// The path exists and is not a symlink.
+		return CopyInfo{
+			Path:   path,
+			Exists: true,
+			IsDir:  stat.IsDir(),
+		}, nil
+	}
+
+	// While the path is a symlink.
+	for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
+		if n > maxSymlinkIter {
+			// Don't follow symlinks more than this arbitrary number of times.
+			return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
+		}
+
+		// The path is a symbolic link. We need to evaluate it so that the
+		// destination of the copy operation is the link target and not the
+		// link itself.
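+		// For example, if dstPath is a symlink pointing at "/real/dir"
+		// (hypothetical), the copy is extracted into "/real/dir".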
This is notably different than CopyInfoSourcePath which + // only evaluates symlinks before the last appearing path separator. + // Also note that it is okay if the last path element is a broken + // symlink as the copy operation should create the target. + var linkTarget string + + linkTarget, err = os.Readlink(path) + if err != nil { + return CopyInfo{}, err + } + + if !filepath.IsAbs(linkTarget) { + // Join with the parent directory. + dstParent, _ := SplitPathDirEntry(path) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + path = linkTarget + stat, err = os.Lstat(path) + } + + if err != nil { + // It's okay if the destination path doesn't exist. We can still + // continue the copy operation if the parent directory exists. + if !os.IsNotExist(err) { + return CopyInfo{}, err + } + + // Ensure destination parent dir exists. + dstParent, _ := SplitPathDirEntry(path) + + parentDirStat, err := os.Lstat(dstParent) + if err != nil { + return CopyInfo{}, err + } + if !parentDirStat.IsDir() { + return CopyInfo{}, ErrNotDirectory + } + + return CopyInfo{Path: path}, nil + } + + // The path exists after resolving symlinks. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil +} + +// PrepareArchiveCopy prepares the given srcContent archive, which should +// contain the archived resource described by srcInfo, to the destination +// described by dstInfo. Returns the possibly modified content archive along +// with the path to the destination directory which it should be extracted to. +func PrepareArchiveCopy(srcContent ArchiveReader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) { + // Separate the destination path between its directory and base + // components in case the source archive contents need to be rebased. + dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) + _, srcBase := SplitPathDirEntry(srcInfo.Path) + + switch { + case dstInfo.Exists && dstInfo.IsDir: + // The destination exists as a directory. No alteration + // to srcContent is needed as its contents can be + // simply extracted to the destination directory. + return dstInfo.Path, ioutil.NopCloser(srcContent), nil + case dstInfo.Exists && srcInfo.IsDir: + // The destination exists as some type of file and the source + // content is a directory. This is an error condition since + // you cannot copy a directory to an existing file location. + return "", nil, ErrCannotCopyDir + case dstInfo.Exists: + // The destination exists as some type of file and the source content + // is also a file. The source content entry will have to be renamed to + // have a basename which matches the destination path's basename. + return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case srcInfo.IsDir: + // The destination does not exist and the source content is an archive + // of a directory. The archive should be extracted to the parent of + // the destination path instead, and when it is, the directory that is + // created as a result should take the name of the destination path. + // The source content entries will have to be renamed to have a + // basename which matches the destination path's basename. + return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case AssertsDirectory(dstInfo.Path): + // The destination does not exist and is asserted to be created as a + // directory, but the source content is not a directory. This is an + // error condition since you cannot create a directory from a file + // source. 
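+		// For example, copying "file1" to "dst/" (hypothetical) fails with
+		// ErrDirNotExists, as exercised by TestCopyCaseB in copy_test.go.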
+ return "", nil, ErrDirNotExists + default: + // The last remaining case is when the destination does not exist, is + // not asserted to be a directory, and the source content is not an + // archive of a directory. It this case, the destination file will need + // to be created when the archive is extracted and the source content + // entry will have to be renamed to have a basename which matches the + // destination path's basename. + return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil + } + +} + +// rebaseArchiveEntries rewrites the given srcContent archive replacing +// an occurance of oldBase with newBase at the beginning of entry names. +func rebaseArchiveEntries(srcContent ArchiveReader, oldBase, newBase string) Archive { + if oldBase == "/" { + // If oldBase specifies the root directory, use an empty string as + // oldBase instead so that newBase doesn't replace the path separator + // that all paths will start with. + oldBase = "" + } + + rebased, w := io.Pipe() + + go func() { + srcTar := tar.NewReader(srcContent) + rebasedTar := tar.NewWriter(w) + + for { + hdr, err := srcTar.Next() + if err == io.EOF { + // Signals end of archive. + rebasedTar.Close() + w.Close() + return + } + if err != nil { + w.CloseWithError(err) + return + } + + hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) + + if err = rebasedTar.WriteHeader(hdr); err != nil { + w.CloseWithError(err) + return + } + + if _, err = io.Copy(rebasedTar, srcTar); err != nil { + w.CloseWithError(err) + return + } + } + }() + + return rebased +} + +// CopyResource performs an archive copy from the given source path to the +// given destination path. The source path MUST exist and the destination +// path's parent directory must exist. +func CopyResource(srcPath, dstPath string) error { + var ( + srcInfo CopyInfo + err error + ) + + // Clean the source and destination paths. + srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath) + dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath) + + if srcInfo, err = CopyInfoSourcePath(srcPath); err != nil { + return err + } + + content, err := TarResource(srcInfo) + if err != nil { + return err + } + defer content.Close() + + return CopyTo(content, srcInfo, dstPath) +} + +// CopyTo handles extracting the given content whose +// entries should be sourced from srcInfo to dstPath. +func CopyTo(content ArchiveReader, srcInfo CopyInfo, dstPath string) error { + // The destination path need not exist, but CopyInfoDestinationPath will + // ensure that at least the parent directory exists. 
+ dstInfo, err := CopyInfoDestinationPath(dstPath) + if err != nil { + return err + } + + dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo) + if err != nil { + return err + } + defer copyArchive.Close() + + options := &TarOptions{ + NoLchown: true, + NoOverwriteDirNonDir: true, + } + + return Untar(copyArchive, dstDir, options) +} diff --git a/pkg/archive/copy_test.go b/pkg/archive/copy_test.go new file mode 100644 index 00000000..8acf1ecf --- /dev/null +++ b/pkg/archive/copy_test.go @@ -0,0 +1,625 @@ +package archive + +import ( + "bytes" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" +) + +func removeAllPaths(paths ...string) { + for _, path := range paths { + os.RemoveAll(path) + } +} + +func getTestTempDirs(t *testing.T) (tmpDirA, tmpDirB string) { + var err error + + if tmpDirA, err = ioutil.TempDir("", "archive-copy-test"); err != nil { + t.Fatal(err) + } + + if tmpDirB, err = ioutil.TempDir("", "archive-copy-test"); err != nil { + t.Fatal(err) + } + + return +} + +func isNotDir(err error) bool { + return strings.Contains(err.Error(), "not a directory") +} + +func joinTrailingSep(pathElements ...string) string { + joined := filepath.Join(pathElements...) + + return fmt.Sprintf("%s%c", joined, filepath.Separator) +} + +func fileContentsEqual(t *testing.T, filenameA, filenameB string) (err error) { + t.Logf("checking for equal file contents: %q and %q\n", filenameA, filenameB) + + fileA, err := os.Open(filenameA) + if err != nil { + return + } + defer fileA.Close() + + fileB, err := os.Open(filenameB) + if err != nil { + return + } + defer fileB.Close() + + hasher := sha256.New() + + if _, err = io.Copy(hasher, fileA); err != nil { + return + } + + hashA := hasher.Sum(nil) + hasher.Reset() + + if _, err = io.Copy(hasher, fileB); err != nil { + return + } + + hashB := hasher.Sum(nil) + + if !bytes.Equal(hashA, hashB) { + err = fmt.Errorf("file content hashes not equal - expected %s, got %s", hex.EncodeToString(hashA), hex.EncodeToString(hashB)) + } + + return +} + +func dirContentsEqual(t *testing.T, newDir, oldDir string) (err error) { + t.Logf("checking for equal directory contents: %q and %q\n", newDir, oldDir) + + var changes []Change + + if changes, err = ChangesDirs(newDir, oldDir); err != nil { + return + } + + if len(changes) != 0 { + err = fmt.Errorf("expected no changes between directories, but got: %v", changes) + } + + return +} + +func logDirContents(t *testing.T, dirPath string) { + logWalkedPaths := filepath.WalkFunc(func(path string, info os.FileInfo, err error) error { + if err != nil { + t.Errorf("stat error for path %q: %s", path, err) + return nil + } + + if info.IsDir() { + path = joinTrailingSep(path) + } + + t.Logf("\t%s", path) + + return nil + }) + + t.Logf("logging directory contents: %q", dirPath) + + if err := filepath.Walk(dirPath, logWalkedPaths); err != nil { + t.Fatal(err) + } +} + +func testCopyHelper(t *testing.T, srcPath, dstPath string) (err error) { + t.Logf("copying from %q to %q", srcPath, dstPath) + + return CopyResource(srcPath, dstPath) +} + +// Basic assumptions about SRC and DST: +// 1. SRC must exist. +// 2. If SRC ends with a trailing separator, it must be a directory. +// 3. DST parent directory must exist. +// 4. If DST exists as a file, it must not end with a trailing separator. + +// First get these easy error cases out of the way. + +// Test for error when SRC does not exist. 
+func TestCopyErrSrcNotExists(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + if _, err := CopyInfoSourcePath(filepath.Join(tmpDirA, "file1")); !os.IsNotExist(err) { + t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } +} + +// Test for error when SRC ends in a trailing +// path separator but it exists as a file. +func TestCopyErrSrcNotDir(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + if _, err := CopyInfoSourcePath(joinTrailingSep(tmpDirA, "file1")); !isNotDir(err) { + t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) + } +} + +// Test for error when SRC is a valid file or directory, +// but the DST parent directory does not exist. +func TestCopyErrDstParentNotExists(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} + + // Try with a file source. + content, err := TarResource(srcInfo) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + // Copy to a file whose parent does not exist. + if err = CopyTo(content, srcInfo, filepath.Join(tmpDirB, "fakeParentDir", "file1")); err == nil { + t.Fatal("expected IsNotExist error, but got nil instead") + } + + if !os.IsNotExist(err) { + t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } + + // Try with a directory source. + srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} + + content, err = TarResource(srcInfo) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + // Copy to a directory whose parent does not exist. + if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "fakeParentDir", "fakeDstDir")); err == nil { + t.Fatal("expected IsNotExist error, but got nil instead") + } + + if !os.IsNotExist(err) { + t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } +} + +// Test for error when DST ends in a trailing +// path separator but exists as a file. +func TestCopyErrDstNotDir(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + // Try with a file source. + srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} + + content, err := TarResource(srcInfo) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { + t.Fatal("expected IsNotDir error, but got nil instead") + } + + if !isNotDir(err) { + t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) + } + + // Try with a directory source. 
+ srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} + + content, err = TarResource(srcInfo) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { + t.Fatal("expected IsNotDir error, but got nil instead") + } + + if !isNotDir(err) { + t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) + } +} + +// Possibilities are reduced to the remaining 10 cases: +// +// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action +// =================================================================================================== +// A | no | - | no | - | no | create file +// B | no | - | no | - | yes | error +// C | no | - | yes | no | - | overwrite file +// D | no | - | yes | yes | - | create file in dst dir +// E | yes | no | no | - | - | create dir, copy contents +// F | yes | no | yes | no | - | error +// G | yes | no | yes | yes | - | copy dir and contents +// H | yes | yes | no | - | - | create dir, copy contents +// I | yes | yes | yes | no | - | error +// J | yes | yes | yes | yes | - | copy dir contents +// + +// A. SRC specifies a file and DST (no trailing path separator) doesn't +// exist. This should create a file with the name DST and copy the +// contents of the source file into it. +func TestCopyCaseA(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcPath := filepath.Join(tmpDirA, "file1") + dstPath := filepath.Join(tmpDirB, "itWorks.txt") + + var err error + + if err = testCopyHelper(t, srcPath, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } +} + +// B. SRC specifies a file and DST (with trailing path separator) doesn't +// exist. This should cause an error because the copy operation cannot +// create a directory when copying a single file. +func TestCopyCaseB(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcPath := filepath.Join(tmpDirA, "file1") + dstDir := joinTrailingSep(tmpDirB, "testDir") + + var err error + + if err = testCopyHelper(t, srcPath, dstDir); err == nil { + t.Fatal("expected ErrDirNotExists error, but got nil instead") + } + + if err != ErrDirNotExists { + t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err) + } +} + +// C. SRC specifies a file and DST exists as a file. This should overwrite +// the file at DST with the contents of the source file. +func TestCopyCaseC(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcPath := filepath.Join(tmpDirA, "file1") + dstPath := filepath.Join(tmpDirB, "file2") + + var err error + + // Ensure they start out different. + if err = fileContentsEqual(t, srcPath, dstPath); err == nil { + t.Fatal("expected different file contents") + } + + if err = testCopyHelper(t, srcPath, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } +} + +// D. 
SRC specifies a file and DST exists as a directory. This should place +// a copy of the source file inside it using the basename from SRC. Ensure +// this works whether DST has a trailing path separator or not. +func TestCopyCaseD(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcPath := filepath.Join(tmpDirA, "file1") + dstDir := filepath.Join(tmpDirB, "dir1") + dstPath := filepath.Join(dstDir, "file1") + + var err error + + // Ensure that dstPath doesn't exist. + if _, err = os.Stat(dstPath); !os.IsNotExist(err) { + t.Fatalf("did not expect dstPath %q to exist", dstPath) + } + + if err = testCopyHelper(t, srcPath, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir1") + + if err = testCopyHelper(t, srcPath, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } +} + +// E. SRC specifies a directory and DST does not exist. This should create a +// directory at DST and copy the contents of the SRC directory into the DST +// directory. Ensure this works whether DST has a trailing path separator or +// not. +func TestCopyCaseE(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcDir := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "testDir") + + var err error + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "testDir") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Fatal(err) + } +} + +// F. SRC specifies a directory and DST exists as a file. This should cause an +// error as it is not possible to overwrite a file with a directory. +func TestCopyCaseF(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := filepath.Join(tmpDirA, "dir1") + dstFile := filepath.Join(tmpDirB, "file1") + + var err error + + if err = testCopyHelper(t, srcDir, dstFile); err == nil { + t.Fatal("expected ErrCannotCopyDir error, but got nil instead") + } + + if err != ErrCannotCopyDir { + t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) + } +} + +// G. SRC specifies a directory and DST exists as a directory. 
This should copy
+// the SRC directory and all its contents to the DST directory. Ensure this
+// works whether DST has a trailing path separator or not.
+func TestCopyCaseG(t *testing.T) {
+	tmpDirA, tmpDirB := getTestTempDirs(t)
+	defer removeAllPaths(tmpDirA, tmpDirB)
+
+	// Load A and B with some sample files and directories.
+	createSampleDir(t, tmpDirA)
+	createSampleDir(t, tmpDirB)
+
+	srcDir := filepath.Join(tmpDirA, "dir1")
+	dstDir := filepath.Join(tmpDirB, "dir2")
+	resultDir := filepath.Join(dstDir, "dir1")
+
+	var err error
+
+	if err = testCopyHelper(t, srcDir, dstDir); err != nil {
+		t.Fatalf("unexpected error %T: %s", err, err)
+	}
+
+	if err = dirContentsEqual(t, resultDir, srcDir); err != nil {
+		t.Fatal(err)
+	}
+
+	// Now try again but using a trailing path separator for dstDir.
+
+	if err = os.RemoveAll(dstDir); err != nil {
+		t.Fatalf("unable to remove dstDir: %s", err)
+	}
+
+	if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
+		t.Fatalf("unable to make dstDir: %s", err)
+	}
+
+	dstDir = joinTrailingSep(tmpDirB, "dir2")
+
+	if err = testCopyHelper(t, srcDir, dstDir); err != nil {
+		t.Fatalf("unexpected error %T: %s", err, err)
+	}
+
+	if err = dirContentsEqual(t, resultDir, srcDir); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// H. SRC specifies a directory's contents only and DST does not exist. This
+// should create a directory at DST and copy the contents of the SRC
+// directory (but not the directory itself) into the DST directory. Ensure
+// this works whether DST has a trailing path separator or not.
+func TestCopyCaseH(t *testing.T) {
+	tmpDirA, tmpDirB := getTestTempDirs(t)
+	defer removeAllPaths(tmpDirA, tmpDirB)
+
+	// Load A with some sample files and directories.
+	createSampleDir(t, tmpDirA)
+
+	srcDir := joinTrailingSep(tmpDirA, "dir1") + "."
+	dstDir := filepath.Join(tmpDirB, "testDir")
+
+	var err error
+
+	if err = testCopyHelper(t, srcDir, dstDir); err != nil {
+		t.Fatalf("unexpected error %T: %s", err, err)
+	}
+
+	if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
+		t.Log("dir contents not equal")
+		logDirContents(t, tmpDirA)
+		logDirContents(t, tmpDirB)
+		t.Fatal(err)
+	}
+
+	// Now try again but using a trailing path separator for dstDir.
+
+	if err = os.RemoveAll(dstDir); err != nil {
+		t.Fatalf("unable to remove dstDir: %s", err)
+	}
+
+	dstDir = joinTrailingSep(tmpDirB, "testDir")
+
+	if err = testCopyHelper(t, srcDir, dstDir); err != nil {
+		t.Fatalf("unexpected error %T: %s", err, err)
+	}
+
+	if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
+		t.Log("dir contents not equal")
+		logDirContents(t, tmpDirA)
+		logDirContents(t, tmpDirB)
+		t.Fatal(err)
+	}
+}
+
+// I. SRC specifies a directory's contents only and DST exists as a file. This
+// should cause an error as it is not possible to overwrite a file with a
+// directory.
+func TestCopyCaseI(t *testing.T) {
+	tmpDirA, tmpDirB := getTestTempDirs(t)
+	defer removeAllPaths(tmpDirA, tmpDirB)
+
+	// Load A and B with some sample files and directories.
+	createSampleDir(t, tmpDirA)
+	createSampleDir(t, tmpDirB)
+
+	srcDir := joinTrailingSep(tmpDirA, "dir1") + "."
+	dstFile := filepath.Join(tmpDirB, "file1")
+
+	var err error
+
+	if err = testCopyHelper(t, srcDir, dstFile); err == nil {
+		t.Fatal("expected ErrCannotCopyDir error, but got nil instead")
+	}
+
+	if err != ErrCannotCopyDir {
+		t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err)
+	}
+}
+
+// J. SRC specifies a directory's contents only and DST exists as a directory.
+// This should copy the contents of the SRC directory (but not the directory
+// itself) into the DST directory. Ensure this works whether DST has a
+// trailing path separator or not.
+func TestCopyCaseJ(t *testing.T) {
+	tmpDirA, tmpDirB := getTestTempDirs(t)
+	defer removeAllPaths(tmpDirA, tmpDirB)
+
+	// Load A and B with some sample files and directories.
+	createSampleDir(t, tmpDirA)
+	createSampleDir(t, tmpDirB)
+
+	srcDir := joinTrailingSep(tmpDirA, "dir1") + "."
+	dstDir := filepath.Join(tmpDirB, "dir5")
+
+	var err error
+
+	if err = testCopyHelper(t, srcDir, dstDir); err != nil {
+		t.Fatalf("unexpected error %T: %s", err, err)
+	}
+
+	if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
+		t.Fatal(err)
+	}
+
+	// Now try again but using a trailing path separator for dstDir.
+
+	if err = os.RemoveAll(dstDir); err != nil {
+		t.Fatalf("unable to remove dstDir: %s", err)
+	}
+
+	if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
+		t.Fatalf("unable to make dstDir: %s", err)
+	}
+
+	dstDir = joinTrailingSep(tmpDirB, "dir5")
+
+	if err = testCopyHelper(t, srcDir, dstDir); err != nil {
+		t.Fatalf("unexpected error %T: %s", err, err)
+	}
+
+	if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/pkg/archive/diff.go b/pkg/archive/diff.go
new file mode 100644
index 00000000..d310a17a
--- /dev/null
+++ b/pkg/archive/diff.go
@@ -0,0 +1,210 @@
+package archive
+
+import (
+	"archive/tar"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"syscall"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/pools"
+	"github.com/docker/docker/pkg/system"
+)
+
+// UnpackLayer unpacks the contents of the layer tar stream into dest,
+// returning the size in bytes of the unpacked contents.
+func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
+	tr := tar.NewReader(layer)
+	trBuf := pools.BufioReader32KPool.Get(tr)
+	defer pools.BufioReader32KPool.Put(trBuf)
+
+	var dirs []*tar.Header
+
+	aufsTempdir := ""
+	aufsHardlinks := make(map[string]*tar.Header)
+
+	// Iterate through the files in the archive.
+	for {
+		hdr, err := tr.Next()
+		if err == io.EOF {
+			// end of tar archive
+			break
+		}
+		if err != nil {
+			return 0, err
+		}
+
+		size += hdr.Size
+
+		// Normalize name, for safety and for a simple is-root check
+		hdr.Name = filepath.Clean(hdr.Name)
+
+		// Windows does not support filenames with colons in them. Ignore
+		// these files. This is not a problem though (although it might
+		// appear that it is). Let's suppose a client is running docker pull.
+		// The daemon it points to is Windows. Would it make sense for the
+		// client to be doing a docker pull Ubuntu for example (which has files
+		// with colons in the name under /usr/share/man/man3)? No, absolutely
+		// not as it would really only make sense that they were pulling a
+		// Windows image. However, for development, it is necessary to be able
+		// to pull Linux images which are in the repository.
+		//
+		// TODO Windows. Once the registry is aware of what images are Windows-
+		// specific or Linux-specific, this warning should be changed to an error
+		// to cater for the situation where someone does manage to upload a Linux
+		// image but have it tagged as Windows inadvertently.
+		if runtime.GOOS == "windows" {
+			if strings.Contains(hdr.Name, ":") {
+				logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
+				continue
+			}
+		}
+
+		// Note: as these operations are platform specific, so must the slash be.
+		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+			// Not the root directory, ensure that the parent directory exists.
+			// This happened in some tests where an image had a tarfile without any
+			// parent directories.
+			parent := filepath.Dir(hdr.Name)
+			parentPath := filepath.Join(dest, parent)
+
+			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+				err = system.MkdirAll(parentPath, 0600)
+				if err != nil {
+					return 0, err
+				}
+			}
+		}
+
+		// Skip AUFS metadata dirs
+		if strings.HasPrefix(hdr.Name, ".wh..wh.") {
+			// Regular files inside /.wh..wh.plnk can be used as hardlink targets.
+			// We don't want this directory, but we need the files in it so that
+			// such hardlinks can be resolved.
+			if strings.HasPrefix(hdr.Name, ".wh..wh.plnk") && hdr.Typeflag == tar.TypeReg {
+				basename := filepath.Base(hdr.Name)
+				aufsHardlinks[basename] = hdr
+				if aufsTempdir == "" {
+					if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
+						return 0, err
+					}
+					defer os.RemoveAll(aufsTempdir)
+				}
+				if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil {
+					return 0, err
+				}
+			}
+			continue
+		}
+		path := filepath.Join(dest, hdr.Name)
+		rel, err := filepath.Rel(dest, path)
+		if err != nil {
+			return 0, err
+		}
+
+		// Note: as these operations are platform specific, so must the slash be.
+		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+			return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+		}
+		base := filepath.Base(path)
+
+		if strings.HasPrefix(base, ".wh.") {
+			originalBase := base[len(".wh."):]
+			originalPath := filepath.Join(filepath.Dir(path), originalBase)
+			if err := os.RemoveAll(originalPath); err != nil {
+				return 0, err
+			}
+		} else {
+			// If path exists we almost always just want to remove and replace it.
+			// The only exception is when it is a directory *and* the file from
+			// the layer is also a directory. Then we want to merge them (i.e.
+			// just apply the metadata from the layer).
+			if fi, err := os.Lstat(path); err == nil {
+				if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+					if err := os.RemoveAll(path); err != nil {
+						return 0, err
+					}
+				}
+			}
+
+			trBuf.Reset(tr)
+			srcData := io.Reader(trBuf)
+			srcHdr := hdr
+
+			// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
+			// we manually retarget these into the temporary files we extracted them into
+			if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), ".wh..wh.plnk") {
+				linkBasename := filepath.Base(hdr.Linkname)
+				srcHdr = aufsHardlinks[linkBasename]
+				if srcHdr == nil {
+					return 0, fmt.Errorf("Invalid aufs hardlink")
+				}
+				tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
+				if err != nil {
+					return 0, err
+				}
+				defer tmpFile.Close()
+				srcData = tmpFile
+			}
+
+			if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil {
+				return 0, err
+			}
+
+			// Directory mtimes must be handled at the end to avoid further
+			// file creation in them modifying the directory mtime
+			if hdr.Typeflag == tar.TypeDir {
+				dirs = append(dirs, hdr)
+			}
+		}
+	}
+
+	for _, hdr := range dirs {
+		path := filepath.Join(dest, hdr.Name)
+		ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
+		if err := syscall.UtimesNano(path, ts); err != nil {
+			return 0, err
+		}
+	}
+
+	return size, nil
+}
+
+// ApplyLayer parses a diff in the standard layer format from `layer`,
+// and applies it to the directory `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
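+//
+// A minimal usage sketch (illustrative only; the layer path and destination
+// below are hypothetical). Note that whiteout entries in the layer, i.e.
+// files named ".wh.<name>", cause <name> to be removed from `dest`:
+//
+//	f, err := os.Open("/tmp/layer.tar")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer f.Close()
+//	size, err := archive.ApplyLayer("/tmp/rootfs", f)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Printf("layer contents: %d bytes", size)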
+func ApplyLayer(dest string, layer ArchiveReader) (int64, error) {
+	return applyLayerHandler(dest, layer, true)
+}
+
+// ApplyUncompressedLayer parses a diff in the standard layer format from
+// `layer`, and applies it to the directory `dest`. The stream `layer`
+// can only be uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyUncompressedLayer(dest string, layer ArchiveReader) (int64, error) {
+	return applyLayerHandler(dest, layer, false)
+}
+
+// applyLayerHandler does the bulk of the work for ApplyLayer, but allows for
+// not calling DecompressStream
+func applyLayerHandler(dest string, layer ArchiveReader, decompress bool) (int64, error) {
+	dest = filepath.Clean(dest)
+
+	// We need to be able to set any perms
+	oldmask, err := system.Umask(0)
+	if err != nil {
+		return 0, err
+	}
+	defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
+
+	if decompress {
+		layer, err = DecompressStream(layer)
+		if err != nil {
+			return 0, err
+		}
+	}
+	return UnpackLayer(dest, layer)
+}
diff --git a/pkg/archive/diff_test.go b/pkg/archive/diff_test.go
new file mode 100644
index 00000000..01ed4372
--- /dev/null
+++ b/pkg/archive/diff_test.go
@@ -0,0 +1,190 @@
+package archive
+
+import (
+	"archive/tar"
+	"testing"
+)
+
+func TestApplyLayerInvalidFilenames(t *testing.T) {
+	for i, headers := range [][]*tar.Header{
+		{
+			{
+				Name:     "../victim/dotdot",
+				Typeflag: tar.TypeReg,
+				Mode:     0644,
+			},
+		},
+		{
+			{
+				// Note the leading slash
+				Name:     "/../victim/slash-dotdot",
+				Typeflag: tar.TypeReg,
+				Mode:     0644,
+			},
+		},
+	} {
+		if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil {
+			t.Fatalf("i=%d. %v", i, err)
+		}
+	}
+}
+
+func TestApplyLayerInvalidHardlink(t *testing.T) {
+	for i, headers := range [][]*tar.Header{
+		{ // try reading victim/hello (../)
+			{
+				Name:     "dotdot",
+				Typeflag: tar.TypeLink,
+				Linkname: "../victim/hello",
+				Mode:     0644,
+			},
+		},
+		{ // try reading victim/hello (/../)
+			{
+				Name:     "slash-dotdot",
+				Typeflag: tar.TypeLink,
+				// Note the leading slash
+				Linkname: "/../victim/hello",
+				Mode:     0644,
+			},
+		},
+		{ // try writing victim/file
+			{
+				Name:     "loophole-victim",
+				Typeflag: tar.TypeLink,
+				Linkname: "../victim",
+				Mode:     0755,
+			},
+			{
+				Name:     "loophole-victim/file",
+				Typeflag: tar.TypeReg,
+				Mode:     0644,
+			},
+		},
+		{ // try reading victim/hello (hardlink, symlink)
+			{
+				Name:     "loophole-victim",
+				Typeflag: tar.TypeLink,
+				Linkname: "../victim",
+				Mode:     0755,
+			},
+			{
+				Name:     "symlink",
+				Typeflag: tar.TypeSymlink,
+				Linkname: "loophole-victim/hello",
+				Mode:     0644,
+			},
+		},
+		{ // Try reading victim/hello (hardlink, hardlink)
+			{
+				Name:     "loophole-victim",
+				Typeflag: tar.TypeLink,
+				Linkname: "../victim",
+				Mode:     0755,
+			},
+			{
+				Name:     "hardlink",
+				Typeflag: tar.TypeLink,
+				Linkname: "loophole-victim/hello",
+				Mode:     0644,
+			},
+		},
+		{ // Try removing victim directory (hardlink)
+			{
+				Name:     "loophole-victim",
+				Typeflag: tar.TypeLink,
+				Linkname: "../victim",
+				Mode:     0755,
+			},
+			{
+				Name:     "loophole-victim",
+				Typeflag: tar.TypeReg,
+				Mode:     0644,
+			},
+		},
+	} {
+		if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil {
+			t.Fatalf("i=%d. 
%v", i, err) + } + } +} + +func TestApplyLayerInvalidSymlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeSymlink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeSymlink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try removing victim directory (symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} diff --git a/pkg/archive/example_changes.go b/pkg/archive/example_changes.go new file mode 100644 index 00000000..cedd46a4 --- /dev/null +++ b/pkg/archive/example_changes.go @@ -0,0 +1,97 @@ +// +build ignore + +// Simple tool to create an archive stream from an old and new directory +// +// By default it will stream the comparison of two temporary directories with junk files +package main + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/archive" +) + +var ( + flDebug = flag.Bool("D", false, "debugging output") + flNewDir = flag.String("newdir", "", "") + flOldDir = flag.String("olddir", "", "") + log = logrus.New() +) + +func main() { + flag.Usage = func() { + fmt.Println("Produce a tar from comparing two directory paths. 
By default a demo tar is created of around 200 files (including hardlinks)")
+		fmt.Printf("%s [OPTIONS]\n", os.Args[0])
+		flag.PrintDefaults()
+	}
+	flag.Parse()
+	log.Out = os.Stderr
+	if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
+		logrus.SetLevel(logrus.DebugLevel)
+	}
+	var newDir, oldDir string
+
+	if len(*flNewDir) == 0 {
+		var err error
+		newDir, err = ioutil.TempDir("", "docker-test-newDir")
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer os.RemoveAll(newDir)
+		if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
+			log.Fatal(err)
+		}
+	} else {
+		newDir = *flNewDir
+	}
+
+	if len(*flOldDir) == 0 {
+		// Assign to the outer oldDir; declaring it with := here would shadow
+		// the variable above and leave it empty for ChangesDirs below.
+		var err error
+		oldDir, err = ioutil.TempDir("", "docker-test-oldDir")
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer os.RemoveAll(oldDir)
+	} else {
+		oldDir = *flOldDir
+	}
+
+	changes, err := archive.ChangesDirs(newDir, oldDir)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	a, err := archive.ExportChanges(newDir, changes)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer a.Close()
+
+	i, err := io.Copy(os.Stdout, a)
+	if err != nil && err != io.EOF {
+		log.Fatal(err)
+	}
+	fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
+}
+
+func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
+	fileData := []byte("fooo")
+	for n := 0; n < numberOfFiles; n++ {
+		fileName := fmt.Sprintf("file-%d", n)
+		if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
+			return 0, err
+		}
+		if makeLinks {
+			if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
+				return 0, err
+			}
+		}
+	}
+	totalSize := numberOfFiles * len(fileData)
+	return totalSize, nil
+}
diff --git a/pkg/archive/testdata/broken.tar b/pkg/archive/testdata/broken.tar
new file mode 100644
index 00000000..8f10ea6b
Binary files /dev/null and b/pkg/archive/testdata/broken.tar differ
diff --git a/pkg/archive/time_linux.go b/pkg/archive/time_linux.go
new file mode 100644
index 00000000..3448569b
--- /dev/null
+++ b/pkg/archive/time_linux.go
@@ -0,0 +1,16 @@
+package archive
+
+import (
+	"syscall"
+	"time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+	if time.IsZero() {
+		// Return UTIME_OMIT special value
+		ts.Sec = 0
+		ts.Nsec = ((1 << 30) - 2)
+		return
+	}
+	return syscall.NsecToTimespec(time.UnixNano())
+}
diff --git a/pkg/archive/time_unsupported.go b/pkg/archive/time_unsupported.go
new file mode 100644
index 00000000..e85aac05
--- /dev/null
+++ b/pkg/archive/time_unsupported.go
@@ -0,0 +1,16 @@
+// +build !linux
+
+package archive
+
+import (
+	"syscall"
+	"time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+	nsec := int64(0)
+	if !time.IsZero() {
+		nsec = time.UnixNano()
+	}
+	return syscall.NsecToTimespec(nsec)
+}
diff --git a/pkg/archive/utils_test.go b/pkg/archive/utils_test.go
new file mode 100644
index 00000000..f5cacea8
--- /dev/null
+++ b/pkg/archive/utils_test.go
@@ -0,0 +1,166 @@
+package archive
+
+import (
+	"archive/tar"
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"time"
+)
+
+var testUntarFns = map[string]func(string, io.Reader) error{
+	"untar": func(dest string, r io.Reader) error {
+		return Untar(r, dest, nil)
+	},
+	"applylayer": func(dest string, r io.Reader) error {
+		_, err := ApplyLayer(dest, ArchiveReader(r))
+		return err
+	},
+}
+
+// testBreakout is a helper function that, within the provided `tmpdir` directory,
+// creates a `victim` folder with a generated `hello` file in it.
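+// A hypothetical malicious archive entry used with this helper might look
+// like &tar.Header{Name: "../victim/foo", Typeflag: tar.TypeReg, Mode: 0644},
+// which attempts to escape `dest` and write into the sibling `victim` folder.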
+// The tar file created from `headers` is then extracted, with the chosen
+// `untar` function, into a directory named `dest`.
+//
+// Here are the tested scenarios:
+// - removed `victim` folder (write)
+// - removed files from `victim` folder (write)
+// - new files in `victim` folder (write)
+// - modified files in `victim` folder (write)
+// - file in `dest` with same content as `victim/hello` (read)
+//
+// When using testBreakout make sure you cover one of the scenarios listed above.
+func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error {
+	tmpdir, err := ioutil.TempDir("", tmpdir)
+	if err != nil {
+		return err
+	}
+	defer os.RemoveAll(tmpdir)
+
+	dest := filepath.Join(tmpdir, "dest")
+	if err := os.Mkdir(dest, 0755); err != nil {
+		return err
+	}
+
+	victim := filepath.Join(tmpdir, "victim")
+	if err := os.Mkdir(victim, 0755); err != nil {
+		return err
+	}
+	hello := filepath.Join(victim, "hello")
+	helloData, err := time.Now().MarshalText()
+	if err != nil {
+		return err
+	}
+	if err := ioutil.WriteFile(hello, helloData, 0644); err != nil {
+		return err
+	}
+	helloStat, err := os.Stat(hello)
+	if err != nil {
+		return err
+	}
+
+	reader, writer := io.Pipe()
+	go func() {
+		t := tar.NewWriter(writer)
+		for _, hdr := range headers {
+			t.WriteHeader(hdr)
+		}
+		t.Close()
+	}()
+
+	untar := testUntarFns[untarFn]
+	if untar == nil {
+		return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn)
+	}
+	if err := untar(dest, reader); err != nil {
+		if _, ok := err.(breakoutError); !ok {
+			// If untar returns an error unrelated to an archive breakout,
+			// then consider this an unexpected error and abort.
+			return err
+		}
+		// Here, untar detected the breakout.
+		// Let's move on to verifying that indeed there was no breakout.
+		fmt.Printf("breakoutError: %v\n", err)
+	}
+
+	// Check victim folder
+	f, err := os.Open(victim)
+	if err != nil {
+		// codepath taken if victim folder was removed
+		return fmt.Errorf("archive breakout: error reading %q: %v", victim, err)
+	}
+	defer f.Close()
+
+	// Check contents of victim folder
+	//
+	// We are only interested in getting 2 files from the victim folder, because if all is well
+	// we expect only one result, the `hello` file. If there is a second result, it cannot
+	// hold the same name `hello` and we assume that a new file got created in the victim folder.
+	// That is enough to detect an archive breakout.
+	names, err := f.Readdirnames(2)
+	if err != nil {
+		// codepath taken if victim is not a folder
+		return fmt.Errorf("archive breakout: error reading directory content of %q: %v", victim, err)
+	}
+	for _, name := range names {
+		if name != "hello" {
+			// codepath taken if new file was created in victim folder
+			return fmt.Errorf("archive breakout: new file %q", name)
+		}
+	}
+
+	// Check victim/hello
+	f, err = os.Open(hello)
+	if err != nil {
+		// codepath taken if read permissions were removed
+		return fmt.Errorf("archive breakout: could not open %q: %v", hello, err)
+	}
+	defer f.Close()
+	b, err := ioutil.ReadAll(f)
+	if err != nil {
+		return err
+	}
+	fi, err := f.Stat()
+	if err != nil {
+		return err
+	}
+	if helloStat.IsDir() != fi.IsDir() ||
+		// TODO: cannot check for fi.ModTime() change
+		helloStat.Mode() != fi.Mode() ||
+		helloStat.Size() != fi.Size() ||
+		!bytes.Equal(helloData, b) {
+		// codepath taken if hello has been modified
+		return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v", hello, helloData, b, helloStat, fi)
+	}
+
+	// Check that nothing in dest/ has the same content as victim/hello.
+	// Since victim/hello was generated with time.Now(), it is safe to assume
+	// that any file whose content matches exactly victim/hello, managed somehow
+	// to access victim/hello.
+	return filepath.Walk(dest, func(path string, info os.FileInfo, err error) error {
+		if info.IsDir() {
+			if err != nil {
+				// skip directory if error
+				return filepath.SkipDir
+			}
+			// enter directory
+			return nil
+		}
+		if err != nil {
+			// skip file if error
+			return nil
+		}
+		b, err := ioutil.ReadFile(path)
+		if err != nil {
+			// Houston, we have a problem. Aborting (space)walk.
+			return err
+		}
+		if bytes.Equal(helloData, b) {
+			return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path)
+		}
+		return nil
+	})
+}
diff --git a/pkg/archive/wrap.go b/pkg/archive/wrap.go
new file mode 100644
index 00000000..dfb335c0
--- /dev/null
+++ b/pkg/archive/wrap.go
@@ -0,0 +1,59 @@
+package archive
+
+import (
+	"archive/tar"
+	"bytes"
+	"io/ioutil"
+)
+
+// Generate generates a new archive from the content provided
+// as input.
+//
+// `files` is a sequence of path/content pairs. A new file is
+// added to the archive for each pair.
+// If the last pair is incomplete, the file is created with an
+// empty content. For example:
+//
+// Generate("foo.txt", "hello world", "emptyfile")
+//
+// The above call will return an archive with 2 files:
+// * ./foo.txt with content "hello world"
+// * ./emptyfile with empty content
+//
+// FIXME: stream content instead of buffering
+// FIXME: specify permissions and other archive metadata
+func Generate(input ...string) (Archive, error) {
+	files := parseStringPairs(input...)
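+	// For illustration: parseStringPairs("foo.txt", "hello world", "emptyfile")
+	// yields [][2]string{{"foo.txt", "hello world"}, {"emptyfile", ""}} -- an
+	// odd trailing element is paired with empty content.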
+	buf := new(bytes.Buffer)
+	tw := tar.NewWriter(buf)
+	for _, file := range files {
+		name, content := file[0], file[1]
+		hdr := &tar.Header{
+			Name: name,
+			Size: int64(len(content)),
+		}
+		if err := tw.WriteHeader(hdr); err != nil {
+			return nil, err
+		}
+		if _, err := tw.Write([]byte(content)); err != nil {
+			return nil, err
+		}
+	}
+	if err := tw.Close(); err != nil {
+		return nil, err
+	}
+	return ioutil.NopCloser(buf), nil
+}
+
+func parseStringPairs(input ...string) (output [][2]string) {
+	output = make([][2]string, 0, len(input)/2+1)
+	for i := 0; i < len(input); i += 2 {
+		var pair [2]string
+		pair[0] = input[i]
+		if i+1 < len(input) {
+			pair[1] = input[i+1]
+		}
+		output = append(output, pair)
+	}
+	return
+}
diff --git a/pkg/archive/wrap_test.go b/pkg/archive/wrap_test.go
new file mode 100644
index 00000000..46ab3669
--- /dev/null
+++ b/pkg/archive/wrap_test.go
@@ -0,0 +1,98 @@
+package archive
+
+import (
+	"archive/tar"
+	"bytes"
+	"io"
+	"testing"
+)
+
+func TestGenerateEmptyFile(t *testing.T) {
+	archive, err := Generate("emptyFile")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if archive == nil {
+		t.Fatal("The generated archive should not be nil.")
+	}
+
+	expectedFiles := [][]string{
+		{"emptyFile", ""},
+	}
+
+	tr := tar.NewReader(archive)
+	actualFiles := make([][]string, 0, 10)
+	i := 0
+	for {
+		hdr, err := tr.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			t.Fatal(err)
+		}
+		buf := new(bytes.Buffer)
+		buf.ReadFrom(tr)
+		content := buf.String()
+		actualFiles = append(actualFiles, []string{hdr.Name, content})
+		i++
+	}
+	if len(actualFiles) != len(expectedFiles) {
+		t.Fatalf("Number of expected files %d, got %d.", len(expectedFiles), len(actualFiles))
+	}
+	for i := 0; i < len(expectedFiles); i++ {
+		actual := actualFiles[i]
+		expected := expectedFiles[i]
+		if actual[0] != expected[0] {
+			t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0])
+		}
+		if actual[1] != expected[1] {
+			t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1])
+		}
+	}
+}
+
+func TestGenerateWithContent(t *testing.T) {
+	archive, err := Generate("file", "content")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if archive == nil {
+		t.Fatal("The generated archive should not be nil.")
+	}
+
+	expectedFiles := [][]string{
+		{"file", "content"},
+	}
+
+	tr := tar.NewReader(archive)
+	actualFiles := make([][]string, 0, 10)
+	i := 0
+	for {
+		hdr, err := tr.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			t.Fatal(err)
+		}
+		buf := new(bytes.Buffer)
+		buf.ReadFrom(tr)
+		content := buf.String()
+		actualFiles = append(actualFiles, []string{hdr.Name, content})
+		i++
+	}
+	if len(actualFiles) != len(expectedFiles) {
+		t.Fatalf("Number of expected files %d, got %d.", len(expectedFiles), len(actualFiles))
+	}
+	for i := 0; i < len(expectedFiles); i++ {
+		actual := actualFiles[i]
+		expected := expectedFiles[i]
+		if actual[0] != expected[0] {
+			t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0])
+		}
+		if actual[1] != expected[1] {
+			t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1])
+		}
+	}
+}
diff --git a/pkg/broadcastwriter/broadcastwriter.go b/pkg/broadcastwriter/broadcastwriter.go
new file mode 100644
index 00000000..5d53658e
--- /dev/null
+++ b/pkg/broadcastwriter/broadcastwriter.go
@@ -0,0 +1,51 @@
+package broadcastwriter
+
+import (
+	"io"
+	"sync"
+)
+
+// BroadcastWriter broadcasts writes to multiple registered io.WriteClosers.
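+//
+// A minimal usage sketch (wcA and wcB are illustrative placeholders; any
+// io.WriteCloser works):
+//
+//	bw := New()
+//	bw.AddWriter(wcA)
+//	bw.AddWriter(wcB)
+//	bw.Write([]byte("hello")) // fans out to wcA and wcB
+//	bw.Clean()                // closes and drops all writers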
+type BroadcastWriter struct {
+	sync.Mutex
+	writers map[io.WriteCloser]struct{}
+}
+
+// AddWriter adds new io.WriteCloser.
+func (w *BroadcastWriter) AddWriter(writer io.WriteCloser) {
+	w.Lock()
+	w.writers[writer] = struct{}{}
+	w.Unlock()
+}
+
+// Write writes bytes to all writers. Failed writers will be evicted during
+// this call.
+func (w *BroadcastWriter) Write(p []byte) (n int, err error) {
+	w.Lock()
+	for sw := range w.writers {
+		if n, err := sw.Write(p); err != nil || n != len(p) {
+			// On error or short write, evict the writer
+			delete(w.writers, sw)
+		}
+	}
+	w.Unlock()
+	return len(p), nil
+}
+
+// Clean closes and removes all writers.
+func (w *BroadcastWriter) Clean() error {
+	w.Lock()
+	for w := range w.writers {
+		w.Close()
+	}
+	w.writers = make(map[io.WriteCloser]struct{})
+	w.Unlock()
+	return nil
+}
+
+func New() *BroadcastWriter {
+	return &BroadcastWriter{
+		writers: make(map[io.WriteCloser]struct{}),
+	}
+}
diff --git a/pkg/broadcastwriter/broadcastwriter_test.go b/pkg/broadcastwriter/broadcastwriter_test.go
new file mode 100644
index 00000000..bc243207
--- /dev/null
+++ b/pkg/broadcastwriter/broadcastwriter_test.go
@@ -0,0 +1,144 @@
+package broadcastwriter
+
+import (
+	"bytes"
+	"errors"
+
+	"testing"
+)
+
+type dummyWriter struct {
+	buffer      bytes.Buffer
+	failOnWrite bool
+}
+
+func (dw *dummyWriter) Write(p []byte) (n int, err error) {
+	if dw.failOnWrite {
+		return 0, errors.New("Fake fail")
+	}
+	return dw.buffer.Write(p)
+}
+
+func (dw *dummyWriter) String() string {
+	return dw.buffer.String()
+}
+
+func (dw *dummyWriter) Close() error {
+	return nil
+}
+
+func TestBroadcastWriter(t *testing.T) {
+	writer := New()
+
+	// Test 1: Both bufferA and bufferB should contain "foo"
+	bufferA := &dummyWriter{}
+	writer.AddWriter(bufferA)
+	bufferB := &dummyWriter{}
+	writer.AddWriter(bufferB)
+	writer.Write([]byte("foo"))
+
+	if bufferA.String() != "foo" {
+		t.Errorf("Buffer contains %v", bufferA.String())
+	}
+
+	if bufferB.String() != "foo" {
+		t.Errorf("Buffer contains %v", bufferB.String())
+	}
+
+	// Test 2: bufferA and bufferB should contain "foobar",
+	// while bufferC should only contain "bar"
+	bufferC := &dummyWriter{}
+	writer.AddWriter(bufferC)
+	writer.Write([]byte("bar"))
+
+	if bufferA.String() != "foobar" {
+		t.Errorf("Buffer contains %v", bufferA.String())
+	}
+
+	if bufferB.String() != "foobar" {
+		t.Errorf("Buffer contains %v", bufferB.String())
+	}
+
+	if bufferC.String() != "bar" {
+		t.Errorf("Buffer contains %v", bufferC.String())
+	}
+
+	// Test 3: Test eviction on failure
+	bufferA.failOnWrite = true
+	writer.Write([]byte("fail"))
+	if bufferA.String() != "foobar" {
+		t.Errorf("Buffer contains %v", bufferA.String())
+	}
+	if bufferC.String() != "barfail" {
+		t.Errorf("Buffer contains %v", bufferC.String())
+	}
+	// Even though we reset the flag, no more writes should go in there
+	bufferA.failOnWrite = false
+	writer.Write([]byte("test"))
+	if bufferA.String() != "foobar" {
+		t.Errorf("Buffer contains %v", bufferA.String())
+	}
+	if bufferC.String() != "barfailtest" {
+		t.Errorf("Buffer contains %v", bufferC.String())
+	}
+
+	writer.Clean()
+}
+
+type devNullCloser int
+
+func (d devNullCloser) Close() error {
+	return nil
+}
+
+func (d devNullCloser) Write(buf []byte) (int, error) {
+	return len(buf), nil
+}
+
+// This test checks for races. It is only useful when run with the race detector.
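+// Run it with `go test -race ./pkg/broadcastwriter` to exercise the detector.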
+func TestRaceBroadcastWriter(t *testing.T) { + writer := New() + c := make(chan bool) + go func() { + writer.AddWriter(devNullCloser(0)) + c <- true + }() + writer.Write([]byte("hello")) + <-c +} + +func BenchmarkBroadcastWriter(b *testing.B) { + writer := New() + setUpWriter := func() { + for i := 0; i < 100; i++ { + writer.AddWriter(devNullCloser(0)) + writer.AddWriter(devNullCloser(0)) + writer.AddWriter(devNullCloser(0)) + } + } + testLine := "Line that thinks that it is log line from docker" + var buf bytes.Buffer + for i := 0; i < 100; i++ { + buf.Write([]byte(testLine + "\n")) + } + // line without eol + buf.Write([]byte(testLine)) + testText := buf.Bytes() + b.SetBytes(int64(5 * len(testText))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + setUpWriter() + b.StartTimer() + + for j := 0; j < 5; j++ { + if _, err := writer.Write(testText); err != nil { + b.Fatal(err) + } + } + + b.StopTimer() + writer.Clean() + b.StartTimer() + } +} diff --git a/pkg/chrootarchive/archive.go b/pkg/chrootarchive/archive.go new file mode 100644 index 00000000..8e8e1597 --- /dev/null +++ b/pkg/chrootarchive/archive.go @@ -0,0 +1,92 @@ +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/system" +) + +var chrootArchiver = &archive.Archiver{Untar: Untar} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. +func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error { + + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + if options == nil { + options = &archive.TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + dest = filepath.Clean(dest) + if _, err := os.Stat(dest); os.IsNotExist(err) { + if err := system.MkdirAll(dest, 0777); err != nil { + return err + } + } + + r := ioutil.NopCloser(tarArchive) + if decompress { + decompressedArchive, err := archive.DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return invokeUnpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func TarUntar(src, dst string) error { + return chrootArchiver.TarUntar(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. 
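+//
+// A short sketch of the intended call pattern (paths are hypothetical):
+//
+//	if err := chrootarchive.CopyWithTar("/tmp/src", "/tmp/dst"); err != nil {
+//		log.Fatal(err)
+//	}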
+func CopyWithTar(src, dst string) error { + return chrootArchiver.CopyWithTar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +// +// If `dst` ends with a trailing slash '/' ('\' on Windows), the final +// destination path will be `dst/base(src)` or `dst\base(src)` +func CopyFileWithTar(src, dst string) (err error) { + return chrootArchiver.CopyFileWithTar(src, dst) +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. +func UntarPath(src, dst string) error { + return chrootArchiver.UntarPath(src, dst) +} diff --git a/pkg/chrootarchive/archive_test.go b/pkg/chrootarchive/archive_test.go new file mode 100644 index 00000000..1d6c2b92 --- /dev/null +++ b/pkg/chrootarchive/archive_test.go @@ -0,0 +1,381 @@ +package chrootarchive + +import ( + "bytes" + "fmt" + "hash/crc32" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/system" +) + +func init() { + reexec.Init() +} + +func TestChrootTarUntar(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntar") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "lolo"), []byte("hello lolo"), 0644); err != nil { + t.Fatal(err) + } + stream, err := archive.Tar(src, archive.Uncompressed) + if err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(dest, 0700); err != nil { + t.Fatal(err) + } + if err := Untar(stream, dest, &archive.TarOptions{ExcludePatterns: []string{"lolo"}}); err != nil { + t.Fatal(err) + } +} + +// gh#10426: Verify the fix for having a huge excludes list (like on `docker load` with large # of +// local images) +func TestChrootUntarWithHugeExcludesList(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarHugeExcludes") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil { + t.Fatal(err) + } + stream, err := archive.Tar(src, archive.Uncompressed) + if err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "dest") + if err := system.MkdirAll(dest, 0700); err != nil { + t.Fatal(err) + } + options := &archive.TarOptions{} + //65534 entries of 64-byte strings ~= 4MB of environment space which should overflow + //on most systems when passed via environment or command line arguments + excludes := make([]string, 65534, 65534) + for i := 0; i < 65534; i++ { + excludes[i] = strings.Repeat(string(i), 64) + } + options.ExcludePatterns = excludes + if err := Untar(stream, dest, options); err != nil { + t.Fatal(err) + } +} + +func TestChrootUntarEmptyArchive(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarEmptyArchive") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := Untar(nil, tmpdir, nil); err == nil { + t.Fatal("expected error on 
empty archive") + } +} + +func prepareSourceDirectory(numberOfFiles int, targetPath string, makeSymLinks bool) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + if makeSymLinks { + if err := os.Symlink(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} + +func getHash(filename string) (uint32, error) { + stream, err := ioutil.ReadFile(filename) + if err != nil { + return 0, err + } + hash := crc32.NewIEEE() + hash.Write(stream) + return hash.Sum32(), nil +} + +func compareDirectories(src string, dest string) error { + changes, err := archive.ChangesDirs(dest, src) + if err != nil { + return err + } + if len(changes) > 0 { + return fmt.Errorf("Unexpected differences after untar: %v", changes) + } + return nil +} + +func compareFiles(src string, dest string) error { + srcHash, err := getHash(src) + if err != nil { + return err + } + destHash, err := getHash(dest) + if err != nil { + return err + } + if srcHash != destHash { + return fmt.Errorf("%s is different from %s", src, dest) + } + return nil +} + +func TestChrootTarUntarWithSymlink(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntarWithSymlink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if _, err := prepareSourceDirectory(10, src, true); err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "dest") + if err := TarUntar(src, dest); err != nil { + t.Fatal(err) + } + if err := compareDirectories(src, dest); err != nil { + t.Fatal(err) + } +} + +func TestChrootCopyWithTar(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootCopyWithTar") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if _, err := prepareSourceDirectory(10, src, true); err != nil { + t.Fatal(err) + } + + // Copy directory + dest := filepath.Join(tmpdir, "dest") + if err := CopyWithTar(src, dest); err != nil { + t.Fatal(err) + } + if err := compareDirectories(src, dest); err != nil { + t.Fatal(err) + } + + // Copy file + srcfile := filepath.Join(src, "file-1") + dest = filepath.Join(tmpdir, "destFile") + destfile := filepath.Join(dest, "file-1") + if err := CopyWithTar(srcfile, destfile); err != nil { + t.Fatal(err) + } + if err := compareFiles(srcfile, destfile); err != nil { + t.Fatal(err) + } + + // Copy symbolic link + srcLinkfile := filepath.Join(src, "file-1-link") + dest = filepath.Join(tmpdir, "destSymlink") + destLinkfile := filepath.Join(dest, "file-1-link") + if err := CopyWithTar(srcLinkfile, destLinkfile); err != nil { + t.Fatal(err) + } + if err := compareFiles(srcLinkfile, destLinkfile); err != nil { + t.Fatal(err) + } +} + +func TestChrootCopyFileWithTar(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootCopyFileWithTar") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if _, err := prepareSourceDirectory(10, src, true); err != nil { + t.Fatal(err) + } + + // Copy 
directory
+	dest := filepath.Join(tmpdir, "dest")
+	if err := CopyFileWithTar(src, dest); err == nil {
+		t.Fatal("Expected error on copying directory")
+	}
+
+	// Copy file
+	srcfile := filepath.Join(src, "file-1")
+	dest = filepath.Join(tmpdir, "destFile")
+	destfile := filepath.Join(dest, "file-1")
+	if err := CopyFileWithTar(srcfile, destfile); err != nil {
+		t.Fatal(err)
+	}
+	if err := compareFiles(srcfile, destfile); err != nil {
+		t.Fatal(err)
+	}
+
+	// Copy symbolic link
+	srcLinkfile := filepath.Join(src, "file-1-link")
+	dest = filepath.Join(tmpdir, "destSymlink")
+	destLinkfile := filepath.Join(dest, "file-1-link")
+	if err := CopyFileWithTar(srcLinkfile, destLinkfile); err != nil {
+		t.Fatal(err)
+	}
+	if err := compareFiles(srcLinkfile, destLinkfile); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestChrootUntarPath(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarPath")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+	src := filepath.Join(tmpdir, "src")
+	if err := system.MkdirAll(src, 0700); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := prepareSourceDirectory(10, src, true); err != nil {
+		t.Fatal(err)
+	}
+	dest := filepath.Join(tmpdir, "dest")
+	// Untar a directory
+	if err := UntarPath(src, dest); err == nil {
+		t.Fatal("Expected error on untarring a directory")
+	}
+
+	// Untar a tar file
+	stream, err := archive.Tar(src, archive.Uncompressed)
+	if err != nil {
+		t.Fatal(err)
+	}
+	buf := new(bytes.Buffer)
+	buf.ReadFrom(stream)
+	tarfile := filepath.Join(tmpdir, "src.tar")
+	if err := ioutil.WriteFile(tarfile, buf.Bytes(), 0644); err != nil {
+		t.Fatal(err)
+	}
+	if err := UntarPath(tarfile, dest); err != nil {
+		t.Fatal(err)
+	}
+	if err := compareDirectories(src, dest); err != nil {
+		t.Fatal(err)
+	}
+}
+
+type slowEmptyTarReader struct {
+	size      int
+	offset    int
+	chunkSize int
+}
+
+// Read is a slow reader of an empty tar (like the output of "tar c --files-from /dev/null")
+func (s *slowEmptyTarReader) Read(p []byte) (int, error) {
+	time.Sleep(100 * time.Millisecond)
+	count := s.chunkSize
+	if len(p) < s.chunkSize {
+		count = len(p)
+	}
+	for i := 0; i < count; i++ {
+		p[i] = 0
+	}
+	s.offset += count
+	if s.offset > s.size {
+		return count, io.EOF
+	}
+	return count, nil
+}
+
+func TestChrootUntarEmptyArchiveFromSlowReader(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarEmptyArchiveFromSlowReader")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+	dest := filepath.Join(tmpdir, "dest")
+	if err := system.MkdirAll(dest, 0700); err != nil {
+		t.Fatal(err)
+	}
+	stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024}
+	if err := Untar(stream, dest, nil); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestChrootApplyEmptyArchiveFromSlowReader(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyEmptyArchiveFromSlowReader")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+	dest := filepath.Join(tmpdir, "dest")
+	if err := system.MkdirAll(dest, 0700); err != nil {
+		t.Fatal(err)
+	}
+	stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024}
+	if _, err := ApplyLayer(dest, stream); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestChrootApplyDotDotFile(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyDotDotFile")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+	src := filepath.Join(tmpdir, "src")
+	if err := system.MkdirAll(src, 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(filepath.Join(src, "..gitme"), []byte(""), 0644); err != nil {
+		t.Fatal(err)
+	}
+	stream, err := archive.Tar(src, archive.Uncompressed)
+	if err != nil {
+		t.Fatal(err)
+	}
+	dest := filepath.Join(tmpdir, "dest")
+	if err := system.MkdirAll(dest, 0700); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ApplyLayer(dest, stream); err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/pkg/chrootarchive/archive_unix.go b/pkg/chrootarchive/archive_unix.go
new file mode 100644
index 00000000..83331425
--- /dev/null
+++ b/pkg/chrootarchive/archive_unix.go
@@ -0,0 +1,85 @@
+// +build !windows
+
+package chrootarchive
+
+import (
+	"bytes"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"io"
+	"os"
+	"runtime"
+	"syscall"
+
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/reexec"
+)
+
+func chroot(path string) error {
+	if err := syscall.Chroot(path); err != nil {
+		return err
+	}
+	return syscall.Chdir("/")
+}
+
+// untar is the entry-point for docker-untar on re-exec. This is not used on
+// Windows as it does not support chroot, hence no point sandboxing through
+// chroot and reexec.
+func untar() {
+	runtime.LockOSThread()
+	flag.Parse()
+
+	var options *archive.TarOptions
+
+	// read the options from the pipe "ExtraFiles"
+	if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
+		fatal(err)
+	}
+
+	if err := chroot(flag.Arg(0)); err != nil {
+		fatal(err)
+	}
+
+	if err := archive.Unpack(os.Stdin, "/", options); err != nil {
+		fatal(err)
+	}
+	// fully consume stdin in case it is zero padded
+	flush(os.Stdin)
+	os.Exit(0)
+}
+
+func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error {
+
+	// We can't pass a potentially large exclude list directly via cmd line
+	// because we easily overrun the kernel's max argument/environment size
+	// when the full image list is passed (e.g. when this is used by
+	// `docker load`). We will marshal the options via a pipe to the
+	// child
+	r, w, err := os.Pipe()
+	if err != nil {
+		return fmt.Errorf("Untar pipe failure: %v", err)
+	}
+
+	cmd := reexec.Command("docker-untar", dest)
+	cmd.Stdin = decompressedArchive
+
+	cmd.ExtraFiles = append(cmd.ExtraFiles, r)
+	output := bytes.NewBuffer(nil)
+	cmd.Stdout = output
+	cmd.Stderr = output
+
+	if err := cmd.Start(); err != nil {
+		return fmt.Errorf("Untar error on re-exec cmd: %v", err)
+	}
+	// write the options to the pipe for the untar exec to read
+	if err := json.NewEncoder(w).Encode(options); err != nil {
+		return fmt.Errorf("Untar json encode to pipe failed: %v", err)
+	}
+	w.Close()
+
+	if err := cmd.Wait(); err != nil {
+		return fmt.Errorf("Untar re-exec error: %v: output: %s", err, output)
+	}
+	return nil
+}
diff --git a/pkg/chrootarchive/archive_windows.go b/pkg/chrootarchive/archive_windows.go
new file mode 100644
index 00000000..c44556cf
--- /dev/null
+++ b/pkg/chrootarchive/archive_windows.go
@@ -0,0 +1,21 @@
+package chrootarchive
+
+import (
+	"io"
+
+	"github.com/docker/docker/pkg/archive"
+)
+
+// chroot is not supported by Windows
+func chroot(path string) error {
+	return nil
+}
+
+func invokeUnpack(decompressedArchive io.ReadCloser,
+	dest string,
+	options *archive.TarOptions) error {
+	// Windows is different to Linux here because Windows does not support
+	// chroot. Hence there is no point sandboxing a chrooted process to
+	// do the unpack. We call inline instead within the daemon process.
+	return archive.Unpack(decompressedArchive, dest, options)
+}
diff --git a/pkg/chrootarchive/diff_unix.go b/pkg/chrootarchive/diff_unix.go
new file mode 100644
index 00000000..bec85a0d
--- /dev/null
+++ b/pkg/chrootarchive/diff_unix.go
@@ -0,0 +1,114 @@
+// +build !windows
+
+package chrootarchive
+
+import (
+	"bytes"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"runtime"
+
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/reexec"
+	"github.com/docker/docker/pkg/system"
+)
+
+type applyLayerResponse struct {
+	LayerSize int64 `json:"layerSize"`
+}
+
+// applyLayer is the entry-point for docker-applyLayer on re-exec. This is not
+// used on Windows as it does not support chroot, hence no point sandboxing
+// through chroot and reexec.
+func applyLayer() {
+
+	var (
+		tmpDir = ""
+		err    error
+	)
+	runtime.LockOSThread()
+	flag.Parse()
+
+	if err := chroot(flag.Arg(0)); err != nil {
+		fatal(err)
+	}
+
+	// We need to be able to set any perms
+	oldmask, err := system.Umask(0)
+	defer system.Umask(oldmask)
+	if err != nil {
+		fatal(err)
+	}
+
+	if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil {
+		fatal(err)
+	}
+
+	os.Setenv("TMPDIR", tmpDir)
+	size, err := archive.UnpackLayer("/", os.Stdin)
+	os.RemoveAll(tmpDir)
+	if err != nil {
+		fatal(err)
+	}
+
+	encoder := json.NewEncoder(os.Stdout)
+	if err := encoder.Encode(applyLayerResponse{size}); err != nil {
+		fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err))
+	}
+
+	flush(os.Stdout)
+	flush(os.Stdin)
+	os.Exit(0)
+}
+
+// ApplyLayer parses a diff in the standard layer format from `layer`,
+// and applies it to the directory `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyLayer(dest string, layer archive.ArchiveReader) (size int64, err error) {
+	return applyLayerHandler(dest, layer, true)
+}
+
+// ApplyUncompressedLayer parses a diff in the standard layer format from
+// `layer`, and applies it to the directory `dest`. The stream `layer`
+// can only be uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyUncompressedLayer(dest string, layer archive.ArchiveReader) (int64, error) {
+	return applyLayerHandler(dest, layer, false)
+}
+
+func applyLayerHandler(dest string, layer archive.ArchiveReader, decompress bool) (size int64, err error) {
+	dest = filepath.Clean(dest)
+	if decompress {
+		decompressed, err := archive.DecompressStream(layer)
+		if err != nil {
+			return 0, err
+		}
+		defer decompressed.Close()
+
+		layer = decompressed
+	}
+
+	cmd := reexec.Command("docker-applyLayer", dest)
+	cmd.Stdin = layer
+
+	outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer)
+	cmd.Stdout, cmd.Stderr = outBuf, errBuf
+
+	if err = cmd.Run(); err != nil {
+		return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf)
+	}
+
+	// Stdout should be a valid JSON struct representing an applyLayerResponse.
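+	// A successful run prints a single JSON object, e.g. {"layerSize":123}
+	// (the number shown is illustrative).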
+ response := applyLayerResponse{} + decoder := json.NewDecoder(outBuf) + if err = decoder.Decode(&response); err != nil { + return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) + } + + return response.LayerSize, nil +} diff --git a/pkg/chrootarchive/diff_windows.go b/pkg/chrootarchive/diff_windows.go new file mode 100644 index 00000000..0c9b38e4 --- /dev/null +++ b/pkg/chrootarchive/diff_windows.go @@ -0,0 +1,35 @@ +package chrootarchive + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" +) + +// ApplyLayer parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. +func ApplyLayer(dest string, layer archive.ArchiveReader) (size int64, err error) { + dest = filepath.Clean(dest) + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract") + if err != nil { + return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. %s", dest, err) + } + + s, err := archive.UnpackLayer(dest, decompressed) + os.RemoveAll(tmpDir) + if err != nil { + return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s", err, dest) + } + + return s, nil +} diff --git a/pkg/chrootarchive/init_unix.go b/pkg/chrootarchive/init_unix.go new file mode 100644 index 00000000..49fcacce --- /dev/null +++ b/pkg/chrootarchive/init_unix.go @@ -0,0 +1,28 @@ +// +build !windows + +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Register("docker-applyLayer", applyLayer) + reexec.Register("docker-untar", untar) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +// flush consumes all the bytes from the reader discarding +// any errors +func flush(r io.Reader) { + io.Copy(ioutil.Discard, r) +} diff --git a/pkg/chrootarchive/init_windows.go b/pkg/chrootarchive/init_windows.go new file mode 100644 index 00000000..fa17c9bf --- /dev/null +++ b/pkg/chrootarchive/init_windows.go @@ -0,0 +1,4 @@ +package chrootarchive + +func init() { +} diff --git a/pkg/devicemapper/attach_loopback.go b/pkg/devicemapper/attach_loopback.go new file mode 100644 index 00000000..424a9746 --- /dev/null +++ b/pkg/devicemapper/attach_loopback.go @@ -0,0 +1,129 @@ +// +build linux + +package devicemapper + +import ( + "fmt" + "os" + "syscall" + + "github.com/Sirupsen/logrus" +) + +func stringToLoopName(src string) [LoNameSize]uint8 { + var dst [LoNameSize]uint8 + copy(dst[:], src[:]) + return dst +} + +func getNextFreeLoopbackIndex() (int, error) { + f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644) + if err != nil { + return 0, err + } + defer f.Close() + + index, err := ioctlLoopCtlGetFree(f.Fd()) + if index < 0 { + index = 0 + } + return index, err +} + +func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.File, err error) { + // Start looking for a free /dev/loop + for { + target := fmt.Sprintf("/dev/loop%d", index) + index++ + + fi, err := os.Stat(target) + if err != nil { + if os.IsNotExist(err) { + logrus.Errorf("There are no more loopback devices available.") + } + return nil, ErrAttachLoopbackDevice + } + + if fi.Mode()&os.ModeDevice != os.ModeDevice { + logrus.Errorf("Loopback device %s is not a block device.", target) + continue + } + + // OpenFile adds O_CLOEXEC + loopFile, 
err = os.OpenFile(target, os.O_RDWR, 0644)
+		if err != nil {
+			logrus.Errorf("Error opening loopback device: %s", err)
+			return nil, ErrAttachLoopbackDevice
+		}
+
+		// Try to attach to the loop file
+		if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil {
+			loopFile.Close()
+
+			// If the error is EBUSY, then try the next loopback
+			if err != syscall.EBUSY {
+				logrus.Errorf("Cannot set up loopback device %s: %s", target, err)
+				return nil, ErrAttachLoopbackDevice
+			}
+
+			// Otherwise, we keep going with the loop
+			continue
+		}
+		// In case of success, we finished. Break the loop.
+		break
+	}
+
+	// This can't happen, but let's be sure
+	if loopFile == nil {
+		logrus.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name())
+		return nil, ErrAttachLoopbackDevice
+	}
+
+	return loopFile, nil
+}
+
+// AttachLoopDevice attaches the given sparse file to the next
+// available loopback device. It returns an opened *os.File.
+func AttachLoopDevice(sparseName string) (loop *os.File, err error) {
+
+	// Try to retrieve the next available loopback device via syscall.
+	// If it fails, we discard the error and start looking for a
+	// loopback from index 0.
+	startIndex, err := getNextFreeLoopbackIndex()
+	if err != nil {
+		logrus.Debugf("Error retrieving the next available loopback: %s", err)
+	}
+
+	// OpenFile adds O_CLOEXEC
+	sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644)
+	if err != nil {
+		logrus.Errorf("Error opening sparse file %s: %s", sparseName, err)
+		return nil, ErrAttachLoopbackDevice
+	}
+	defer sparseFile.Close()
+
+	loopFile, err := openNextAvailableLoopback(startIndex, sparseFile)
+	if err != nil {
+		return nil, err
+	}
+
+	// Set the status of the loopback device
+	loopInfo := &LoopInfo64{
+		loFileName: stringToLoopName(loopFile.Name()),
+		loOffset:   0,
+		loFlags:    LoFlagsAutoClear,
+	}
+
+	if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil {
+		logrus.Errorf("Cannot set up loopback device info: %s", err)
+
+		// If the call failed, then free the loopback device
+		if err := ioctlLoopClrFd(loopFile.Fd()); err != nil {
+			logrus.Errorf("Error while cleaning up the loopback device")
+		}
+		loopFile.Close()
+		return nil, ErrAttachLoopbackDevice
+	}
+
+	return loopFile, nil
+}
diff --git a/pkg/devicemapper/devmapper.go b/pkg/devicemapper/devmapper.go
new file mode 100644
index 00000000..84d0729b
--- /dev/null
+++ b/pkg/devicemapper/devmapper.go
@@ -0,0 +1,807 @@
+// +build linux
+
+package devicemapper
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"runtime"
+	"syscall"
+	"unsafe"
+
+	"github.com/Sirupsen/logrus"
+)
+
+type DevmapperLogger interface {
+	DMLog(level int, file string, line int, dmError int, message string)
+}
+
+const (
+	DeviceCreate TaskType = iota
+	DeviceReload
+	DeviceRemove
+	DeviceRemoveAll
+	DeviceSuspend
+	DeviceResume
+	DeviceInfo
+	DeviceDeps
+	DeviceRename
+	DeviceVersion
+	DeviceStatus
+	DeviceTable
+	DeviceWaitevent
+	DeviceList
+	DeviceClear
+	DeviceMknodes
+	DeviceListVersions
+	DeviceTargetMsg
+	DeviceSetGeometry
+)
+
+const (
+	AddNodeOnResume AddNodeType = iota
+	AddNodeOnCreate
+)
+
+var (
+	ErrTaskRun        = errors.New("dm_task_run failed")
+	ErrTaskSetName    = errors.New("dm_task_set_name failed")
+	ErrTaskSetMessage = errors.New("dm_task_set_message failed")
+	ErrTaskSetAddNode = errors.New("dm_task_set_add_node failed")
+	ErrTaskSetRo      = errors.New("dm_task_set_ro failed")
+	ErrTaskAddTarget  = errors.New("dm_task_add_target failed")
failed") + ErrTaskGetDeps = errors.New("dm_task_get_deps failed") + ErrTaskGetInfo = errors.New("dm_task_get_info failed") + ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed") + ErrTaskDeferredRemove = errors.New("dm_task_deferred_remove failed") + ErrTaskSetCookie = errors.New("dm_task_set_cookie failed") + ErrNilCookie = errors.New("cookie ptr can't be nil") + ErrAttachLoopbackDevice = errors.New("loopback mounting failed") + ErrGetBlockSize = errors.New("Can't get block size") + ErrUdevWait = errors.New("wait on udev cookie failed") + ErrSetDevDir = errors.New("dm_set_dev_dir failed") + ErrGetLibraryVersion = errors.New("dm_get_library_version failed") + ErrCreateRemoveTask = errors.New("Can't create task of type DeviceRemove") + ErrRunRemoveDevice = errors.New("running RemoveDevice failed") + ErrInvalidAddNode = errors.New("Invalid AddNode type") + ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") + ErrLoopbackSetCapacity = errors.New("Unable set loopback capacity") + ErrBusy = errors.New("Device is Busy") + ErrDeviceIdExists = errors.New("Device Id Exists") + ErrEnxio = errors.New("No such device or address") + + dmSawBusy bool + dmSawExist bool + dmSawEnxio bool // No Such Device or Address +) + +type ( + Task struct { + unmanaged *CDmTask + } + Deps struct { + Count uint32 + Filler uint32 + Device []uint64 + } + Info struct { + Exists int + Suspended int + LiveTable int + InactiveTable int + OpenCount int32 + EventNr uint32 + Major uint32 + Minor uint32 + ReadOnly int + TargetCount int32 + DeferredRemove int + } + TaskType int + AddNodeType int +) + +// Returns whether error conveys the information about device Id already +// exist or not. This will be true if device creation or snap creation +// operation fails if device or snap device already exists in pool. +// Current implementation is little crude as it scans the error string +// for exact pattern match. Replacing it with more robust implementation +// is desirable. 
+func DeviceIdExists(err error) bool { + return fmt.Sprint(err) == fmt.Sprint(ErrDeviceIdExists) +} + +func (t *Task) destroy() { + if t != nil { + DmTaskDestroy(t.unmanaged) + runtime.SetFinalizer(t, nil) + } +} + +// TaskCreateNamed is a convenience function for TaskCreate when a name +// will be set on the task as well +func TaskCreateNamed(t TaskType, name string) (*Task, error) { + task := TaskCreate(t) + if task == nil { + return nil, fmt.Errorf("Can't create task of type %d", int(t)) + } + if err := task.SetName(name); err != nil { + return nil, fmt.Errorf("Can't set task name %s", name) + } + return task, nil +} + +// TaskCreate initializes a devicemapper task of tasktype +func TaskCreate(tasktype TaskType) *Task { + Ctask := DmTaskCreate(int(tasktype)) + if Ctask == nil { + return nil + } + task := &Task{unmanaged: Ctask} + runtime.SetFinalizer(task, (*Task).destroy) + return task +} + +func (t *Task) Run() error { + if res := DmTaskRun(t.unmanaged); res != 1 { + return ErrTaskRun + } + return nil +} + +func (t *Task) SetName(name string) error { + if res := DmTaskSetName(t.unmanaged, name); res != 1 { + return ErrTaskSetName + } + return nil +} + +func (t *Task) SetMessage(message string) error { + if res := DmTaskSetMessage(t.unmanaged, message); res != 1 { + return ErrTaskSetMessage + } + return nil +} + +func (t *Task) SetSector(sector uint64) error { + if res := DmTaskSetSector(t.unmanaged, sector); res != 1 { + return ErrTaskSetSector + } + return nil +} + +func (t *Task) SetCookie(cookie *uint, flags uint16) error { + if cookie == nil { + return ErrNilCookie + } + if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 { + return ErrTaskSetCookie + } + return nil +} + +func (t *Task) SetAddNode(addNode AddNodeType) error { + if addNode != AddNodeOnResume && addNode != AddNodeOnCreate { + return ErrInvalidAddNode + } + if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 { + return ErrTaskSetAddNode + } + return nil +} + +func (t *Task) SetRo() error { + if res := DmTaskSetRo(t.unmanaged); res != 1 { + return ErrTaskSetRo + } + return nil +} + +func (t *Task) AddTarget(start, size uint64, ttype, params string) error { + if res := DmTaskAddTarget(t.unmanaged, start, size, + ttype, params); res != 1 { + return ErrTaskAddTarget + } + return nil +} + +func (t *Task) GetDeps() (*Deps, error) { + var deps *Deps + if deps = DmTaskGetDeps(t.unmanaged); deps == nil { + return nil, ErrTaskGetDeps + } + return deps, nil +} + +func (t *Task) GetInfo() (*Info, error) { + info := &Info{} + if res := DmTaskGetInfo(t.unmanaged, info); res != 1 { + return nil, ErrTaskGetInfo + } + return info, nil +} + +func (t *Task) GetInfoWithDeferred() (*Info, error) { + info := &Info{} + if res := DmTaskGetInfoWithDeferred(t.unmanaged, info); res != 1 { + return nil, ErrTaskGetInfo + } + return info, nil +} + +func (t *Task) GetDriverVersion() (string, error) { + res := DmTaskGetDriverVersion(t.unmanaged) + if res == "" { + return "", ErrTaskGetDriverVersion + } + return res, nil +} + +func (t *Task) GetNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start uint64, + length uint64, targetType string, params string) { + + return DmGetNextTarget(t.unmanaged, next, &start, &length, + &targetType, ¶ms), + start, length, targetType, params +} + +func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) { + loopInfo, err := ioctlLoopGetStatus64(file.Fd()) + if err != nil { + logrus.Errorf("Error get loopback backing file: %s", err) + return 0, 0, ErrGetLoopbackBackingFile + } + 
return loopInfo.loDevice, loopInfo.loInode, nil
+}
+
+func LoopbackSetCapacity(file *os.File) error {
+	if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil {
+		logrus.Errorf("Error loopbackSetCapacity: %s", err)
+		return ErrLoopbackSetCapacity
+	}
+	return nil
+}
+
+func FindLoopDeviceFor(file *os.File) *os.File {
+	stat, err := file.Stat()
+	if err != nil {
+		return nil
+	}
+	targetInode := stat.Sys().(*syscall.Stat_t).Ino
+	targetDevice := stat.Sys().(*syscall.Stat_t).Dev
+
+	for i := 0; true; i++ {
+		path := fmt.Sprintf("/dev/loop%d", i)
+
+		file, err := os.OpenFile(path, os.O_RDWR, 0)
+		if err != nil {
+			if os.IsNotExist(err) {
+				return nil
+			}
+
+			// Ignore all errors until the first not-exist;
+			// we want to continue looking for the file.
+			continue
+		}
+
+		dev, inode, err := getLoopbackBackingFile(file)
+		if err == nil && dev == targetDevice && inode == targetInode {
+			return file
+		}
+		file.Close()
+	}
+
+	return nil
+}
+
+func UdevWait(cookie *uint) error {
+	if res := DmUdevWait(*cookie); res != 1 {
+		logrus.Debugf("Failed to wait on udev cookie %d", *cookie)
+		return ErrUdevWait
+	}
+	return nil
+}
+
+func LogInitVerbose(level int) {
+	DmLogInitVerbose(level)
+}
+
+var dmLogger DevmapperLogger
+
+// LogInit initializes the logger for the devicemapper library.
+func LogInit(logger DevmapperLogger) {
+	dmLogger = logger
+	LogWithErrnoInit()
+}
+
+func SetDevDir(dir string) error {
+	if res := DmSetDevDir(dir); res != 1 {
+		logrus.Debugf("Error dm_set_dev_dir")
+		return ErrSetDevDir
+	}
+	return nil
+}
+
+func GetLibraryVersion() (string, error) {
+	var version string
+	if res := DmGetLibraryVersion(&version); res != 1 {
+		return "", ErrGetLibraryVersion
+	}
+	return version, nil
+}
+
+// UdevSyncSupported returns whether device-mapper is able to sync with udev
+//
+// This is essential otherwise race conditions can arise where both udev and
+// device-mapper attempt to create and destroy devices.
+func UdevSyncSupported() bool {
+	return DmUdevGetSyncSupport() != 0
+}
+
+// UdevSetSyncSupport allows setting whether the udev sync should be enabled.
+// The return bool indicates the state of whether the sync is enabled.
+func UdevSetSyncSupport(enable bool) bool {
+	if enable {
+		DmUdevSetSyncSupport(1)
+	} else {
+		DmUdevSetSyncSupport(0)
+	}
+
+	return UdevSyncSupported()
+}
+
+// CookieSupported returns whether the version of device-mapper supports the
+// use of cookies in tasks.
+// This is largely a lower-level call that other functions use.
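+//
+// A typical cookie lifecycle, shared by the helpers in this file (a sketch
+// only; "example" is a hypothetical device name):
+//
+//	task, _ := TaskCreateNamed(DeviceRemove, "example")
+//	var cookie uint
+//	task.SetCookie(&cookie, 0) // register a udev cookie before the task runs
+//	defer UdevWait(&cookie)    // wait until udev has processed the events
+//	task.Run()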
+func CookieSupported() bool { + return DmCookieSupported() != 0 +} + +// Useful helper for cleanup +func RemoveDevice(name string) error { + task, err := TaskCreateNamed(DeviceRemove, name) + if task == nil { + return err + } + + var cookie uint = 0 + if err := task.SetCookie(&cookie, 0); err != nil { + return fmt.Errorf("Can not set cookie: %s", err) + } + defer UdevWait(&cookie) + + dmSawBusy = false // reset before the task is run + if err = task.Run(); err != nil { + if dmSawBusy { + return ErrBusy + } + return fmt.Errorf("Error running RemoveDevice %s", err) + } + + return nil +} + +func RemoveDeviceDeferred(name string) error { + logrus.Debugf("[devmapper] RemoveDeviceDeferred START(%s)", name) + defer logrus.Debugf("[devmapper] RemoveDeviceDeferred END(%s)", name) + task, err := TaskCreateNamed(DeviceRemove, name) + if task == nil { + return err + } + + if err := DmTaskDeferredRemove(task.unmanaged); err != 1 { + return ErrTaskDeferredRemove + } + + if err = task.Run(); err != nil { + return fmt.Errorf("Error running RemoveDeviceDeferred %s", err) + } + + return nil +} + +// Useful helper for cleanup +func CancelDeferredRemove(deviceName string) error { + task, err := TaskCreateNamed(DeviceTargetMsg, deviceName) + if task == nil { + return err + } + + if err := task.SetSector(0); err != nil { + return fmt.Errorf("Can't set sector %s", err) + } + + if err := task.SetMessage(fmt.Sprintf("@cancel_deferred_remove")); err != nil { + return fmt.Errorf("Can't set message %s", err) + } + + dmSawBusy = false + dmSawEnxio = false + if err := task.Run(); err != nil { + // A device might be being deleted already + if dmSawBusy { + return ErrBusy + } else if dmSawEnxio { + return ErrEnxio + } + return fmt.Errorf("Error running CancelDeferredRemove %s", err) + + } + return nil +} + +func GetBlockDeviceSize(file *os.File) (uint64, error) { + size, err := ioctlBlkGetSize64(file.Fd()) + if err != nil { + logrus.Errorf("Error getblockdevicesize: %s", err) + return 0, ErrGetBlockSize + } + return uint64(size), nil +} + +func BlockDeviceDiscard(path string) error { + file, err := os.OpenFile(path, os.O_RDWR, 0) + if err != nil { + return err + } + defer file.Close() + + size, err := GetBlockDeviceSize(file) + if err != nil { + return err + } + + if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil { + return err + } + + // Without this sometimes the remove of the device that happens after + // discard fails with EBUSY. 
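+	// (Assumption: a more targeted flush of the discarded device might be
+	// enough, but a full syscall.Sync is the conservative fix for the EBUSY
+	// described above.)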
+	syscall.Sync()
+
+	return nil
+}
+
+// CreatePool is the programmatic equivalent of "dmsetup create" for a thin pool.
+func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error {
+	task, err := TaskCreateNamed(DeviceCreate, poolName)
+	if task == nil {
+		return err
+	}
+
+	size, err := GetBlockDeviceSize(dataFile)
+	if err != nil {
+		return fmt.Errorf("Can't get data size %s", err)
+	}
+
+	params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize)
+	if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil {
+		return fmt.Errorf("Can't add target %s", err)
+	}
+
+	var cookie uint = 0
+	var flags uint16 = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag
+	if err := task.SetCookie(&cookie, flags); err != nil {
+		return fmt.Errorf("Can't set cookie %s", err)
+	}
+	defer UdevWait(&cookie)
+
+	if err := task.Run(); err != nil {
+		return fmt.Errorf("Error running DeviceCreate (CreatePool) %s", err)
+	}
+
+	return nil
+}
+
+func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error {
+	task, err := TaskCreateNamed(DeviceReload, poolName)
+	if task == nil {
+		return err
+	}
+
+	size, err := GetBlockDeviceSize(dataFile)
+	if err != nil {
+		return fmt.Errorf("Can't get data size %s", err)
+	}
+
+	params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize)
+	if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil {
+		return fmt.Errorf("Can't add target %s", err)
+	}
+
+	if err := task.Run(); err != nil {
+		return fmt.Errorf("Error running ReloadPool %s", err)
+	}
+
+	return nil
+}
+
+func GetDeps(name string) (*Deps, error) {
+	task, err := TaskCreateNamed(DeviceDeps, name)
+	if task == nil {
+		return nil, err
+	}
+	if err := task.Run(); err != nil {
+		return nil, err
+	}
+	return task.GetDeps()
+}
+
+func GetInfo(name string) (*Info, error) {
+	task, err := TaskCreateNamed(DeviceInfo, name)
+	if task == nil {
+		return nil, err
+	}
+	if err := task.Run(); err != nil {
+		return nil, err
+	}
+	return task.GetInfo()
+}
+
+func GetInfoWithDeferred(name string) (*Info, error) {
+	task, err := TaskCreateNamed(DeviceInfo, name)
+	if task == nil {
+		return nil, err
+	}
+	if err := task.Run(); err != nil {
+		return nil, err
+	}
+	return task.GetInfoWithDeferred()
+}
+
+func GetDriverVersion() (string, error) {
+	task := TaskCreate(DeviceVersion)
+	if task == nil {
+		return "", fmt.Errorf("Can't create DeviceVersion task")
+	}
+	if err := task.Run(); err != nil {
+		return "", err
+	}
+	return task.GetDriverVersion()
+}
+
+func GetStatus(name string) (uint64, uint64, string, string, error) {
+	task, err := TaskCreateNamed(DeviceStatus, name)
+	if task == nil {
+		logrus.Debugf("GetStatus: Error TaskCreateNamed: %s", err)
+		return 0, 0, "", "", err
+	}
+	if err := task.Run(); err != nil {
+		logrus.Debugf("GetStatus: Error Run: %s", err)
+		return 0, 0, "", "", err
+	}
+
+	devinfo, err := task.GetInfo()
+	if err != nil {
+		logrus.Debugf("GetStatus: Error GetInfo: %s", err)
+		return 0, 0, "", "", err
+	}
+	if devinfo.Exists == 0 {
+		logrus.Debugf("GetStatus: Non existing device %s", name)
+		return 0, 0, "", "", fmt.Errorf("Non existing device %s", name)
+	}
+
+	_, start, length, targetType, params := task.GetNextTarget(unsafe.Pointer(nil))
+	return start, length, targetType, params, nil
+}
+
+func GetTable(name string) (uint64, uint64, string, string, error) {
+	task, err :=
TaskCreateNamed(DeviceTable, name) + if task == nil { + logrus.Debugf("GetTable: Error TaskCreateNamed: %s", err) + return 0, 0, "", "", err + } + if err := task.Run(); err != nil { + logrus.Debugf("GetTable: Error Run: %s", err) + return 0, 0, "", "", err + } + + devinfo, err := task.GetInfo() + if err != nil { + logrus.Debugf("GetTable: Error GetInfo: %s", err) + return 0, 0, "", "", err + } + if devinfo.Exists == 0 { + logrus.Debugf("GetTable: Non existing device %s", name) + return 0, 0, "", "", fmt.Errorf("Non existing device %s", name) + } + + _, start, length, targetType, params := task.GetNextTarget(unsafe.Pointer(nil)) + return start, length, targetType, params, nil +} + +func SetTransactionId(poolName string, oldId uint64, newId uint64) error { + task, err := TaskCreateNamed(DeviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.SetSector(0); err != nil { + return fmt.Errorf("Can't set sector %s", err) + } + + if err := task.SetMessage(fmt.Sprintf("set_transaction_id %d %d", oldId, newId)); err != nil { + return fmt.Errorf("Can't set message %s", err) + } + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running SetTransactionId %s", err) + } + return nil +} + +func SuspendDevice(name string) error { + task, err := TaskCreateNamed(DeviceSuspend, name) + if task == nil { + return err + } + if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeviceSuspend %s", err) + } + return nil +} + +func ResumeDevice(name string) error { + task, err := TaskCreateNamed(DeviceResume, name) + if task == nil { + return err + } + + var cookie uint = 0 + if err := task.SetCookie(&cookie, 0); err != nil { + return fmt.Errorf("Can't set cookie %s", err) + } + defer UdevWait(&cookie) + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeviceResume %s", err) + } + + return nil +} + +func CreateDevice(poolName string, deviceId int) error { + logrus.Debugf("[devmapper] CreateDevice(poolName=%v, deviceId=%v)", poolName, deviceId) + task, err := TaskCreateNamed(DeviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.SetSector(0); err != nil { + return fmt.Errorf("Can't set sector %s", err) + } + + if err := task.SetMessage(fmt.Sprintf("create_thin %d", deviceId)); err != nil { + return fmt.Errorf("Can't set message %s", err) + } + + dmSawExist = false // reset before the task is run + if err := task.Run(); err != nil { + // Caller wants to know about ErrDeviceIdExists so that it can try with a different device id. 
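+	// A hypothetical caller-side retry loop (sketch only; "pool" and "id"
+	// are illustrative):
+	//
+	//	for {
+	//		if err := CreateDevice(pool, id); err != ErrDeviceIdExists {
+	//			return err
+	//		}
+	//		id++
+	//	}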
+ if dmSawExist { + return ErrDeviceIdExists + } + + return fmt.Errorf("Error running CreateDevice %s", err) + + } + return nil +} + +func DeleteDevice(poolName string, deviceId int) error { + task, err := TaskCreateNamed(DeviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.SetSector(0); err != nil { + return fmt.Errorf("Can't set sector %s", err) + } + + if err := task.SetMessage(fmt.Sprintf("delete %d", deviceId)); err != nil { + return fmt.Errorf("Can't set message %s", err) + } + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeleteDevice %s", err) + } + return nil +} + +func ActivateDevice(poolName string, name string, deviceId int, size uint64) error { + return activateDevice(poolName, name, deviceId, size, "") +} + +func ActivateDeviceWithExternal(poolName string, name string, deviceId int, size uint64, external string) error { + return activateDevice(poolName, name, deviceId, size, external) +} + +func activateDevice(poolName string, name string, deviceId int, size uint64, external string) error { + task, err := TaskCreateNamed(DeviceCreate, name) + if task == nil { + return err + } + + var params string + if len(external) > 0 { + params = fmt.Sprintf("%s %d %s", poolName, deviceId, external) + } else { + params = fmt.Sprintf("%s %d", poolName, deviceId) + } + if err := task.AddTarget(0, size/512, "thin", params); err != nil { + return fmt.Errorf("Can't add target %s", err) + } + if err := task.SetAddNode(AddNodeOnCreate); err != nil { + return fmt.Errorf("Can't add node %s", err) + } + + var cookie uint = 0 + if err := task.SetCookie(&cookie, 0); err != nil { + return fmt.Errorf("Can't set cookie %s", err) + } + + defer UdevWait(&cookie) + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeviceCreate (ActivateDevice) %s", err) + } + + return nil +} + +func CreateSnapDevice(poolName string, deviceId int, baseName string, baseDeviceId int) error { + devinfo, _ := GetInfo(baseName) + doSuspend := devinfo != nil && devinfo.Exists != 0 + + if doSuspend { + if err := SuspendDevice(baseName); err != nil { + return err + } + } + + task, err := TaskCreateNamed(DeviceTargetMsg, poolName) + if task == nil { + if doSuspend { + ResumeDevice(baseName) + } + return err + } + + if err := task.SetSector(0); err != nil { + if doSuspend { + ResumeDevice(baseName) + } + return fmt.Errorf("Can't set sector %s", err) + } + + if err := task.SetMessage(fmt.Sprintf("create_snap %d %d", deviceId, baseDeviceId)); err != nil { + if doSuspend { + ResumeDevice(baseName) + } + return fmt.Errorf("Can't set message %s", err) + } + + dmSawExist = false // reset before the task is run + if err := task.Run(); err != nil { + if doSuspend { + ResumeDevice(baseName) + } + // Caller wants to know about ErrDeviceIdExists so that it can try with a different device id. 
+		if dmSawExist {
+			return ErrDeviceIdExists
+		}
+
+		return fmt.Errorf("Error running DeviceCreate (createSnapDevice) %s", err)
+	}
+
+	if doSuspend {
+		if err := ResumeDevice(baseName); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/pkg/devicemapper/devmapper_log.go b/pkg/devicemapper/devmapper_log.go
new file mode 100644
index 00000000..f66a2088
--- /dev/null
+++ b/pkg/devicemapper/devmapper_log.go
@@ -0,0 +1,34 @@
+// +build linux
+
+package devicemapper
+
+import "C"
+
+import (
+	"strings"
+)
+
+// Due to the way cgo works this has to be in a separate file, as devmapper.go has
+// definitions in the cgo block, which is incompatible with using "//export"
+
+//export DevmapperLogCallback
+func DevmapperLogCallback(level C.int, file *C.char, line C.int, dm_errno_or_class C.int, message *C.char) {
+	msg := C.GoString(message)
+	if level < 7 { // only levels more severe than LogLevelDebug carry the markers below
+		if strings.Contains(msg, "busy") {
+			dmSawBusy = true
+		}
+
+		if strings.Contains(msg, "File exists") {
+			dmSawExist = true
+		}
+
+		if strings.Contains(msg, "No such device or address") {
+			dmSawEnxio = true
+		}
+	}
+
+	if dmLogger != nil {
+		dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dm_errno_or_class), msg)
+	}
+}
diff --git a/pkg/devicemapper/devmapper_wrapper.go b/pkg/devicemapper/devmapper_wrapper.go
new file mode 100644
index 00000000..87c20037
--- /dev/null
+++ b/pkg/devicemapper/devmapper_wrapper.go
@@ -0,0 +1,278 @@
+// +build linux
+
+package devicemapper
+
+/*
+#cgo LDFLAGS: -L. -ldevmapper
+#include <libdevmapper.h>
+#include <linux/loop.h> // FIXME: present only for defines, maybe we can remove it?
+#include <linux/fs.h>   // FIXME: present only for BLKGETSIZE64, maybe we can remove it?
+
+#ifndef LOOP_CTL_GET_FREE
+  #define LOOP_CTL_GET_FREE 0x4C82
+#endif
+
+#ifndef LO_FLAGS_PARTSCAN
+  #define LO_FLAGS_PARTSCAN 8
+#endif
+
+// FIXME: Can't we find a way to do the logging in pure Go?
+extern void DevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str);
+
+static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...)
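+// log_cb formats the C varargs into a fixed 256-byte buffer and forwards the
+// result to the exported Go callback above; cgo cannot hand a C va_list to
+// Go, so the formatting has to happen on the C side.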
+{ + char buffer[256]; + va_list ap; + + va_start(ap, f); + vsnprintf(buffer, 256, f, ap); + va_end(ap); + + DevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); +} + +static void log_with_errno_init() +{ + dm_log_with_errno_init(log_cb); +} +*/ +import "C" + +import "unsafe" + +type ( + CDmTask C.struct_dm_task + + CLoopInfo64 C.struct_loop_info64 + LoopInfo64 struct { + loDevice uint64 /* ioctl r/o */ + loInode uint64 /* ioctl r/o */ + loRdevice uint64 /* ioctl r/o */ + loOffset uint64 + loSizelimit uint64 /* bytes, 0 == max available */ + loNumber uint32 /* ioctl r/o */ + loEncrypt_type uint32 + loEncrypt_key_size uint32 /* ioctl w/o */ + loFlags uint32 /* ioctl r/o */ + loFileName [LoNameSize]uint8 + loCryptName [LoNameSize]uint8 + loEncryptKey [LoKeySize]uint8 /* ioctl w/o */ + loInit [2]uint64 + } +) + +// IOCTL consts +const ( + BlkGetSize64 = C.BLKGETSIZE64 + BlkDiscard = C.BLKDISCARD + + LoopSetFd = C.LOOP_SET_FD + LoopCtlGetFree = C.LOOP_CTL_GET_FREE + LoopGetStatus64 = C.LOOP_GET_STATUS64 + LoopSetStatus64 = C.LOOP_SET_STATUS64 + LoopClrFd = C.LOOP_CLR_FD + LoopSetCapacity = C.LOOP_SET_CAPACITY +) + +const ( + LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR + LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY + LoFlagsPartScan = C.LO_FLAGS_PARTSCAN + LoKeySize = C.LO_KEY_SIZE + LoNameSize = C.LO_NAME_SIZE +) + +const ( + DmUdevDisableSubsystemRulesFlag = C.DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG + DmUdevDisableDiskRulesFlag = C.DM_UDEV_DISABLE_DISK_RULES_FLAG + DmUdevDisableOtherRulesFlag = C.DM_UDEV_DISABLE_OTHER_RULES_FLAG + DmUdevDisableLibraryFallback = C.DM_UDEV_DISABLE_LIBRARY_FALLBACK +) + +var ( + DmGetLibraryVersion = dmGetLibraryVersionFct + DmGetNextTarget = dmGetNextTargetFct + DmLogInitVerbose = dmLogInitVerboseFct + DmSetDevDir = dmSetDevDirFct + DmTaskAddTarget = dmTaskAddTargetFct + DmTaskCreate = dmTaskCreateFct + DmTaskDestroy = dmTaskDestroyFct + DmTaskGetDeps = dmTaskGetDepsFct + DmTaskGetInfo = dmTaskGetInfoFct + DmTaskGetDriverVersion = dmTaskGetDriverVersionFct + DmTaskRun = dmTaskRunFct + DmTaskSetAddNode = dmTaskSetAddNodeFct + DmTaskSetCookie = dmTaskSetCookieFct + DmTaskSetMessage = dmTaskSetMessageFct + DmTaskSetName = dmTaskSetNameFct + DmTaskSetRo = dmTaskSetRoFct + DmTaskSetSector = dmTaskSetSectorFct + DmUdevWait = dmUdevWaitFct + DmUdevSetSyncSupport = dmUdevSetSyncSupportFct + DmUdevGetSyncSupport = dmUdevGetSyncSupportFct + DmCookieSupported = dmCookieSupportedFct + LogWithErrnoInit = logWithErrnoInitFct + DmTaskDeferredRemove = dmTaskDeferredRemoveFct + DmTaskGetInfoWithDeferred = dmTaskGetInfoWithDeferredFct +) + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func dmTaskDestroyFct(task *CDmTask) { + C.dm_task_destroy((*C.struct_dm_task)(task)) +} + +func dmTaskCreateFct(taskType int) *CDmTask { + return (*CDmTask)(C.dm_task_create(C.int(taskType))) +} + +func dmTaskRunFct(task *CDmTask) int { + ret, _ := C.dm_task_run((*C.struct_dm_task)(task)) + return int(ret) +} + +func dmTaskSetNameFct(task *CDmTask, name string) int { + Cname := C.CString(name) + defer free(Cname) + + return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname)) +} + +func dmTaskSetMessageFct(task *CDmTask, message string) int { + Cmessage := C.CString(message) + defer free(Cmessage) + + return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage)) +} + +func dmTaskSetSectorFct(task *CDmTask, sector uint64) int { + return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector))) +} + +func dmTaskSetCookieFct(task *CDmTask, cookie 
*uint, flags uint16) int { + cCookie := C.uint32_t(*cookie) + defer func() { + *cookie = uint(cCookie) + }() + return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags))) +} + +func dmTaskSetAddNodeFct(task *CDmTask, addNode AddNodeType) int { + return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode))) +} + +func dmTaskSetRoFct(task *CDmTask) int { + return int(C.dm_task_set_ro((*C.struct_dm_task)(task))) +} + +func dmTaskAddTargetFct(task *CDmTask, + start, size uint64, ttype, params string) int { + + Cttype := C.CString(ttype) + defer free(Cttype) + + Cparams := C.CString(params) + defer free(Cparams) + + return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams)) +} + +func dmTaskGetDepsFct(task *CDmTask) *Deps { + Cdeps := C.dm_task_get_deps((*C.struct_dm_task)(task)) + if Cdeps == nil { + return nil + } + deps := &Deps{ + Count: uint32(Cdeps.count), + Filler: uint32(Cdeps.filler), + } + for _, device := range Cdeps.device { + deps.Device = append(deps.Device, (uint64)(device)) + } + return deps +} + +func dmTaskGetInfoFct(task *CDmTask, info *Info) int { + Cinfo := C.struct_dm_info{} + defer func() { + info.Exists = int(Cinfo.exists) + info.Suspended = int(Cinfo.suspended) + info.LiveTable = int(Cinfo.live_table) + info.InactiveTable = int(Cinfo.inactive_table) + info.OpenCount = int32(Cinfo.open_count) + info.EventNr = uint32(Cinfo.event_nr) + info.Major = uint32(Cinfo.major) + info.Minor = uint32(Cinfo.minor) + info.ReadOnly = int(Cinfo.read_only) + info.TargetCount = int32(Cinfo.target_count) + }() + return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) +} + +func dmTaskGetDriverVersionFct(task *CDmTask) string { + buffer := C.malloc(128) + defer C.free(buffer) + res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128) + if res == 0 { + return "" + } + return C.GoString((*C.char)(buffer)) +} + +func dmGetNextTargetFct(task *CDmTask, next unsafe.Pointer, start, length *uint64, target, params *string) unsafe.Pointer { + var ( + Cstart, Clength C.uint64_t + CtargetType, Cparams *C.char + ) + defer func() { + *start = uint64(Cstart) + *length = uint64(Clength) + *target = C.GoString(CtargetType) + *params = C.GoString(Cparams) + }() + + nextp := C.dm_get_next_target((*C.struct_dm_task)(task), next, &Cstart, &Clength, &CtargetType, &Cparams) + return nextp +} + +func dmUdevSetSyncSupportFct(syncWithUdev int) { + (C.dm_udev_set_sync_support(C.int(syncWithUdev))) +} + +func dmUdevGetSyncSupportFct() int { + return int(C.dm_udev_get_sync_support()) +} + +func dmUdevWaitFct(cookie uint) int { + return int(C.dm_udev_wait(C.uint32_t(cookie))) +} + +func dmCookieSupportedFct() int { + return int(C.dm_cookie_supported()) +} + +func dmLogInitVerboseFct(level int) { + C.dm_log_init_verbose(C.int(level)) +} + +func logWithErrnoInitFct() { + C.log_with_errno_init() +} + +func dmSetDevDirFct(dir string) int { + Cdir := C.CString(dir) + defer free(Cdir) + + return int(C.dm_set_dev_dir(Cdir)) +} + +func dmGetLibraryVersionFct(version *string) int { + buffer := C.CString(string(make([]byte, 128))) + defer free(buffer) + defer func() { + *version = C.GoString(buffer) + }() + return int(C.dm_get_library_version(buffer, 128)) +} diff --git a/pkg/devicemapper/devmapper_wrapper_deferred_remove.go b/pkg/devicemapper/devmapper_wrapper_deferred_remove.go new file mode 100644 index 00000000..ced482c9 --- /dev/null +++ 
b/pkg/devicemapper/devmapper_wrapper_deferred_remove.go
@@ -0,0 +1,33 @@
+// +build linux,!libdm_no_deferred_remove
+
+package devicemapper
+
+/*
+#cgo LDFLAGS: -L. -ldevmapper
+#include <libdevmapper.h>
+*/
+import "C"
+
+const LibraryDeferredRemovalSupport = true
+
+func dmTaskDeferredRemoveFct(task *CDmTask) int {
+	return int(C.dm_task_deferred_remove((*C.struct_dm_task)(task)))
+}
+
+func dmTaskGetInfoWithDeferredFct(task *CDmTask, info *Info) int {
+	Cinfo := C.struct_dm_info{}
+	defer func() {
+		info.Exists = int(Cinfo.exists)
+		info.Suspended = int(Cinfo.suspended)
+		info.LiveTable = int(Cinfo.live_table)
+		info.InactiveTable = int(Cinfo.inactive_table)
+		info.OpenCount = int32(Cinfo.open_count)
+		info.EventNr = uint32(Cinfo.event_nr)
+		info.Major = uint32(Cinfo.major)
+		info.Minor = uint32(Cinfo.minor)
+		info.ReadOnly = int(Cinfo.read_only)
+		info.TargetCount = int32(Cinfo.target_count)
+		info.DeferredRemove = int(Cinfo.deferred_remove)
+	}()
+	return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo))
+}
diff --git a/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go b/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go
new file mode 100644
index 00000000..16631bf1
--- /dev/null
+++ b/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go
@@ -0,0 +1,14 @@
+// +build linux,libdm_no_deferred_remove
+
+package devicemapper
+
+const LibraryDeferredRemovalSupport = false
+
+func dmTaskDeferredRemoveFct(task *CDmTask) int {
+	// Error. Nobody should be calling it.
+	return -1
+}
+
+func dmTaskGetInfoWithDeferredFct(task *CDmTask, info *Info) int {
+	return -1
+}
diff --git a/pkg/devicemapper/ioctl.go b/pkg/devicemapper/ioctl.go
new file mode 100644
index 00000000..f97e9d16
--- /dev/null
+++ b/pkg/devicemapper/ioctl.go
@@ -0,0 +1,72 @@
+// +build linux
+
+package devicemapper
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+func ioctlLoopCtlGetFree(fd uintptr) (int, error) {
+	index, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, LoopCtlGetFree, 0)
+	if err != 0 {
+		return 0, err
+	}
+	return int(index), nil
+}
+
+func ioctlLoopSetFd(loopFd, sparseFd uintptr) error {
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetFd, sparseFd); err != 0 {
+		return err
+	}
+	return nil
+}
+
+func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *LoopInfo64) error {
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 {
+		return err
+	}
+	return nil
+}
+
+func ioctlLoopClrFd(loopFd uintptr) error {
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopClrFd, 0); err != 0 {
+		return err
+	}
+	return nil
+}
+
+func ioctlLoopGetStatus64(loopFd uintptr) (*LoopInfo64, error) {
+	loopInfo := &LoopInfo64{}
+
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 {
+		return nil, err
+	}
+	return loopInfo, nil
+}
+
+func ioctlLoopSetCapacity(loopFd uintptr, value int) error {
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetCapacity, uintptr(value)); err != 0 {
+		return err
+	}
+	return nil
+}
+
+func ioctlBlkGetSize64(fd uintptr) (int64, error) {
+	var size int64
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 {
+		return 0, err
+	}
+	return size, nil
+}
+
+func ioctlBlkDiscard(fd uintptr, offset, length uint64) error {
+	var r [2]uint64
+	r[0] = offset
+	r[1] = length
+
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0])));
err != 0 { + return err + } + return nil +} diff --git a/pkg/devicemapper/log.go b/pkg/devicemapper/log.go new file mode 100644 index 00000000..cee5e545 --- /dev/null +++ b/pkg/devicemapper/log.go @@ -0,0 +1,11 @@ +package devicemapper + +// definitions from lvm2 lib/log/log.h +const ( + LogLevelFatal = 2 + iota // _LOG_FATAL + LogLevelErr // _LOG_ERR + LogLevelWarn // _LOG_WARN + LogLevelNotice // _LOG_NOTICE + LogLevelInfo // _LOG_INFO + LogLevelDebug // _LOG_DEBUG +) diff --git a/pkg/directory/directory_linux.go b/pkg/directory/directory_linux.go new file mode 100644 index 00000000..80fb9a83 --- /dev/null +++ b/pkg/directory/directory_linux.go @@ -0,0 +1,39 @@ +// +build linux + +package directory + +import ( + "os" + "path/filepath" + "syscall" +) + +// Size walks a directory tree and returns its total size in bytes. +func Size(dir string) (size int64, err error) { + data := make(map[uint64]struct{}) + err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, e error) error { + // Ignore directory sizes + if fileInfo == nil { + return nil + } + + s := fileInfo.Size() + if fileInfo.IsDir() || s == 0 { + return nil + } + + // Check inode to handle hard links correctly + inode := fileInfo.Sys().(*syscall.Stat_t).Ino + // inode is not a uint64 on all platforms. Cast it to avoid issues. + if _, exists := data[uint64(inode)]; exists { + return nil + } + // inode is not a uint64 on all platforms. Cast it to avoid issues. + data[uint64(inode)] = struct{}{} + + size += s + + return nil + }) + return +} diff --git a/pkg/directory/directory_test.go b/pkg/directory/directory_test.go new file mode 100644 index 00000000..a8da1ac6 --- /dev/null +++ b/pkg/directory/directory_test.go @@ -0,0 +1,137 @@ +package directory + +import ( + "io/ioutil" + "os" + "testing" +) + +// Size of an empty directory should be 0 +func TestSizeEmpty(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeEmptyDirectory"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + + var size int64 + if size, _ = Size(dir); size != 0 { + t.Fatalf("empty directory has size: %d", size) + } +} + +// Size of a directory with one empty file should be 0 +func TestSizeEmptyFile(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeEmptyFile"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + + var file *os.File + if file, err = ioutil.TempFile(dir, "file"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + var size int64 + if size, _ = Size(file.Name()); size != 0 { + t.Fatalf("directory with one file has size: %d", size) + } +} + +// Size of a directory with one 5-byte file should be 5 +func TestSizeNonemptyFile(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeNonemptyFile"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + + var file *os.File + if file, err = ioutil.TempFile(dir, "file"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + d := []byte{97, 98, 99, 100, 101} + file.Write(d) + + var size int64 + if size, _ = Size(file.Name()); size != 5 { + t.Fatalf("directory with one 5-byte file has size: %d", size) + } +} + +// Size of a directory with one empty directory should be 0 +func TestSizeNestedDirectoryEmpty(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeNestedDirectoryEmpty"); err != nil { + t.Fatalf("failed to create directory: 
%s", err) + } + if dir, err = ioutil.TempDir(dir, "nested"); err != nil { + t.Fatalf("failed to create nested directory: %s", err) + } + + var size int64 + if size, _ = Size(dir); size != 0 { + t.Fatalf("directory with one empty directory has size: %d", size) + } +} + +// Test directory with 1 file and 1 empty directory +func TestSizeFileAndNestedDirectoryEmpty(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeFileAndNestedDirectoryEmpty"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + if dir, err = ioutil.TempDir(dir, "nested"); err != nil { + t.Fatalf("failed to create nested directory: %s", err) + } + + var file *os.File + if file, err = ioutil.TempFile(dir, "file"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + d := []byte{100, 111, 99, 107, 101, 114} + file.Write(d) + + var size int64 + if size, _ = Size(dir); size != 6 { + t.Fatalf("directory with 6-byte file and empty directory has size: %d", size) + } +} + +// Test directory with 1 file and 1 non-empty directory +func TestSizeFileAndNestedDirectoryNonempty(t *testing.T) { + var dir, dirNested string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "TestSizeFileAndNestedDirectoryNonempty"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + if dirNested, err = ioutil.TempDir(dir, "nested"); err != nil { + t.Fatalf("failed to create nested directory: %s", err) + } + + var file *os.File + if file, err = ioutil.TempFile(dir, "file"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + data := []byte{100, 111, 99, 107, 101, 114} + file.Write(data) + + var nestedFile *os.File + if nestedFile, err = ioutil.TempFile(dirNested, "file"); err != nil { + t.Fatalf("failed to create file in nested directory: %s", err) + } + + nestedData := []byte{100, 111, 99, 107, 101, 114} + nestedFile.Write(nestedData) + + var size int64 + if size, _ = Size(dir); size != 12 { + t.Fatalf("directory with 6-byte file and nested directory with 6-byte file has size: %d", size) + } +} diff --git a/pkg/directory/directory_windows.go b/pkg/directory/directory_windows.go new file mode 100644 index 00000000..7a9f8cb6 --- /dev/null +++ b/pkg/directory/directory_windows.go @@ -0,0 +1,28 @@ +// +build windows + +package directory + +import ( + "os" + "path/filepath" +) + +// Size walks a directory tree and returns its total size in bytes. +func Size(dir string) (size int64, err error) { + err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, e error) error { + // Ignore directory sizes + if fileInfo == nil { + return nil + } + + s := fileInfo.Size() + if fileInfo.IsDir() || s == 0 { + return nil + } + + size += s + + return nil + }) + return +} diff --git a/pkg/fileutils/fileutils.go b/pkg/fileutils/fileutils.go new file mode 100644 index 00000000..3eaf7f89 --- /dev/null +++ b/pkg/fileutils/fileutils.go @@ -0,0 +1,196 @@ +package fileutils + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" +) + +// exclusion return true if the specified pattern is an exclusion +func exclusion(pattern string) bool { + return pattern[0] == '!' 
+}
+
+// empty returns true if the specified pattern is empty
+func empty(pattern string) bool {
+	return pattern == ""
+}
+
+// CleanPatterns takes a slice of patterns and returns a new
+// slice of patterns cleaned with filepath.Clean, stripped
+// of any empty patterns; it also lets the caller know whether the
+// slice contains any exception patterns (prefixed with !).
+func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) {
+	// Loop over exclusion patterns and:
+	// 1. Clean them up.
+	// 2. Indicate whether we are dealing with any exception rules.
+	// 3. Error if we see a single exclusion marker on its own (!).
+	cleanedPatterns := []string{}
+	patternDirs := [][]string{}
+	exceptions := false
+	for _, pattern := range patterns {
+		// Eliminate leading and trailing whitespace.
+		pattern = strings.TrimSpace(pattern)
+		if empty(pattern) {
+			continue
+		}
+		if exclusion(pattern) {
+			if len(pattern) == 1 {
+				return nil, nil, false, errors.New("Illegal exclusion pattern: !")
+			}
+			exceptions = true
+		}
+		pattern = filepath.Clean(pattern)
+		cleanedPatterns = append(cleanedPatterns, pattern)
+		if exclusion(pattern) {
+			pattern = pattern[1:]
+		}
+		patternDirs = append(patternDirs, strings.Split(pattern, "/"))
+	}
+
+	return cleanedPatterns, patternDirs, exceptions, nil
+}
+
+// Matches returns true if file matches any of the patterns
+// and isn't excluded by any of the subsequent patterns.
+func Matches(file string, patterns []string) (bool, error) {
+	file = filepath.Clean(file)
+
+	if file == "." {
+		// Don't let them exclude everything, kind of silly.
+		return false, nil
+	}
+
+	patterns, patDirs, _, err := CleanPatterns(patterns)
+	if err != nil {
+		return false, err
+	}
+
+	return OptimizedMatches(file, patterns, patDirs)
+}
+
+// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go.
+// It will assume that the inputs have been preprocessed and therefore the function
+// doesn't need to do as much error checking and clean-up. This was done to avoid
+// repeating these steps on each file being checked during the archive process.
+// The more generic fileutils.Matches() can't make these assumptions.
+func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) {
+	matched := false
+	parentPath := filepath.Dir(file)
+	parentPathDirs := strings.Split(parentPath, "/")
+
+	for i, pattern := range patterns {
+		negative := false
+
+		if exclusion(pattern) {
+			negative = true
+			pattern = pattern[1:]
+		}
+
+		match, err := filepath.Match(pattern, file)
+		if err != nil {
+			return false, err
+		}
+
+		if !match && parentPath != "." {
+			// Check to see if the pattern matches one of our parent dirs.
+			if len(patDirs[i]) <= len(parentPathDirs) {
+				match, _ = filepath.Match(strings.Join(patDirs[i], "/"),
+					strings.Join(parentPathDirs[:len(patDirs[i])], "/"))
+			}
+		}
+
+		if match {
+			matched = !negative
+		}
+	}
+
+	if matched {
+		logrus.Debugf("Skipping excluded path: %s", file)
+	}
+
+	return matched, nil
+}
+
+// CopyFile copies from src to dst until either EOF is reached
+// on src or an error occurs. It verifies that src exists and removes
+// dst if it exists.
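+//
+// A minimal usage sketch (hypothetical paths):
+//
+//	if n, err := CopyFile("/tmp/app.conf", "/tmp/app.conf.bak"); err == nil {
+//		logrus.Debugf("copied %d bytes", n)
+//	}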
+func CopyFile(src, dst string) (int64, error) {
+	cleanSrc := filepath.Clean(src)
+	cleanDst := filepath.Clean(dst)
+	if cleanSrc == cleanDst {
+		return 0, nil
+	}
+	sf, err := os.Open(cleanSrc)
+	if err != nil {
+		return 0, err
+	}
+	defer sf.Close()
+	if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) {
+		return 0, err
+	}
+	df, err := os.Create(cleanDst)
+	if err != nil {
+		return 0, err
+	}
+	defer df.Close()
+	return io.Copy(df, sf)
+}
+
+// GetTotalUsedFds returns the number of used file descriptors by reading
+// them from the /proc filesystem.
+func GetTotalUsedFds() int {
+	if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
+		logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
+	} else {
+		return len(fds)
+	}
+	return -1
+}
+
+// ReadSymlinkedDirectory returns the target directory of a symlink.
+// It returns an error if the target of the symbolic link is not a directory.
+func ReadSymlinkedDirectory(path string) (string, error) {
+	var realPath string
+	var err error
+	if realPath, err = filepath.Abs(path); err != nil {
+		return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
+	}
+	if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
+		return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
+	}
+	realPathInfo, err := os.Stat(realPath)
+	if err != nil {
+		return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
+	}
+	if !realPathInfo.Mode().IsDir() {
+		return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
+	}
+	return realPath, nil
+}
+
+// CreateIfNotExists creates a file or a directory only if it does not already exist.
+func CreateIfNotExists(path string, isDir bool) error {
+	if _, err := os.Stat(path); err != nil {
+		if os.IsNotExist(err) {
+			if isDir {
+				return os.MkdirAll(path, 0755)
+			}
+			if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+				return err
+			}
+			f, err := os.OpenFile(path, os.O_CREATE, 0755)
+			if err != nil {
+				return err
+			}
+			f.Close()
+		}
+	}
+	return nil
+}
diff --git a/pkg/fileutils/fileutils_test.go b/pkg/fileutils/fileutils_test.go
new file mode 100644
index 00000000..b544ffbf
--- /dev/null
+++ b/pkg/fileutils/fileutils_test.go
@@ -0,0 +1,402 @@
+package fileutils
+
+import (
+	"io/ioutil"
+	"os"
+	"path"
+	"path/filepath"
+	"testing"
+)
+
+// CopyFile with invalid src
+func TestCopyFileWithInvalidSrc(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+	defer os.RemoveAll(tempFolder)
+	if err != nil {
+		t.Fatal(err)
+	}
+	bytes, err := CopyFile("/invalid/file/path", path.Join(tempFolder, "dest"))
+	if err == nil {
+		t.Fatal("Should have failed to copy an invalid src file")
+	}
+	if bytes != 0 {
+		t.Fatal("Should have written 0 bytes")
+	}
+}
+
+// CopyFile with invalid dest
+func TestCopyFileWithInvalidDest(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+	defer os.RemoveAll(tempFolder)
+	if err != nil {
+		t.Fatal(err)
+	}
+	src := path.Join(tempFolder, "file")
+	err = ioutil.WriteFile(src, []byte("content"), 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	bytes, err := CopyFile(src, path.Join(tempFolder, "/invalid/dest/path"))
+	if err == nil {
+		t.Fatal("Should have failed to copy to an invalid dest path")
+	}
+	if bytes != 0 {
+		t.Fatal("Should have written 0 bytes")
+	}
+}
+
+// CopyFile with same src and dest
+func TestCopyFileWithSameSrcAndDest(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+	defer os.RemoveAll(tempFolder)
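+	// Note: the deferred RemoveAll is registered before err is checked; if
+	// TempDir failed, tempFolder is "" and os.RemoveAll("") is a harmless
+	// no-op, so the order is safe (this pattern repeats in the tests below).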
+ if err != nil { + t.Fatal(err) + } + file := path.Join(tempFolder, "file") + err = ioutil.WriteFile(file, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(file, file) + if err != nil { + t.Fatal(err) + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes as it is the same file.") + } +} + +// CopyFile with same src and dest but path is different and not clean +func TestCopyFileWithSameSrcAndDestWithPathNameDifferent(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + testFolder := path.Join(tempFolder, "test") + err = os.MkdirAll(testFolder, 0740) + if err != nil { + t.Fatal(err) + } + file := path.Join(testFolder, "file") + sameFile := testFolder + "/../test/file" + err = ioutil.WriteFile(file, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(file, sameFile) + if err != nil { + t.Fatal(err) + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes as it is the same file.") + } +} + +func TestCopyFile(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + src := path.Join(tempFolder, "src") + dest := path.Join(tempFolder, "dest") + ioutil.WriteFile(src, []byte("content"), 0777) + ioutil.WriteFile(dest, []byte("destContent"), 0777) + bytes, err := CopyFile(src, dest) + if err != nil { + t.Fatal(err) + } + if bytes != 7 { + t.Fatalf("Should have written %d bytes but wrote %d", 7, bytes) + } + actual, err := ioutil.ReadFile(dest) + if err != nil { + t.Fatal(err) + } + if string(actual) != "content" { + t.Fatalf("Dest content was '%s', expected '%s'", string(actual), "content") + } +} + +// Reading a symlink to a directory must return the directory +func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) { + var err error + if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil { + t.Errorf("failed to create directory: %s", err) + } + + if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to create symlink: %s", err) + } + + var path string + if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil { + t.Fatalf("failed to read symlink to directory: %s", err) + } + + if path != "/tmp/testReadSymlinkToExistingDirectory" { + t.Fatalf("symlink returned unexpected directory: %s", path) + } + + if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil { + t.Errorf("failed to remove temporary directory: %s", err) + } + + if err = os.Remove("/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} + +// Reading a non-existing symlink must fail +func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) { + var path string + var err error + if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil { + t.Fatalf("error expected for non-existing symlink") + } + + if path != "" { + t.Fatalf("expected empty path, but '%s' was returned", path) + } +} + +// Reading a symlink to a file must fail +func TestReadSymlinkedDirectoryToFile(t *testing.T) { + var err error + var file *os.File + + if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + file.Close() + + if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to create symlink: 
%s", err) + } + + var path string + if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil { + t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed") + } + + if path != "" { + t.Fatalf("path should've been empty: %s", path) + } + + if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil { + t.Errorf("failed to remove file: %s", err) + } + + if err = os.Remove("/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} + +func TestWildcardMatches(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*"}) + if match != true { + t.Errorf("failed to get a wildcard match, got %v", match) + } +} + +// A simple pattern match should return true. +func TestPatternMatches(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*.go"}) + if match != true { + t.Errorf("failed to get a match, got %v", match) + } +} + +// An exclusion followed by an inclusion should return true. +func TestExclusionPatternMatchesPatternBefore(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"!fileutils.go", "*.go"}) + if match != true { + t.Errorf("failed to get true match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderWithSlashExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs/", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderWildcardExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs/*", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A pattern followed by an exclusion should return false. +func TestExclusionPatternMatchesPatternAfter(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*.go", "!fileutils.go"}) + if match != false { + t.Errorf("failed to get false match on exclusion pattern, got %v", match) + } +} + +// A filename evaluating to . should return false. +func TestExclusionPatternMatchesWholeDirectory(t *testing.T) { + match, _ := Matches(".", []string{"*.go"}) + if match != false { + t.Errorf("failed to get false match on ., got %v", match) + } +} + +// A single ! pattern should return an error. +func TestSingleExclamationError(t *testing.T) { + _, err := Matches("fileutils.go", []string{"!"}) + if err == nil { + t.Errorf("failed to get an error for a single exclamation point, got %v", err) + } +} + +// A string preceded with a ! should return true from Exclusion. 
+func TestExclusion(t *testing.T) {
+	exclusion := exclusion("!")
+	if !exclusion {
+		t.Errorf("failed to get true for a single !, got %v", exclusion)
+	}
+}
+
+// Matches with no patterns
+func TestMatchesWithNoPatterns(t *testing.T) {
+	matches, err := Matches("/any/path/there", []string{})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if matches {
+		t.Fatalf("Should not have matched anything")
+	}
+}
+
+// Matches with malformed patterns
+func TestMatchesWithMalformedPatterns(t *testing.T) {
+	matches, err := Matches("/any/path/there", []string{"["})
+	if err == nil {
+		t.Fatal("Should have failed because of a malformed syntax in the pattern")
+	}
+	if matches {
+		t.Fatalf("Should not have matched anything")
+	}
+}
+
+// An empty string should return true from Empty.
+func TestEmpty(t *testing.T) {
+	empty := empty("")
+	if !empty {
+		t.Errorf("failed to get true for an empty string, got %v", empty)
+	}
+}
+
+func TestCleanPatterns(t *testing.T) {
+	cleaned, _, _, _ := CleanPatterns([]string{"docs", "config"})
+	if len(cleaned) != 2 {
+		t.Errorf("expected 2 element slice, got %v", len(cleaned))
+	}
+}
+
+func TestCleanPatternsStripEmptyPatterns(t *testing.T) {
+	cleaned, _, _, _ := CleanPatterns([]string{"docs", "config", ""})
+	if len(cleaned) != 2 {
+		t.Errorf("expected 2 element slice, got %v", len(cleaned))
+	}
+}
+
+func TestCleanPatternsExceptionFlag(t *testing.T) {
+	_, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md"})
+	if !exceptions {
+		t.Errorf("expected exceptions to be true, got %v", exceptions)
+	}
+}
+
+func TestCleanPatternsLeadingSpaceTrimmed(t *testing.T) {
+	_, _, exceptions, _ := CleanPatterns([]string{"docs", " !docs/README.md"})
+	if !exceptions {
+		t.Errorf("expected exceptions to be true, got %v", exceptions)
+	}
+}
+
+func TestCleanPatternsTrailingSpaceTrimmed(t *testing.T) {
+	_, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md "})
+	if !exceptions {
+		t.Errorf("expected exceptions to be true, got %v", exceptions)
+	}
+}
+
+func TestCleanPatternsErrorSingleException(t *testing.T) {
+	_, _, _, err := CleanPatterns([]string{"!"})
+	if err == nil {
+		t.Errorf("expected error on single exclamation point, got %v", err)
+	}
+}
+
+func TestCleanPatternsFolderSplit(t *testing.T) {
+	_, dirs, _, _ := CleanPatterns([]string{"docs/config/CONFIG.md"})
+	if dirs[0][0] != "docs" {
+		t.Errorf("expected first element in dirs slice to be docs, got %v", dirs[0][0])
+	}
+	if dirs[0][1] != "config" {
+		t.Errorf("expected second element in dirs slice to be config, got %v", dirs[0][1])
+	}
+}
+
+func TestCreateIfNotExistsDir(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tempFolder)
+
+	folderToCreate := filepath.Join(tempFolder, "tocreate")
+
+	if err := CreateIfNotExists(folderToCreate, true); err != nil {
+		t.Fatal(err)
+	}
+	fileinfo, err := os.Stat(folderToCreate)
+	if err != nil {
+		t.Fatalf("Should have created a folder, got %v", err)
+	}
+
+	if !fileinfo.IsDir() {
+		t.Fatalf("Should have been a dir, seems it's not")
+	}
+}
+
+func TestCreateIfNotExistsFile(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tempFolder)
+
+	fileToCreate := filepath.Join(tempFolder, "file/to/create")
+
+	if err := CreateIfNotExists(fileToCreate, false); err != nil {
+		t.Fatal(err)
+	}
+	fileinfo, err := os.Stat(fileToCreate)
+	if err != nil {
+		t.Fatalf("Should have created a file, got %v",
err) + } + + if fileinfo.IsDir() { + t.Fatalf("Should have been a file, seems it's not") + } +} diff --git a/pkg/graphdb/conn_sqlite3.go b/pkg/graphdb/conn_sqlite3.go new file mode 100644 index 00000000..47c6b720 --- /dev/null +++ b/pkg/graphdb/conn_sqlite3.go @@ -0,0 +1,13 @@ +package graphdb + +import "database/sql" + +// NewSqliteConn opens a connection to a sqlite +// database. +func NewSqliteConn(root string) (*Database, error) { + conn, err := sql.Open("sqlite3", root) + if err != nil { + return nil, err + } + return NewDatabase(conn) +} diff --git a/pkg/graphdb/conn_sqlite3_unix.go b/pkg/graphdb/conn_sqlite3_unix.go new file mode 100644 index 00000000..7ccb6677 --- /dev/null +++ b/pkg/graphdb/conn_sqlite3_unix.go @@ -0,0 +1,5 @@ +// +build cgo,!windows + +package graphdb + +import _ "code.google.com/p/gosqlite/sqlite3" // registers sqlite diff --git a/pkg/graphdb/conn_sqlite3_windows.go b/pkg/graphdb/conn_sqlite3_windows.go new file mode 100644 index 00000000..fe56e1b2 --- /dev/null +++ b/pkg/graphdb/conn_sqlite3_windows.go @@ -0,0 +1,5 @@ +// +build cgo,windows + +package graphdb + +import _ "github.com/mattn/go-sqlite3" // registers sqlite diff --git a/pkg/graphdb/conn_unsupported.go b/pkg/graphdb/conn_unsupported.go new file mode 100644 index 00000000..38950516 --- /dev/null +++ b/pkg/graphdb/conn_unsupported.go @@ -0,0 +1,7 @@ +// +build !cgo + +package graphdb + +func NewSqliteConn(root string) (*Database, error) { + panic("Not implemented") +} diff --git a/pkg/graphdb/graphdb.go b/pkg/graphdb/graphdb.go new file mode 100644 index 00000000..93cb0291 --- /dev/null +++ b/pkg/graphdb/graphdb.go @@ -0,0 +1,550 @@ +package graphdb + +import ( + "database/sql" + "fmt" + "path" + "strings" + "sync" +) + +const ( + createEntityTable = ` + CREATE TABLE IF NOT EXISTS entity ( + id text NOT NULL PRIMARY KEY + );` + + createEdgeTable = ` + CREATE TABLE IF NOT EXISTS edge ( + "entity_id" text NOT NULL, + "parent_id" text NULL, + "name" text NOT NULL, + CONSTRAINT "parent_fk" FOREIGN KEY ("parent_id") REFERENCES "entity" ("id"), + CONSTRAINT "entity_fk" FOREIGN KEY ("entity_id") REFERENCES "entity" ("id") + ); + ` + + createEdgeIndices = ` + CREATE UNIQUE INDEX IF NOT EXISTS "name_parent_ix" ON "edge" (parent_id, name); + ` +) + +// Entity with a unique id +type Entity struct { + id string +} + +// An Edge connects two entities together +type Edge struct { + EntityID string + Name string + ParentID string +} + +// Entities stores the list of entities +type Entities map[string]*Entity + +// Edges stores the relationships between entities +type Edges []*Edge + +// WalkFunc is a function invoked to process an individual entity +type WalkFunc func(fullPath string, entity *Entity) error + +// Database is a graph database for storing entities and their relationships +type Database struct { + conn *sql.DB + mux sync.RWMutex +} + +// IsNonUniqueNameError processes the error to check if it's caused by +// a constraint violation. +// This is necessary because the error isn't the same across various +// sqlite versions. 
+func IsNonUniqueNameError(err error) bool { + str := err.Error() + // sqlite 3.7.17-1ubuntu1 returns: + // Set failure: Abort due to constraint violation: columns parent_id, name are not unique + if strings.HasSuffix(str, "name are not unique") { + return true + } + // sqlite-3.8.3-1.fc20 returns: + // Set failure: Abort due to constraint violation: UNIQUE constraint failed: edge.parent_id, edge.name + if strings.Contains(str, "UNIQUE constraint failed") && strings.Contains(str, "edge.name") { + return true + } + // sqlite-3.6.20-1.el6 returns: + // Set failure: Abort due to constraint violation: constraint failed + if strings.HasSuffix(str, "constraint failed") { + return true + } + return false +} + +// NewDatabase creates a new graph database initialized with a root entity +func NewDatabase(conn *sql.DB) (*Database, error) { + if conn == nil { + return nil, fmt.Errorf("Database connection cannot be nil") + } + db := &Database{conn: conn} + + // Create root entities + tx, err := conn.Begin() + if err != nil { + return nil, err + } + + if _, err := tx.Exec(createEntityTable); err != nil { + return nil, err + } + if _, err := tx.Exec(createEdgeTable); err != nil { + return nil, err + } + if _, err := tx.Exec(createEdgeIndices); err != nil { + return nil, err + } + + if _, err := tx.Exec("DELETE FROM entity where id = ?", "0"); err != nil { + tx.Rollback() + return nil, err + } + + if _, err := tx.Exec("INSERT INTO entity (id) VALUES (?);", "0"); err != nil { + tx.Rollback() + return nil, err + } + + if _, err := tx.Exec("DELETE FROM edge where entity_id=? and name=?", "0", "/"); err != nil { + tx.Rollback() + return nil, err + } + + if _, err := tx.Exec("INSERT INTO edge (entity_id, name) VALUES(?,?);", "0", "/"); err != nil { + tx.Rollback() + return nil, err + } + + if err := tx.Commit(); err != nil { + return nil, err + } + + return db, nil +} + +// Close the underlying connection to the database +func (db *Database) Close() error { + return db.conn.Close() +} + +// Set the entity id for a given path +func (db *Database) Set(fullPath, id string) (*Entity, error) { + db.mux.Lock() + defer db.mux.Unlock() + + tx, err := db.conn.Begin() + if err != nil { + return nil, err + } + + var entityID string + if err := tx.QueryRow("SELECT id FROM entity WHERE id = ?;", id).Scan(&entityID); err != nil { + if err == sql.ErrNoRows { + if _, err := tx.Exec("INSERT INTO entity (id) VALUES(?);", id); err != nil { + tx.Rollback() + return nil, err + } + } else { + tx.Rollback() + return nil, err + } + } + e := &Entity{id} + + parentPath, name := splitPath(fullPath) + if err := db.setEdge(parentPath, name, e, tx); err != nil { + tx.Rollback() + return nil, err + } + + if err := tx.Commit(); err != nil { + return nil, err + } + return e, nil +} + +// Exists returns true if a name already exists in the database +func (db *Database) Exists(name string) bool { + db.mux.RLock() + defer db.mux.RUnlock() + + e, err := db.get(name) + if err != nil { + return false + } + return e != nil +} + +func (db *Database) setEdge(parentPath, name string, e *Entity, tx *sql.Tx) error { + parent, err := db.get(parentPath) + if err != nil { + return err + } + if parent.id == e.id { + return fmt.Errorf("Cannot set self as child") + } + + if _, err := tx.Exec("INSERT INTO edge (parent_id, name, entity_id) VALUES (?,?,?);", parent.id, name, e.id); err != nil { + return err + } + return nil +} + +// RootEntity returns the root "/" entity for the database +func (db *Database) RootEntity() *Entity { + return &Entity{ + id: "0", + } 
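+	// id "0" is the fixed root id seeded into the entity and edge tables by
+	// NewDatabase above.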
+}
+
+// Get returns the entity for a given path
+func (db *Database) Get(name string) *Entity {
+	db.mux.RLock()
+	defer db.mux.RUnlock()
+
+	e, err := db.get(name)
+	if err != nil {
+		return nil
+	}
+	return e
+}
+
+func (db *Database) get(name string) (*Entity, error) {
+	e := db.RootEntity()
+	// We always know the root name so return it if
+	// it is requested
+	if name == "/" {
+		return e, nil
+	}
+
+	parts := split(name)
+	for i := 1; i < len(parts); i++ {
+		p := parts[i]
+		if p == "" {
+			continue
+		}
+
+		next := db.child(e, p)
+		if next == nil {
+			return nil, fmt.Errorf("Cannot find child for %s", name)
+		}
+		e = next
+	}
+	return e, nil
+}
+
+// List all entities under the given name.
+// The key will be the full path of the entity.
+func (db *Database) List(name string, depth int) Entities {
+	db.mux.RLock()
+	defer db.mux.RUnlock()
+
+	out := Entities{}
+	e, err := db.get(name)
+	if err != nil {
+		return out
+	}
+
+	children, err := db.children(e, name, depth, nil)
+	if err != nil {
+		return out
+	}
+
+	for _, c := range children {
+		out[c.FullPath] = c.Entity
+	}
+	return out
+}
+
+// Walk through the child graph of an entity, calling walkFunc for each child entity.
+// It is safe for walkFunc to call graph functions.
+func (db *Database) Walk(name string, walkFunc WalkFunc, depth int) error {
+	children, err := db.Children(name, depth)
+	if err != nil {
+		return err
+	}
+
+	// Note: the database lock must not be held while calling walkFunc
+	for _, c := range children {
+		if err := walkFunc(c.FullPath, c.Entity); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Children returns the children of the specified entity
+func (db *Database) Children(name string, depth int) ([]WalkMeta, error) {
+	db.mux.RLock()
+	defer db.mux.RUnlock()
+
+	e, err := db.get(name)
+	if err != nil {
+		return nil, err
+	}
+
+	return db.children(e, name, depth, nil)
+}
+
+// Parents returns the parents of a specified entity
+func (db *Database) Parents(name string) ([]string, error) {
+	db.mux.RLock()
+	defer db.mux.RUnlock()
+
+	e, err := db.get(name)
+	if err != nil {
+		return nil, err
+	}
+	return db.parents(e)
+}
+
+// Refs returns the reference count for a specified id
+func (db *Database) Refs(id string) int {
+	db.mux.RLock()
+	defer db.mux.RUnlock()
+
+	var count int
+	if err := db.conn.QueryRow("SELECT COUNT(*) FROM edge WHERE entity_id = ?;", id).Scan(&count); err != nil {
+		return 0
+	}
+	return count
+}
+
+// RefPaths returns all the id's path references
+func (db *Database) RefPaths(id string) Edges {
+	db.mux.RLock()
+	defer db.mux.RUnlock()
+
+	refs := Edges{}
+
+	rows, err := db.conn.Query("SELECT name, parent_id FROM edge WHERE entity_id = ?;", id)
+	if err != nil {
+		return refs
+	}
+	defer rows.Close()
+
+	for rows.Next() {
+		var name string
+		var parentID string
+		if err := rows.Scan(&name, &parentID); err != nil {
+			return refs
+		}
+		refs = append(refs, &Edge{
+			EntityID: id,
+			Name:     name,
+			ParentID: parentID,
+		})
+	}
+	return refs
+}
+
+// Delete the reference to an entity at a given path
+func (db *Database) Delete(name string) error {
+	db.mux.Lock()
+	defer db.mux.Unlock()
+
+	if name == "/" {
+		return fmt.Errorf("Cannot delete root entity")
+	}
+
+	parentPath, n := splitPath(name)
+	parent, err := db.get(parentPath)
+	if err != nil {
+		return err
+	}
+
+	if _, err := db.conn.Exec("DELETE FROM edge WHERE parent_id = ?
AND name = ?;", parent.id, n); err != nil { + return err + } + return nil +} + +// Purge removes the entity with the specified id +// Walk the graph to make sure all references to the entity +// are removed and return the number of references removed +func (db *Database) Purge(id string) (int, error) { + db.mux.Lock() + defer db.mux.Unlock() + + tx, err := db.conn.Begin() + if err != nil { + return -1, err + } + + // Delete all edges + rows, err := tx.Exec("DELETE FROM edge WHERE entity_id = ?;", id) + if err != nil { + tx.Rollback() + return -1, err + } + changes, err := rows.RowsAffected() + if err != nil { + return -1, err + } + + // Clear who's using this id as parent + refs, err := tx.Exec("DELETE FROM edge WHERE parent_id = ?;", id) + if err != nil { + tx.Rollback() + return -1, err + } + refsCount, err := refs.RowsAffected() + if err != nil { + return -1, err + } + + // Delete entity + if _, err := tx.Exec("DELETE FROM entity where id = ?;", id); err != nil { + tx.Rollback() + return -1, err + } + + if err := tx.Commit(); err != nil { + return -1, err + } + + return int(changes + refsCount), nil +} + +// Rename an edge for a given path +func (db *Database) Rename(currentName, newName string) error { + db.mux.Lock() + defer db.mux.Unlock() + + parentPath, name := splitPath(currentName) + newParentPath, newEdgeName := splitPath(newName) + + if parentPath != newParentPath { + return fmt.Errorf("Cannot rename when root paths do not match %s != %s", parentPath, newParentPath) + } + + parent, err := db.get(parentPath) + if err != nil { + return err + } + + rows, err := db.conn.Exec("UPDATE edge SET name = ? WHERE parent_id = ? AND name = ?;", newEdgeName, parent.id, name) + if err != nil { + return err + } + i, err := rows.RowsAffected() + if err != nil { + return err + } + if i == 0 { + return fmt.Errorf("Cannot locate edge for %s %s", parent.id, name) + } + return nil +} + +type WalkMeta struct { + Parent *Entity + Entity *Entity + FullPath string + Edge *Edge +} + +func (db *Database) children(e *Entity, name string, depth int, entities []WalkMeta) ([]WalkMeta, error) { + if e == nil { + return entities, nil + } + + rows, err := db.conn.Query("SELECT entity_id, name FROM edge where parent_id = ?;", e.id) + if err != nil { + return nil, err + } + defer rows.Close() + + for rows.Next() { + var entityID, entityName string + if err := rows.Scan(&entityID, &entityName); err != nil { + return nil, err + } + child := &Entity{entityID} + edge := &Edge{ + ParentID: e.id, + Name: entityName, + EntityID: child.id, + } + + meta := WalkMeta{ + Parent: e, + Entity: child, + FullPath: path.Join(name, edge.Name), + Edge: edge, + } + + entities = append(entities, meta) + + if depth != 0 { + nDepth := depth + if depth != -1 { + nDepth-- + } + entities, err = db.children(child, meta.FullPath, nDepth, entities) + if err != nil { + return nil, err + } + } + } + + return entities, nil +} + +func (db *Database) parents(e *Entity) (parents []string, err error) { + if e == nil { + return parents, nil + } + + rows, err := db.conn.Query("SELECT parent_id FROM edge where entity_id = ?;", e.id) + if err != nil { + return nil, err + } + defer rows.Close() + + for rows.Next() { + var parentID string + if err := rows.Scan(&parentID); err != nil { + return nil, err + } + parents = append(parents, parentID) + } + + return parents, nil +} + +// Return the entity based on the parent path and name +func (db *Database) child(parent *Entity, name string) *Entity { + var id string + if err := db.conn.QueryRow("SELECT 
entity_id FROM edge WHERE parent_id = ? AND name = ?;", parent.id, name).Scan(&id); err != nil { + return nil + } + return &Entity{id} +} + +// ID returns the id used to reference this entity +func (e *Entity) ID() string { + return e.id +} + +// Paths returns the paths sorted by depth +func (e Entities) Paths() []string { + out := make([]string, len(e)) + var i int + for k := range e { + out[i] = k + i++ + } + sortByDepth(out) + + return out +} diff --git a/pkg/graphdb/graphdb_test.go b/pkg/graphdb/graphdb_test.go new file mode 100644 index 00000000..6912ab60 --- /dev/null +++ b/pkg/graphdb/graphdb_test.go @@ -0,0 +1,657 @@ +package graphdb + +import ( + "database/sql" + "fmt" + "os" + "path" + "strconv" + "testing" + + _ "code.google.com/p/gosqlite/sqlite3" +) + +func newTestDb(t *testing.T) (*Database, string) { + p := path.Join(os.TempDir(), "sqlite.db") + conn, err := sql.Open("sqlite3", p) + db, err := NewDatabase(conn) + if err != nil { + t.Fatal(err) + } + return db, p +} + +func destroyTestDb(dbPath string) { + os.Remove(dbPath) +} + +func TestNewDatabase(t *testing.T) { + db, dbpath := newTestDb(t) + if db == nil { + t.Fatal("Database should not be nil") + } + db.Close() + defer destroyTestDb(dbpath) +} + +func TestCreateRootEntity(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + root := db.RootEntity() + if root == nil { + t.Fatal("Root entity should not be nil") + } +} + +func TestGetRootEntity(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + e := db.Get("/") + if e == nil { + t.Fatal("Entity should not be nil") + } + if e.ID() != "0" { + t.Fatalf("Entity id should be 0, got %s", e.ID()) + } +} + +func TestSetEntityWithDifferentName(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/test", "1") + if _, err := db.Set("/other", "1"); err != nil { + t.Fatal(err) + } +} + +func TestSetDuplicateEntity(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + if _, err := db.Set("/foo", "42"); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/foo", "43"); err == nil { + t.Fatalf("Creating an entry with a duplicate path did not cause an error") + } +} + +func TestCreateChild(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + child, err := db.Set("/db", "1") + if err != nil { + t.Fatal(err) + } + if child == nil { + t.Fatal("Child should not be nil") + } + if child.ID() != "1" { + t.Fail() + } +} + +func TestParents(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + for i := 1; i < 6; i++ { + a := strconv.Itoa(i) + if _, err := db.Set("/"+a, a); err != nil { + t.Fatal(err) + } + } + + for i := 6; i < 11; i++ { + a := strconv.Itoa(i) + p := strconv.Itoa(i - 5) + + key := fmt.Sprintf("/%s/%s", p, a) + + if _, err := db.Set(key, a); err != nil { + t.Fatal(err) + } + + parents, err := db.Parents(key) + if err != nil { + t.Fatal(err) + } + + if len(parents) != 1 { + t.Fatalf("Expected 1 entry for %s got %d", key, len(parents)) + } + + if parents[0] != p { + t.Fatalf("ID %s received, %s expected", parents[0], p) + } + } +} + +func TestChildren(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + str := "/" + for i := 1; i < 6; i++ { + a := strconv.Itoa(i) + if _, err := db.Set(str+a, a); err != nil { + t.Fatal(err) + } + + str = str + a + "/" + } + + str = "/" + for i := 10; i < 30; i++ { // 20 entities + a := strconv.Itoa(i) + if _, err := db.Set(str+a, a); err != nil { + t.Fatal(err) + } + 
+ str = str + a + "/" + } + entries, err := db.Children("/", 5) + if err != nil { + t.Fatal(err) + } + + if len(entries) != 11 { + t.Fatalf("Expect 11 entries for / got %d", len(entries)) + } + + entries, err = db.Children("/", 20) + if err != nil { + t.Fatal(err) + } + + if len(entries) != 25 { + t.Fatalf("Expect 25 entries for / got %d", len(entries)) + } +} + +func TestListAllRootChildren(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + for i := 1; i < 6; i++ { + a := strconv.Itoa(i) + if _, err := db.Set("/"+a, a); err != nil { + t.Fatal(err) + } + } + entries := db.List("/", -1) + if len(entries) != 5 { + t.Fatalf("Expect 5 entries for / got %d", len(entries)) + } +} + +func TestListAllSubChildren(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + _, err := db.Set("/webapp", "1") + if err != nil { + t.Fatal(err) + } + child2, err := db.Set("/db", "2") + if err != nil { + t.Fatal(err) + } + child4, err := db.Set("/logs", "4") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/db/logs", child4.ID()); err != nil { + t.Fatal(err) + } + + child3, err := db.Set("/sentry", "3") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/db", child2.ID()); err != nil { + t.Fatal(err) + } + + entries := db.List("/webapp", 1) + if len(entries) != 3 { + t.Fatalf("Expect 3 entries for / got %d", len(entries)) + } + + entries = db.List("/webapp", 0) + if len(entries) != 2 { + t.Fatalf("Expect 2 entries for / got %d", len(entries)) + } +} + +func TestAddSelfAsChild(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + child, err := db.Set("/test", "1") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/test/other", child.ID()); err == nil { + t.Fatal("Error should not be nil") + } +} + +func TestAddChildToNonExistantRoot(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + if _, err := db.Set("/myapp", "1"); err != nil { + t.Fatal(err) + } + + if _, err := db.Set("/myapp/proxy/db", "2"); err == nil { + t.Fatal("Error should not be nil") + } +} + +func TestWalkAll(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + _, err := db.Set("/webapp", "1") + if err != nil { + t.Fatal(err) + } + child2, err := db.Set("/db", "2") + if err != nil { + t.Fatal(err) + } + child4, err := db.Set("/db/logs", "4") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/logs", child4.ID()); err != nil { + t.Fatal(err) + } + + child3, err := db.Set("/sentry", "3") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/db", child2.ID()); err != nil { + t.Fatal(err) + } + + child5, err := db.Set("/gograph", "5") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { + t.Fatal(err) + } + + if err := db.Walk("/", func(p string, e *Entity) error { + t.Logf("Path: %s Entity: %s", p, e.ID()) + return nil + }, -1); err != nil { + t.Fatal(err) + } +} + +func TestGetEntityByPath(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + _, err := db.Set("/webapp", "1") + if err != nil { + t.Fatal(err) + } + child2, err := db.Set("/db", "2") + if err != nil { + t.Fatal(err) + } + child4, err := db.Set("/logs", "4") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/db/logs", child4.ID()); 
err != nil { + t.Fatal(err) + } + + child3, err := db.Set("/sentry", "3") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/db", child2.ID()); err != nil { + t.Fatal(err) + } + + child5, err := db.Set("/gograph", "5") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { + t.Fatal(err) + } + + entity := db.Get("/webapp/db/logs") + if entity == nil { + t.Fatal("Entity should not be nil") + } + if entity.ID() != "4" { + t.Fatalf("Expected to get entity with id 4, got %s", entity.ID()) + } +} + +func TestEnitiesPaths(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + _, err := db.Set("/webapp", "1") + if err != nil { + t.Fatal(err) + } + child2, err := db.Set("/db", "2") + if err != nil { + t.Fatal(err) + } + child4, err := db.Set("/logs", "4") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/db/logs", child4.ID()); err != nil { + t.Fatal(err) + } + + child3, err := db.Set("/sentry", "3") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/db", child2.ID()); err != nil { + t.Fatal(err) + } + + child5, err := db.Set("/gograph", "5") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { + t.Fatal(err) + } + + out := db.List("/", -1) + for _, p := range out.Paths() { + t.Log(p) + } +} + +func TestDeleteRootEntity(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + if err := db.Delete("/"); err == nil { + t.Fatal("Error should not be nil") + } +} + +func TestDeleteEntity(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + _, err := db.Set("/webapp", "1") + if err != nil { + t.Fatal(err) + } + child2, err := db.Set("/db", "2") + if err != nil { + t.Fatal(err) + } + child4, err := db.Set("/logs", "4") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/db/logs", child4.ID()); err != nil { + t.Fatal(err) + } + + child3, err := db.Set("/sentry", "3") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/db", child2.ID()); err != nil { + t.Fatal(err) + } + + child5, err := db.Set("/gograph", "5") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { + t.Fatal(err) + } + + if err := db.Delete("/webapp/sentry"); err != nil { + t.Fatal(err) + } + entity := db.Get("/webapp/sentry") + if entity != nil { + t.Fatal("Entity /webapp/sentry should be nil") + } +} + +func TestCountRefs(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/webapp", "1") + + if db.Refs("1") != 1 { + t.Fatal("Expect reference count to be 1") + } + + db.Set("/db", "2") + db.Set("/webapp/db", "2") + if db.Refs("2") != 2 { + t.Fatal("Expect reference count to be 2") + } +} + +func TestPurgeId(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/webapp", "1") + + if c := db.Refs("1"); c != 1 { + t.Fatalf("Expect reference count to be 1, got %d", c) + } + + db.Set("/db", "2") + db.Set("/webapp/db", "2") + + count, err := db.Purge("2") + if err != nil { + t.Fatal(err) + } + if count != 2 { + t.Fatalf("Expected 2 references to be removed, got %d", count) + } +} + +// Regression test 
https://github.com/docker/docker/issues/12334
+func TestPurgeIdRefPaths(t *testing.T) {
+	db, dbpath := newTestDb(t)
+	defer destroyTestDb(dbpath)
+
+	db.Set("/webapp", "1")
+	db.Set("/db", "2")
+
+	db.Set("/db/webapp", "1")
+
+	if c := db.Refs("1"); c != 2 {
+		t.Fatalf("Expected 2 references for webapp, got %d", c)
+	}
+	if c := db.Refs("2"); c != 1 {
+		t.Fatalf("Expected 1 reference for db, got %d", c)
+	}
+
+	if rp := db.RefPaths("2"); len(rp) != 1 {
+		t.Fatalf("Expected 1 reference path for db, got %d", len(rp))
+	}
+
+	count, err := db.Purge("2")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if count != 2 {
+		t.Fatalf("Expected 2 rows to be removed, got %d", count)
+	}
+
+	if c := db.Refs("2"); c != 0 {
+		t.Fatalf("Expected 0 references for db, got %d", c)
+	}
+	if c := db.Refs("1"); c != 1 {
+		t.Fatalf("Expected 1 reference for webapp, got %d", c)
+	}
+}
+
+func TestRename(t *testing.T) {
+	db, dbpath := newTestDb(t)
+	defer destroyTestDb(dbpath)
+
+	db.Set("/webapp", "1")
+
+	if db.Refs("1") != 1 {
+		t.Fatal("Expect reference count to be 1")
+	}
+
+	db.Set("/db", "2")
+	db.Set("/webapp/db", "2")
+
+	if db.Get("/webapp/db") == nil {
+		t.Fatal("Cannot find entity at path /webapp/db")
+	}
+
+	if err := db.Rename("/webapp/db", "/webapp/newdb"); err != nil {
+		t.Fatal(err)
+	}
+	if db.Get("/webapp/db") != nil {
+		t.Fatal("Entity should not exist at /webapp/db")
+	}
+	if db.Get("/webapp/newdb") == nil {
+		t.Fatal("Cannot find entity at path /webapp/newdb")
+	}
+
+}
+
+func TestCreateMultipleNames(t *testing.T) {
+	db, dbpath := newTestDb(t)
+	defer destroyTestDb(dbpath)
+
+	db.Set("/db", "1")
+	if _, err := db.Set("/myapp", "1"); err != nil {
+		t.Fatal(err)
+	}
+
+	db.Walk("/", func(p string, e *Entity) error {
+		t.Logf("%s\n", p)
+		return nil
+	}, -1)
+}
+
+func TestRefPaths(t *testing.T) {
+	db, dbpath := newTestDb(t)
+	defer destroyTestDb(dbpath)
+
+	db.Set("/webapp", "1")
+
+	db.Set("/db", "2")
+	db.Set("/webapp/db", "2")
+
+	refs := db.RefPaths("2")
+	if len(refs) != 2 {
+		t.Fatalf("Expected reference count to be 2, got %d", len(refs))
+	}
+}
+
+func TestExistsTrue(t *testing.T) {
+	db, dbpath := newTestDb(t)
+	defer destroyTestDb(dbpath)
+
+	db.Set("/testing", "1")
+
+	if !db.Exists("/testing") {
+		t.Fatalf("/testing should exist")
+	}
+}
+
+func TestExistsFalse(t *testing.T) {
+	db, dbpath := newTestDb(t)
+	defer destroyTestDb(dbpath)
+
+	db.Set("/toerhe", "1")
+
+	if db.Exists("/testing") {
+		t.Fatalf("/testing should not exist")
+	}
+
+}
+
+func TestGetNameWithTrailingSlash(t *testing.T) {
+	db, dbpath := newTestDb(t)
+	defer destroyTestDb(dbpath)
+
+	db.Set("/todo", "1")
+
+	e := db.Get("/todo/")
+	if e == nil {
+		t.Fatalf("Entity should not be nil")
+	}
+}
+
+func TestConcurrentWrites(t *testing.T) {
+	db, dbpath := newTestDb(t)
+	defer destroyTestDb(dbpath)
+
+	errs := make(chan error, 2)
+
+	save := func(name string, id string) {
+		if _, err := db.Set(fmt.Sprintf("/%s", name), id); err != nil {
+			errs <- err
+		}
+		errs <- nil
+	}
+	purge := func(id string) {
+		if _, err := db.Purge(id); err != nil {
+			errs <- err
+		}
+		errs <- nil
+	}
+
+	save("/1", "1")
+
+	go purge("1")
+	go save("/2", "2")
+
+	any := false
+	for i := 0; i < 2; i++ {
+		if err := <-errs; err != nil {
+			any = true
+			t.Log(err)
+		}
+	}
+	if any {
+		t.Fail()
+	}
+}
diff --git a/pkg/graphdb/sort.go b/pkg/graphdb/sort.go
new file mode 100644
index 00000000..c07df077
--- /dev/null
+++ b/pkg/graphdb/sort.go
@@ -0,0 +1,27 @@
+package graphdb
+
+import "sort"
+
+type pathSorter struct {
+	paths []string
+	by    func(i, j string) 
bool +} + +func sortByDepth(paths []string) { + s := &pathSorter{paths, func(i, j string) bool { + return PathDepth(i) > PathDepth(j) + }} + sort.Sort(s) +} + +func (s *pathSorter) Len() int { + return len(s.paths) +} + +func (s *pathSorter) Swap(i, j int) { + s.paths[i], s.paths[j] = s.paths[j], s.paths[i] +} + +func (s *pathSorter) Less(i, j int) bool { + return s.by(s.paths[i], s.paths[j]) +} diff --git a/pkg/graphdb/sort_test.go b/pkg/graphdb/sort_test.go new file mode 100644 index 00000000..ddf2266f --- /dev/null +++ b/pkg/graphdb/sort_test.go @@ -0,0 +1,29 @@ +package graphdb + +import ( + "testing" +) + +func TestSort(t *testing.T) { + paths := []string{ + "/", + "/myreallylongname", + "/app/db", + } + + sortByDepth(paths) + + if len(paths) != 3 { + t.Fatalf("Expected 3 parts got %d", len(paths)) + } + + if paths[0] != "/app/db" { + t.Fatalf("Expected /app/db got %s", paths[0]) + } + if paths[1] != "/myreallylongname" { + t.Fatalf("Expected /myreallylongname got %s", paths[1]) + } + if paths[2] != "/" { + t.Fatalf("Expected / got %s", paths[2]) + } +} diff --git a/pkg/graphdb/utils.go b/pkg/graphdb/utils.go new file mode 100644 index 00000000..9edd79c3 --- /dev/null +++ b/pkg/graphdb/utils.go @@ -0,0 +1,32 @@ +package graphdb + +import ( + "path" + "strings" +) + +// Split p on / +func split(p string) []string { + return strings.Split(p, "/") +} + +// PathDepth returns the depth or number of / in a given path +func PathDepth(p string) int { + parts := split(p) + if len(parts) == 2 && parts[1] == "" { + return 1 + } + return len(parts) +} + +func splitPath(p string) (parent, name string) { + if p[0] != '/' { + p = "/" + p + } + parent, name = path.Split(p) + l := len(parent) + if parent[l-1] == '/' { + parent = parent[:l-1] + } + return +} diff --git a/pkg/homedir/homedir.go b/pkg/homedir/homedir.go new file mode 100644 index 00000000..8154e83f --- /dev/null +++ b/pkg/homedir/homedir.go @@ -0,0 +1,39 @@ +package homedir + +import ( + "os" + "runtime" + + "github.com/opencontainers/runc/libcontainer/user" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + if runtime.GOOS == "windows" { + return "USERPROFILE" + } + return "HOME" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + home := os.Getenv(Key()) + if home == "" && runtime.GOOS != "windows" { + if u, err := user.CurrentUser(); err == nil { + return u.Home + } + } + return home +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. 
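+//
+// For example (illustrative), filepath.Join(GetShortcutString(), ".docker")
+// reads as "~/.docker" on Linux and "%USERPROFILE%\.docker" on Windows;
+// the shortcut is expanded by the user's shell, not by Go itself.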
+func GetShortcutString() string {
+	if runtime.GOOS == "windows" {
+		return "%USERPROFILE%" // be careful while using in format functions
+	}
+	return "~"
+}
diff --git a/pkg/homedir/homedir_test.go b/pkg/homedir/homedir_test.go
new file mode 100644
index 00000000..7a95cb2b
--- /dev/null
+++ b/pkg/homedir/homedir_test.go
@@ -0,0 +1,24 @@
+package homedir
+
+import (
+	"path/filepath"
+	"testing"
+)
+
+func TestGet(t *testing.T) {
+	home := Get()
+	if home == "" {
+		t.Fatal("returned home directory is empty")
+	}
+
+	if !filepath.IsAbs(home) {
+		t.Fatalf("returned path is not absolute: %s", home)
+	}
+}
+
+func TestGetShortcutString(t *testing.T) {
+	shortcut := GetShortcutString()
+	if shortcut == "" {
+		t.Fatal("returned shortcut string is empty")
+	}
+}
diff --git a/pkg/httputils/httputils.go b/pkg/httputils/httputils.go
new file mode 100644
index 00000000..f1e5dcd1
--- /dev/null
+++ b/pkg/httputils/httputils.go
@@ -0,0 +1,58 @@
+package httputils
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"regexp"
+	"strings"
+
+	"github.com/docker/docker/pkg/jsonmessage"
+)
+
+// Download requests a given URL and returns the *http.Response
+func Download(url string) (resp *http.Response, err error) {
+	if resp, err = http.Get(url); err != nil {
+		return nil, err
+	}
+	if resp.StatusCode >= 400 {
+		return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status)
+	}
+	return resp, nil
+}
+
+// NewHTTPRequestError returns a JSON response error
+func NewHTTPRequestError(msg string, res *http.Response) error {
+	return &jsonmessage.JSONError{
+		Message: msg,
+		Code:    res.StatusCode,
+	}
+}
+
+type ServerHeader struct {
+	App string // docker
+	Ver string // 1.8.0-dev
+	OS  string // windows or linux
+}
+
+// ParseServerHeader extracts pieces from an HTTP server header
+// which is in the format "docker/version (os)" eg docker/1.8.0-dev (windows)
+func ParseServerHeader(hdr string) (*ServerHeader, error) {
+	re := regexp.MustCompile(`.*\((.+)\).*$`)
+	r := &ServerHeader{}
+	if matches := re.FindStringSubmatch(hdr); matches != nil {
+		r.OS = matches[1]
+		parts := strings.Split(hdr, "/")
+		if len(parts) != 2 {
+			return nil, errors.New("Bad header: '/' missing")
+		}
+		r.App = parts[0]
+		v := strings.Split(parts[1], " ")
+		if len(v) != 2 {
+			return nil, errors.New("Bad header: Expected single space")
+		}
+		r.Ver = v[0]
+		return r, nil
+	}
+	return nil, errors.New("Bad header: Failed regex match")
+}
diff --git a/pkg/httputils/mimetype.go b/pkg/httputils/mimetype.go
new file mode 100644
index 00000000..5d1aee40
--- /dev/null
+++ b/pkg/httputils/mimetype.go
@@ -0,0 +1,29 @@
+package httputils
+
+import (
+	"mime"
+	"net/http"
+)
+
+var MimeTypes = struct {
+	TextPlain   string
+	Tar         string
+	OctetStream string
+}{"text/plain", "application/tar", "application/octet-stream"}
+
+// DetectContentType returns a best guess representation of the MIME
+// content type for the bytes at c. The value detected by
+// http.DetectContentType is guaranteed not to be nil, defaulting to
+// application/octet-stream when a better guess cannot be made. The
+// result of this detection is then run through mime.ParseMediaType()
+// which separates the actual MIME string from any parameters. 
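+//
+// Illustrative usage, assuming head holds the first bytes of a stream:
+//
+//	ct, args, err := DetectContentType(head)
+//	// ct is e.g. "text/plain"; parameters such as "charset=utf-8"
+//	// are returned separately in args.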
+func DetectContentType(c []byte) (string, map[string]string, error) { + + ct := http.DetectContentType(c) + contentType, args, err := mime.ParseMediaType(ct) + if err != nil { + return "", nil, err + } + + return contentType, args, nil +} diff --git a/pkg/httputils/resumablerequestreader.go b/pkg/httputils/resumablerequestreader.go new file mode 100644 index 00000000..bebc8608 --- /dev/null +++ b/pkg/httputils/resumablerequestreader.go @@ -0,0 +1,95 @@ +package httputils + +import ( + "fmt" + "io" + "net/http" + "time" + + "github.com/Sirupsen/logrus" +) + +type resumableRequestReader struct { + client *http.Client + request *http.Request + lastRange int64 + totalSize int64 + currentResponse *http.Response + failures uint32 + maxFailures uint32 +} + +// ResumableRequestReader makes it possible to resume reading a request's body transparently +// maxfail is the number of times we retry to make requests again (not resumes) +// totalsize is the total length of the body; auto detect if not provided +func ResumableRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser { + return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize} +} + +// ResumableRequestReaderWithInitialResponse makes it possible to resume +// reading the body of an already initiated request. +func ResumableRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser { + return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse} +} + +func (r *resumableRequestReader) Read(p []byte) (n int, err error) { + if r.client == nil || r.request == nil { + return 0, fmt.Errorf("client and request can't be nil\n") + } + isFreshRequest := false + if r.lastRange != 0 && r.currentResponse == nil { + readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize) + r.request.Header.Set("Range", readRange) + time.Sleep(5 * time.Second) + } + if r.currentResponse == nil { + r.currentResponse, err = r.client.Do(r.request) + isFreshRequest = true + } + if err != nil && r.failures+1 != r.maxFailures { + r.cleanUpResponse() + r.failures++ + time.Sleep(5 * time.Duration(r.failures) * time.Second) + return 0, nil + } else if err != nil { + r.cleanUpResponse() + return 0, err + } + if r.currentResponse.StatusCode == 416 && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 { + r.cleanUpResponse() + return 0, io.EOF + } else if r.currentResponse.StatusCode != 206 && r.lastRange != 0 && isFreshRequest { + r.cleanUpResponse() + return 0, fmt.Errorf("the server doesn't support byte ranges") + } + if r.totalSize == 0 { + r.totalSize = r.currentResponse.ContentLength + } else if r.totalSize <= 0 { + r.cleanUpResponse() + return 0, fmt.Errorf("failed to auto detect content length") + } + n, err = r.currentResponse.Body.Read(p) + r.lastRange += int64(n) + if err != nil { + r.cleanUpResponse() + } + if err != nil && err != io.EOF { + logrus.Infof("encountered error during pull and clearing it before resume: %s", err) + err = nil + } + return n, err +} + +func (r *resumableRequestReader) Close() error { + r.cleanUpResponse() + r.client = nil + r.request = nil + return nil +} + +func (r *resumableRequestReader) cleanUpResponse() { + if r.currentResponse != nil { + r.currentResponse.Body.Close() + r.currentResponse = nil + } +} diff --git a/pkg/httputils/resumablerequestreader_test.go 
b/pkg/httputils/resumablerequestreader_test.go new file mode 100644 index 00000000..35338600 --- /dev/null +++ b/pkg/httputils/resumablerequestreader_test.go @@ -0,0 +1,83 @@ +package httputils + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func TestResumableRequestReader(t *testing.T) { + + srvtxt := "some response text data" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, srvtxt) + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + retries := uint32(5) + imgSize := int64(len(srvtxt)) + + resreq := ResumableRequestReader(client, req, retries, imgSize) + defer resreq.Close() + + data, err := ioutil.ReadAll(resreq) + if err != nil { + t.Fatal(err) + } + + resstr := strings.TrimSuffix(string(data), "\n") + + if resstr != srvtxt { + t.Errorf("resstr != srvtxt") + } +} + +func TestResumableRequestReaderWithInitialResponse(t *testing.T) { + + srvtxt := "some response text data" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, srvtxt) + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + retries := uint32(5) + imgSize := int64(len(srvtxt)) + + res, err := client.Do(req) + if err != nil { + t.Fatal(err) + } + + resreq := ResumableRequestReaderWithInitialResponse(client, req, retries, imgSize, res) + defer resreq.Close() + + data, err := ioutil.ReadAll(resreq) + if err != nil { + t.Fatal(err) + } + + resstr := strings.TrimSuffix(string(data), "\n") + + if resstr != srvtxt { + t.Errorf("resstr != srvtxt") + } +} diff --git a/pkg/ioutils/fmt.go b/pkg/ioutils/fmt.go new file mode 100644 index 00000000..801132ff --- /dev/null +++ b/pkg/ioutils/fmt.go @@ -0,0 +1,14 @@ +package ioutils + +import ( + "fmt" + "io" +) + +// FprintfIfNotEmpty prints the string value if it's not empty +func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) { + if value != "" { + return fmt.Fprintf(w, format, value) + } + return 0, nil +} diff --git a/pkg/ioutils/fmt_test.go b/pkg/ioutils/fmt_test.go new file mode 100644 index 00000000..89688632 --- /dev/null +++ b/pkg/ioutils/fmt_test.go @@ -0,0 +1,17 @@ +package ioutils + +import "testing" + +func TestFprintfIfNotEmpty(t *testing.T) { + wc := NewWriteCounter(&NopWriter{}) + n, _ := FprintfIfNotEmpty(wc, "foo%s", "") + + if wc.Count != 0 || n != 0 { + t.Errorf("Wrong count: %v vs. %v vs. 0", wc.Count, n) + } + + n, _ = FprintfIfNotEmpty(wc, "foo%s", "bar") + if wc.Count != 6 || n != 6 { + t.Errorf("Wrong count: %v vs. %v vs. 
6", wc.Count, n)
+	}
+}
diff --git a/pkg/ioutils/multireader.go b/pkg/ioutils/multireader.go
new file mode 100644
index 00000000..f231aa9d
--- /dev/null
+++ b/pkg/ioutils/multireader.go
@@ -0,0 +1,222 @@
+package ioutils
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+)
+
+type pos struct {
+	idx    int
+	offset int64
+}
+
+type multiReadSeeker struct {
+	readers []io.ReadSeeker
+	pos     *pos
+	posIdx  map[io.ReadSeeker]int
+}
+
+func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) {
+	var tmpOffset int64
+	switch whence {
+	case os.SEEK_SET:
+		for i, rdr := range r.readers {
+			// get size of the current reader
+			s, err := rdr.Seek(0, os.SEEK_END)
+			if err != nil {
+				return -1, err
+			}
+
+			if offset > tmpOffset+s {
+				if i == len(r.readers)-1 {
+					rdrOffset := offset - tmpOffset
+					if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil {
+						return -1, err
+					}
+					r.pos = &pos{i, rdrOffset}
+					return offset, nil
+				}
+
+				tmpOffset += s
+				continue
+			}
+
+			rdrOffset := offset - tmpOffset
+			idx := i
+
+			rdr.Seek(rdrOffset, os.SEEK_SET)
+			// make sure all following readers are at 0
+			for _, rdr := range r.readers[i+1:] {
+				rdr.Seek(0, os.SEEK_SET)
+			}
+
+			if rdrOffset == s && i != len(r.readers)-1 {
+				idx++
+				rdrOffset = 0
+			}
+			r.pos = &pos{idx, rdrOffset}
+			return offset, nil
+		}
+	case os.SEEK_END:
+		for _, rdr := range r.readers {
+			s, err := rdr.Seek(0, os.SEEK_END)
+			if err != nil {
+				return -1, err
+			}
+			tmpOffset += s
+		}
+		r.Seek(tmpOffset+offset, os.SEEK_SET)
+		return tmpOffset + offset, nil
+	case os.SEEK_CUR:
+		if r.pos == nil {
+			return r.Seek(offset, os.SEEK_SET)
+		}
+		// Just return the current offset
+		if offset == 0 {
+			return r.getCurOffset()
+		}
+
+		curOffset, err := r.getCurOffset()
+		if err != nil {
+			return -1, err
+		}
+		rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset)
+		if err != nil {
+			return -1, err
+		}
+
+		r.pos = &pos{r.posIdx[rdr], rdrOffset}
+		return curOffset + offset, nil
+	default:
+		return -1, fmt.Errorf("Invalid whence: %d", whence)
+	}
+
+	return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset)
+}
+
+func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) {
+	var offsetTo int64
+
+	for i, rdr := range r.readers {
+		size, err := getReadSeekerSize(rdr)
+		if err != nil {
+			return nil, -1, err
+		}
+		// The absolute offset lands inside this reader; return it along
+		// with the offset relative to the start of that reader. The last
+		// reader also absorbs offsets past the end of all readers.
+		if offsetTo+size > offset || i == len(r.readers)-1 {
+			return rdr, offset - offsetTo, nil
+		}
+		offsetTo += size
+	}
+
+	return nil, 0, nil
+}
+
+func (r *multiReadSeeker) getCurOffset() (int64, error) {
+	var totalSize int64
+	for _, rdr := range r.readers[:r.pos.idx+1] {
+		if r.posIdx[rdr] == r.pos.idx {
+			totalSize += r.pos.offset
+			break
+		}
+
+		size, err := getReadSeekerSize(rdr)
+		if err != nil {
+			return -1, fmt.Errorf("error getting seeker size: %v", err)
+		}
+		totalSize += size
+	}
+	return totalSize, nil
+}
+
+func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) {
+	var offset int64
+	for _, rs := range r.readers {
+		if rs == rdr {
+			break
+		}
+
+		size, err := getReadSeekerSize(rs)
+		if err != nil {
+			return -1, err
+		}
+		offset += size
+	}
+	return offset, nil
+}
+
+func (r *multiReadSeeker) Read(b []byte) (int, error) {
+	if r.pos == nil {
+		r.pos = &pos{0, 0}
+	}
+
+	bCap := int64(cap(b))
+	buf := bytes.NewBuffer(nil)
+	var rdr io.ReadSeeker
+
+	for _, rdr = range r.readers[r.pos.idx:] {
+		readBytes, err := io.CopyN(buf, rdr, bCap)
+		if err != 
nil && err != io.EOF { + return -1, err + } + bCap -= readBytes + + if bCap == 0 { + break + } + } + + rdrPos, err := rdr.Seek(0, os.SEEK_CUR) + if err != nil { + return -1, err + } + r.pos = &pos{r.posIdx[rdr], rdrPos} + return buf.Read(b) +} + +func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) { + // save the current position + pos, err := rdr.Seek(0, os.SEEK_CUR) + if err != nil { + return -1, err + } + + // get the size + size, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + + // reset the position + if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil { + return -1, err + } + return size, nil +} + +// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided +// input readseekers. After calling this method the initial position is set to the +// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances +// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker. +// Seek can be used over the sum of lengths of all readseekers. +// +// When a MultiReadSeeker is used, no Read and Seek operations should be made on +// its ReadSeeker components. Also, users should make no assumption on the state +// of individual readseekers while the MultiReadSeeker is used. +func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker { + if len(readers) == 1 { + return readers[0] + } + idx := make(map[io.ReadSeeker]int) + for i, rdr := range readers { + idx[rdr] = i + } + return &multiReadSeeker{ + readers: readers, + posIdx: idx, + } +} diff --git a/pkg/ioutils/multireader_test.go b/pkg/ioutils/multireader_test.go new file mode 100644 index 00000000..de495b56 --- /dev/null +++ b/pkg/ioutils/multireader_test.go @@ -0,0 +1,149 @@ +package ioutils + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "testing" +) + +func TestMultiReadSeekerReadAll(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + expectedSize := int64(s1.Len() + s2.Len() + s3.Len()) + + b, err := ioutil.ReadAll(mr) + if err != nil { + t.Fatal(err) + } + + expected := "hello world 1hello world 2hello world 3" + if string(b) != expected { + t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) + } + + size, err := mr.Seek(0, os.SEEK_END) + if err != nil { + t.Fatal(err) + } + if size != expectedSize { + t.Fatalf("reader size does not match, got %d, expected %d", size, expectedSize) + } + + // Reset the position and read again + pos, err := mr.Seek(0, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + if pos != 0 { + t.Fatalf("expected position to be set to 0, got %d", pos) + } + + b, err = ioutil.ReadAll(mr) + if err != nil { + t.Fatal(err) + } + + if string(b) != expected { + t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) + } +} + +func TestMultiReadSeekerReadEach(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + var totalBytes int64 + for i, s := range []*strings.Reader{s1, s2, s3} { + sLen := int64(s.Len()) + buf := make([]byte, s.Len()) + expected := []byte(fmt.Sprintf("%s %d", str, i+1)) + + if _, err := mr.Read(buf); err != nil && err != io.EOF { + t.Fatal(err) + } + + if !bytes.Equal(buf, expected) { + t.Fatalf("expected %q to be %q", string(buf), string(expected)) + } + + pos, 
err := mr.Seek(0, os.SEEK_CUR) + if err != nil { + t.Fatalf("iteration: %d, error: %v", i+1, err) + } + + // check that the total bytes read is the current position of the seeker + totalBytes += sLen + if pos != totalBytes { + t.Fatalf("expected current position to be: %d, got: %d, iteration: %d", totalBytes, pos, i+1) + } + + // This tests not only that SEEK_SET and SEEK_CUR give the same values, but that the next iteration is in the expected position as well + newPos, err := mr.Seek(pos, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + if newPos != pos { + t.Fatalf("expected to get same position when calling SEEK_SET with value from SEEK_CUR, cur: %d, set: %d", pos, newPos) + } + } +} + +func TestMultiReadSeekerReadSpanningChunks(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + buf := make([]byte, s1.Len()+3) + _, err := mr.Read(buf) + if err != nil { + t.Fatal(err) + } + + // expected is the contents of s1 + 3 bytes from s2, ie, the `hel` at the end of this string + expected := "hello world 1hel" + if string(buf) != expected { + t.Fatalf("expected %s to be %s", string(buf), expected) + } +} + +func TestMultiReadSeekerNegativeSeek(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + s1Len := s1.Len() + s2Len := s2.Len() + s3Len := s3.Len() + + s, err := mr.Seek(int64(-1*s3.Len()), os.SEEK_END) + if err != nil { + t.Fatal(err) + } + if s != int64(s1Len+s2Len) { + t.Fatalf("expected %d to be %d", s, s1.Len()+s2.Len()) + } + + buf := make([]byte, s3Len) + if _, err := mr.Read(buf); err != nil && err != io.EOF { + t.Fatal(err) + } + expected := fmt.Sprintf("%s %d", str, 3) + if string(buf) != fmt.Sprintf("%s %d", str, 3) { + t.Fatalf("expected %q to be %q", string(buf), expected) + } +} diff --git a/pkg/ioutils/readers.go b/pkg/ioutils/readers.go new file mode 100644 index 00000000..ff09baad --- /dev/null +++ b/pkg/ioutils/readers.go @@ -0,0 +1,254 @@ +package ioutils + +import ( + "bytes" + "crypto/rand" + "crypto/sha256" + "encoding/hex" + "io" + "math/big" + "sync" + "time" +) + +type readCloserWrapper struct { + io.Reader + closer func() error +} + +func (r *readCloserWrapper) Close() error { + return r.closer() +} + +func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { + return &readCloserWrapper{ + Reader: r, + closer: closer, + } +} + +type readerErrWrapper struct { + reader io.Reader + closer func() +} + +func (r *readerErrWrapper) Read(p []byte) (int, error) { + n, err := r.reader.Read(p) + if err != nil { + r.closer() + } + return n, err +} + +func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { + return &readerErrWrapper{ + reader: r, + closer: closer, + } +} + +// bufReader allows the underlying reader to continue to produce +// output by pre-emptively reading from the wrapped reader. +// This is achieved by buffering this data in bufReader's +// expanding buffer. 
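+//
+// A minimal sketch of the intended use, assuming a pipe whose writer
+// must not block on a slow consumer:
+//
+//	r, w := io.Pipe()
+//	br := NewBufReader(r)
+//	// writes to w are drained into br's internal buffer right away,
+//	// while the consumer reads from br at its own pace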
+type bufReader struct { + sync.Mutex + buf *bytes.Buffer + reader io.Reader + err error + wait sync.Cond + drainBuf []byte + reuseBuf []byte + maxReuse int64 + resetTimeout time.Duration + bufLenResetThreshold int64 + maxReadDataReset int64 +} + +func NewBufReader(r io.Reader) *bufReader { + var timeout int + if randVal, err := rand.Int(rand.Reader, big.NewInt(120)); err == nil { + timeout = int(randVal.Int64()) + 180 + } else { + timeout = 300 + } + reader := &bufReader{ + buf: &bytes.Buffer{}, + drainBuf: make([]byte, 1024), + reuseBuf: make([]byte, 4096), + maxReuse: 1000, + resetTimeout: time.Second * time.Duration(timeout), + bufLenResetThreshold: 100 * 1024, + maxReadDataReset: 10 * 1024 * 1024, + reader: r, + } + reader.wait.L = &reader.Mutex + go reader.drain() + return reader +} + +func NewBufReaderWithDrainbufAndBuffer(r io.Reader, drainBuffer []byte, buffer *bytes.Buffer) *bufReader { + reader := &bufReader{ + buf: buffer, + drainBuf: drainBuffer, + reader: r, + } + reader.wait.L = &reader.Mutex + go reader.drain() + return reader +} + +func (r *bufReader) drain() { + var ( + duration time.Duration + lastReset time.Time + now time.Time + reset bool + bufLen int64 + dataSinceReset int64 + maxBufLen int64 + reuseBufLen int64 + reuseCount int64 + ) + reuseBufLen = int64(len(r.reuseBuf)) + lastReset = time.Now() + for { + n, err := r.reader.Read(r.drainBuf) + dataSinceReset += int64(n) + r.Lock() + bufLen = int64(r.buf.Len()) + if bufLen > maxBufLen { + maxBufLen = bufLen + } + + // Avoid unbounded growth of the buffer over time. + // This has been discovered to be the only non-intrusive + // solution to the unbounded growth of the buffer. + // Alternative solutions such as compression, multiple + // buffers, channels and other similar pieces of code + // were reducing throughput, overall Docker performance + // or simply crashed Docker. + // This solution releases the buffer when specific + // conditions are met to avoid the continuous resizing + // of the buffer for long lived containers. + // + // Move data to the front of the buffer if it's + // smaller than what reuseBuf can store + if bufLen > 0 && reuseBufLen >= bufLen { + n, _ := r.buf.Read(r.reuseBuf) + r.buf.Write(r.reuseBuf[0:n]) + // Take action if the buffer has been reused too many + // times and if there's data in the buffer. + // The timeout is also used as means to avoid doing + // these operations more often or less often than + // required. + // The various conditions try to detect heavy activity + // in the buffer which might be indicators of heavy + // growth of the buffer. + } else if reuseCount >= r.maxReuse && bufLen > 0 { + now = time.Now() + duration = now.Sub(lastReset) + timeoutReached := duration >= r.resetTimeout + + // The timeout has been reached and the + // buffered data couldn't be moved to the front + // of the buffer, so the buffer gets reset. + if timeoutReached && bufLen > reuseBufLen { + reset = true + } + // The amount of buffered data is too high now, + // reset the buffer. + if timeoutReached && maxBufLen >= r.bufLenResetThreshold { + reset = true + } + // Reset the buffer if a certain amount of + // data has gone through the buffer since the + // last reset. + if timeoutReached && dataSinceReset >= r.maxReadDataReset { + reset = true + } + // The buffered data is moved to a fresh buffer, + // swap the old buffer with the new one and + // reset all counters. 
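+			// (The swap below copies only the live bytes into a fresh
+			// bytes.Buffer, leaving the overgrown backing array to be
+			// garbage collected.)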
+ if reset { + newbuf := &bytes.Buffer{} + newbuf.ReadFrom(r.buf) + r.buf = newbuf + lastReset = now + reset = false + dataSinceReset = 0 + maxBufLen = 0 + reuseCount = 0 + } + } + if err != nil { + r.err = err + } else { + r.buf.Write(r.drainBuf[0:n]) + } + reuseCount++ + r.wait.Signal() + r.Unlock() + callSchedulerIfNecessary() + if err != nil { + break + } + } +} + +func (r *bufReader) Read(p []byte) (n int, err error) { + r.Lock() + defer r.Unlock() + for { + n, err = r.buf.Read(p) + if n > 0 { + return n, err + } + if r.err != nil { + return 0, r.err + } + r.wait.Wait() + } +} + +func (r *bufReader) Close() error { + closer, ok := r.reader.(io.ReadCloser) + if !ok { + return nil + } + return closer.Close() +} + +func HashData(src io.Reader) (string, error) { + h := sha256.New() + if _, err := io.Copy(h, src); err != nil { + return "", err + } + return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil +} + +type OnEOFReader struct { + Rc io.ReadCloser + Fn func() +} + +func (r *OnEOFReader) Read(p []byte) (n int, err error) { + n, err = r.Rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *OnEOFReader) Close() error { + err := r.Rc.Close() + r.runFunc() + return err +} + +func (r *OnEOFReader) runFunc() { + if fn := r.Fn; fn != nil { + fn() + r.Fn = nil + } +} diff --git a/pkg/ioutils/readers_test.go b/pkg/ioutils/readers_test.go new file mode 100644 index 00000000..0a39b6ec --- /dev/null +++ b/pkg/ioutils/readers_test.go @@ -0,0 +1,216 @@ +package ioutils + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "strings" + "testing" +) + +// Implement io.Reader +type errorReader struct{} + +func (r *errorReader) Read(p []byte) (int, error) { + return 0, fmt.Errorf("Error reader always fail.") +} + +func TestReadCloserWrapperClose(t *testing.T) { + reader := strings.NewReader("A string reader") + wrapper := NewReadCloserWrapper(reader, func() error { + return fmt.Errorf("This will be called when closing") + }) + err := wrapper.Close() + if err == nil || !strings.Contains(err.Error(), "This will be called when closing") { + t.Fatalf("readCloserWrapper should have call the anonymous func and thus, fail.") + } +} + +func TestReaderErrWrapperReadOnError(t *testing.T) { + called := false + reader := &errorReader{} + wrapper := NewReaderErrWrapper(reader, func() { + called = true + }) + _, err := wrapper.Read([]byte{}) + if err == nil || !strings.Contains(err.Error(), "Error reader always fail.") { + t.Fatalf("readErrWrapper should returned an error") + } + if !called { + t.Fatalf("readErrWrapper should have call the anonymous function on failure") + } +} + +func TestReaderErrWrapperRead(t *testing.T) { + reader := strings.NewReader("a string reader.") + wrapper := NewReaderErrWrapper(reader, func() { + t.Fatalf("readErrWrapper should not have called the anonymous function") + }) + // Read 20 byte (should be ok with the string above) + num, err := wrapper.Read(make([]byte, 20)) + if err != nil { + t.Fatal(err) + } + if num != 16 { + t.Fatalf("readerErrWrapper should have read 16 byte, but read %d", num) + } +} + +func TestNewBufReaderWithDrainbufAndBuffer(t *testing.T) { + reader, writer := io.Pipe() + + drainBuffer := make([]byte, 1024) + buffer := bytes.Buffer{} + bufreader := NewBufReaderWithDrainbufAndBuffer(reader, drainBuffer, &buffer) + + // Write everything down to a Pipe + // Usually, a pipe should block but because of the buffered reader, + // the writes will go through + done := make(chan bool) + go func() { + writer.Write([]byte("hello world")) + writer.Close() + 
done <- true + }() + + // Drain the reader *after* everything has been written, just to verify + // it is indeed buffering + <-done + + output, err := ioutil.ReadAll(bufreader) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(output, []byte("hello world")) { + t.Error(string(output)) + } +} + +func TestBufReader(t *testing.T) { + reader, writer := io.Pipe() + bufreader := NewBufReader(reader) + + // Write everything down to a Pipe + // Usually, a pipe should block but because of the buffered reader, + // the writes will go through + done := make(chan bool) + go func() { + writer.Write([]byte("hello world")) + writer.Close() + done <- true + }() + + // Drain the reader *after* everything has been written, just to verify + // it is indeed buffering + <-done + output, err := ioutil.ReadAll(bufreader) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(output, []byte("hello world")) { + t.Error(string(output)) + } +} + +func TestBufReaderCloseWithNonReaderCloser(t *testing.T) { + reader := strings.NewReader("buffer") + bufreader := NewBufReader(reader) + + if err := bufreader.Close(); err != nil { + t.Fatal(err) + } + +} + +// implements io.ReadCloser +type simpleReaderCloser struct{} + +func (r *simpleReaderCloser) Read(p []byte) (n int, err error) { + return 0, nil +} + +func (r *simpleReaderCloser) Close() error { + return nil +} + +func TestBufReaderCloseWithReaderCloser(t *testing.T) { + reader := &simpleReaderCloser{} + bufreader := NewBufReader(reader) + + err := bufreader.Close() + if err != nil { + t.Fatal(err) + } + +} + +func TestHashData(t *testing.T) { + reader := strings.NewReader("hash-me") + actual, err := HashData(reader) + if err != nil { + t.Fatal(err) + } + expected := "sha256:4d11186aed035cc624d553e10db358492c84a7cd6b9670d92123c144930450aa" + if actual != expected { + t.Fatalf("Expecting %s, got %s", expected, actual) + } +} + +type repeatedReader struct { + readCount int + maxReads int + data []byte +} + +func newRepeatedReader(max int, data []byte) *repeatedReader { + return &repeatedReader{0, max, data} +} + +func (r *repeatedReader) Read(p []byte) (int, error) { + if r.readCount >= r.maxReads { + return 0, io.EOF + } + r.readCount++ + n := copy(p, r.data) + return n, nil +} + +func testWithData(data []byte, reads int) { + reader := newRepeatedReader(reads, data) + bufReader := NewBufReader(reader) + io.Copy(ioutil.Discard, bufReader) +} + +func Benchmark1M10BytesReads(b *testing.B) { + reads := 1000000 + readSize := int64(10) + data := make([]byte, readSize) + b.SetBytes(readSize * int64(reads)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + testWithData(data, reads) + } +} + +func Benchmark1M1024BytesReads(b *testing.B) { + reads := 1000000 + readSize := int64(1024) + data := make([]byte, readSize) + b.SetBytes(readSize * int64(reads)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + testWithData(data, reads) + } +} + +func Benchmark10k32KBytesReads(b *testing.B) { + reads := 10000 + readSize := int64(32 * 1024) + data := make([]byte, readSize) + b.SetBytes(readSize * int64(reads)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + testWithData(data, reads) + } +} diff --git a/pkg/ioutils/scheduler.go b/pkg/ioutils/scheduler.go new file mode 100644 index 00000000..3c88f29e --- /dev/null +++ b/pkg/ioutils/scheduler.go @@ -0,0 +1,6 @@ +// +build !gccgo + +package ioutils + +func callSchedulerIfNecessary() { +} diff --git a/pkg/ioutils/scheduler_gccgo.go b/pkg/ioutils/scheduler_gccgo.go new file mode 100644 index 00000000..c11d02b9 --- /dev/null +++ 
b/pkg/ioutils/scheduler_gccgo.go @@ -0,0 +1,13 @@ +// +build gccgo + +package ioutils + +import ( + "runtime" +) + +func callSchedulerIfNecessary() { + //allow or force Go scheduler to switch context, without explicitly + //forcing this will make it hang when using gccgo implementation + runtime.Gosched() +} diff --git a/pkg/ioutils/writeflusher.go b/pkg/ioutils/writeflusher.go new file mode 100644 index 00000000..25095474 --- /dev/null +++ b/pkg/ioutils/writeflusher.go @@ -0,0 +1,47 @@ +package ioutils + +import ( + "io" + "net/http" + "sync" +) + +type WriteFlusher struct { + sync.Mutex + w io.Writer + flusher http.Flusher + flushed bool +} + +func (wf *WriteFlusher) Write(b []byte) (n int, err error) { + wf.Lock() + defer wf.Unlock() + n, err = wf.w.Write(b) + wf.flushed = true + wf.flusher.Flush() + return n, err +} + +// Flush the stream immediately. +func (wf *WriteFlusher) Flush() { + wf.Lock() + defer wf.Unlock() + wf.flushed = true + wf.flusher.Flush() +} + +func (wf *WriteFlusher) Flushed() bool { + wf.Lock() + defer wf.Unlock() + return wf.flushed +} + +func NewWriteFlusher(w io.Writer) *WriteFlusher { + var flusher http.Flusher + if f, ok := w.(http.Flusher); ok { + flusher = f + } else { + flusher = &NopFlusher{} + } + return &WriteFlusher{w: w, flusher: flusher} +} diff --git a/pkg/ioutils/writers.go b/pkg/ioutils/writers.go new file mode 100644 index 00000000..43fdc44e --- /dev/null +++ b/pkg/ioutils/writers.go @@ -0,0 +1,60 @@ +package ioutils + +import "io" + +type NopWriter struct{} + +func (*NopWriter) Write(buf []byte) (int, error) { + return len(buf), nil +} + +type nopWriteCloser struct { + io.Writer +} + +func (w *nopWriteCloser) Close() error { return nil } + +func NopWriteCloser(w io.Writer) io.WriteCloser { + return &nopWriteCloser{w} +} + +type NopFlusher struct{} + +func (f *NopFlusher) Flush() {} + +type writeCloserWrapper struct { + io.Writer + closer func() error +} + +func (r *writeCloserWrapper) Close() error { + return r.closer() +} + +func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { + return &writeCloserWrapper{ + Writer: r, + closer: closer, + } +} + +// Wrap a concrete io.Writer and hold a count of the number +// of bytes written to the writer during a "session". 
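+// An illustrative session (dst is any io.Writer, v any value):
+//
+//	wc := NewWriteCounter(dst)
+//	json.NewEncoder(wc).Encode(v)
+//	// wc.Count now holds the number of bytes Encode wrote to dst
+//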
+// This can be convenient when write return is masked +// (e.g., json.Encoder.Encode()) +type WriteCounter struct { + Count int64 + Writer io.Writer +} + +func NewWriteCounter(w io.Writer) *WriteCounter { + return &WriteCounter{ + Writer: w, + } +} + +func (wc *WriteCounter) Write(p []byte) (count int, err error) { + count, err = wc.Writer.Write(p) + wc.Count += int64(count) + return +} diff --git a/pkg/ioutils/writers_test.go b/pkg/ioutils/writers_test.go new file mode 100644 index 00000000..564b1cd4 --- /dev/null +++ b/pkg/ioutils/writers_test.go @@ -0,0 +1,65 @@ +package ioutils + +import ( + "bytes" + "strings" + "testing" +) + +func TestWriteCloserWrapperClose(t *testing.T) { + called := false + writer := bytes.NewBuffer([]byte{}) + wrapper := NewWriteCloserWrapper(writer, func() error { + called = true + return nil + }) + if err := wrapper.Close(); err != nil { + t.Fatal(err) + } + if !called { + t.Fatalf("writeCloserWrapper should have call the anonymous function.") + } +} + +func TestNopWriteCloser(t *testing.T) { + writer := bytes.NewBuffer([]byte{}) + wrapper := NopWriteCloser(writer) + if err := wrapper.Close(); err != nil { + t.Fatal("NopWriteCloser always return nil on Close.") + } + +} + +func TestNopWriter(t *testing.T) { + nw := &NopWriter{} + l, err := nw.Write([]byte{'c'}) + if err != nil { + t.Fatal(err) + } + if l != 1 { + t.Fatalf("Expected 1 got %d", l) + } +} + +func TestWriteCounter(t *testing.T) { + dummy1 := "This is a dummy string." + dummy2 := "This is another dummy string." + totalLength := int64(len(dummy1) + len(dummy2)) + + reader1 := strings.NewReader(dummy1) + reader2 := strings.NewReader(dummy2) + + var buffer bytes.Buffer + wc := NewWriteCounter(&buffer) + + reader1.WriteTo(wc) + reader2.WriteTo(wc) + + if wc.Count != totalLength { + t.Errorf("Wrong count: %d vs. %d", wc.Count, totalLength) + } + + if buffer.String() != dummy1+dummy2 { + t.Error("Wrong message written") + } +} diff --git a/pkg/jsonlog/jsonlog.go b/pkg/jsonlog/jsonlog.go new file mode 100644 index 00000000..edcf7643 --- /dev/null +++ b/pkg/jsonlog/jsonlog.go @@ -0,0 +1,30 @@ +package jsonlog + +import ( + "encoding/json" + "fmt" + "time" +) + +type JSONLog struct { + Log string `json:"log,omitempty"` + Stream string `json:"stream,omitempty"` + Created time.Time `json:"time"` +} + +func (jl *JSONLog) Format(format string) (string, error) { + if format == "" { + return jl.Log, nil + } + if format == "json" { + m, err := json.Marshal(jl) + return string(m), err + } + return fmt.Sprintf("%s %s", jl.Created.Format(format), jl.Log), nil +} + +func (jl *JSONLog) Reset() { + jl.Log = "" + jl.Stream = "" + jl.Created = time.Time{} +} diff --git a/pkg/jsonlog/jsonlog_marshalling.go b/pkg/jsonlog/jsonlog_marshalling.go new file mode 100644 index 00000000..6bfec482 --- /dev/null +++ b/pkg/jsonlog/jsonlog_marshalling.go @@ -0,0 +1,182 @@ +// This code was initially generated by ffjson +// This code was generated via the following steps: +// $ go get -u github.com/pquerna/ffjson +// $ make BIND_DIR=. shell +// $ ffjson pkg/jsonlog/jsonlog.go +// $ mv pkg/jsonglog/jsonlog_ffjson.go pkg/jsonlog/jsonlog_marshalling.go +// +// It has been modified to improve the performance of time marshalling to JSON +// and to clean it up. 
+// Should this code need to be regenerated when the JSONLog struct is changed, +// the relevant changes which have been made are: +// import ( +// "bytes" +//- +// "unicode/utf8" +//+ +//+ "github.com/docker/docker/pkg/timeutils" +// ) +// +// func (mj *JSONLog) MarshalJSON() ([]byte, error) { +//@@ -20,13 +16,13 @@ func (mj *JSONLog) MarshalJSON() ([]byte, error) { +// } +// return buf.Bytes(), nil +// } +//+ +// func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { +//- var err error +//- var obj []byte +//- var first bool = true +//- _ = obj +//- _ = err +//- _ = first +//+ var ( +//+ err error +//+ timestamp string +//+ first bool = true +//+ ) +// buf.WriteString(`{`) +// if len(mj.Log) != 0 { +// if first == true { +//@@ -52,11 +48,11 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { +// buf.WriteString(`,`) +// } +// buf.WriteString(`"time":`) +//- obj, err = mj.Created.MarshalJSON() +//+ timestamp, err = timeutils.FastMarshalJSON(mj.Created) +// if err != nil { +// return err +// } +//- buf.Write(obj) +//+ buf.WriteString(timestamp) +// buf.WriteString(`}`) +// return nil +// } +// @@ -81,9 +81,10 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { +// if len(mj.Log) != 0 { +// - if first == true { +// - first = false +// - } else { +// - buf.WriteString(`,`) +// - } +// + first = false +// buf.WriteString(`"log":`) +// ffjson_WriteJsonString(buf, mj.Log) +// } + +package jsonlog + +import ( + "bytes" + "unicode/utf8" + + "github.com/docker/docker/pkg/timeutils" +) + +func (mj *JSONLog) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + buf.Grow(1024) + if err := mj.MarshalJSONBuf(&buf); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { + var ( + err error + timestamp string + first bool = true + ) + buf.WriteString(`{`) + if len(mj.Log) != 0 { + first = false + buf.WriteString(`"log":`) + ffjson_WriteJsonString(buf, mj.Log) + } + if len(mj.Stream) != 0 { + if first == true { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"stream":`) + ffjson_WriteJsonString(buf, mj.Stream) + } + if first == true { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"time":`) + timestamp, err = timeutils.FastMarshalJSON(mj.Created) + if err != nil { + return err + } + buf.WriteString(timestamp) + buf.WriteString(`}`) + return nil +} + +func ffjson_WriteJsonString(buf *bytes.Buffer, s string) { + const hex = "0123456789abcdef" + + buf.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + buf.WriteString(s[start:i]) + } + switch b { + case '\\', '"': + buf.WriteByte('\\') + buf.WriteByte(b) + case '\n': + buf.WriteByte('\\') + buf.WriteByte('n') + case '\r': + buf.WriteByte('\\') + buf.WriteByte('r') + default: + + buf.WriteString(`\u00`) + buf.WriteByte(hex[b>>4]) + buf.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + buf.WriteString(s[start:i]) + } + buf.WriteString(`\ufffd`) + i += size + start = i + continue + } + + if c == '\u2028' || c == '\u2029' { + if start < i { + buf.WriteString(s[start:i]) + } + buf.WriteString(`\u202`) + buf.WriteByte(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + buf.WriteString(s[start:]) + } + 
buf.WriteByte('"')
+}
diff --git a/pkg/jsonlog/jsonlog_marshalling_test.go b/pkg/jsonlog/jsonlog_marshalling_test.go
new file mode 100644
index 00000000..5e455685
--- /dev/null
+++ b/pkg/jsonlog/jsonlog_marshalling_test.go
@@ -0,0 +1,34 @@
+package jsonlog
+
+import (
+	"regexp"
+	"testing"
+)
+
+func TestJSONLogMarshalJSON(t *testing.T) {
+	logs := map[JSONLog]string{
+		JSONLog{Log: `"A log line with \\"`}:           `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":\".{20,}\"}$`,
+		JSONLog{Log: "A log line"}:                     `^{\"log\":\"A log line\",\"time\":\".{20,}\"}$`,
+		JSONLog{Log: "A log line with \r"}:             `^{\"log\":\"A log line with \\r\",\"time\":\".{20,}\"}$`,
+		JSONLog{Log: "A log line with & < >"}:          `^{\"log\":\"A log line with \\u0026 \\u003c \\u003e\",\"time\":\".{20,}\"}$`,
+		JSONLog{Log: "A log line with utf8 : 🚀 ψ ω β"}: `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":\".{20,}\"}$`,
+		JSONLog{Stream: "stdout"}:                      `^{\"stream\":\"stdout\",\"time\":\".{20,}\"}$`,
+		JSONLog{}:                                      `^{\"time\":\".{20,}\"}$`,
+		// These ones are a little weird
+		JSONLog{Log: "\u2028 \u2029"}:      `^{\"log\":\"\\u2028 \\u2029\",\"time\":\".{20,}\"}$`,
+		JSONLog{Log: string([]byte{0xaF})}: `^{\"log\":\"\\ufffd\",\"time\":\".{20,}\"}$`,
+		JSONLog{Log: string([]byte{0x7F})}: `^{\"log\":\"\x7f\",\"time\":\".{20,}\"}$`,
+	}
+	for jsonLog, expression := range logs {
+		data, err := jsonLog.MarshalJSON()
+		if err != nil {
+			t.Fatal(err)
+		}
+		res := string(data)
+		t.Logf("Result of WriteLog: %q", res)
+		logRe := regexp.MustCompile(expression)
+		if !logRe.MatchString(res) {
+			t.Fatalf("Log line not in expected format [%v]: %q", expression, res)
+		}
+	}
+}
diff --git a/pkg/jsonlog/jsonlogbytes.go b/pkg/jsonlog/jsonlogbytes.go
new file mode 100644
index 00000000..81a966b9
--- /dev/null
+++ b/pkg/jsonlog/jsonlogbytes.go
@@ -0,0 +1,111 @@
+package jsonlog
+
+import (
+	"bytes"
+	"unicode/utf8"
+)
+
+// JSONLogBytes is based on JSONLog. It allows a JSONLog to be marshalled with
+// the Log field supplied as a []byte and the Created timestamp supplied as an
+// already-marshalled string.
+type JSONLogBytes struct {
+	Log     []byte `json:"log,omitempty"`
+	Stream  string `json:"stream,omitempty"`
+	Created string `json:"time"`
+}
+
+// MarshalJSONBuf is based on the same method from JSONLog. It has been
+// modified to escape the Log field directly from its []byte form and to
+// write the Created field verbatim, since that timestamp is expected to be
+// valid JSON already.
+func (mj *JSONLogBytes) MarshalJSONBuf(buf *bytes.Buffer) error {
+	var first = true
+
+	buf.WriteString(`{`)
+	if len(mj.Log) != 0 {
+		first = false
+		buf.WriteString(`"log":`)
+		ffjson_WriteJsonBytesAsString(buf, mj.Log)
+	}
+	if len(mj.Stream) != 0 {
+		if first == true {
+			first = false
+		} else {
+			buf.WriteString(`,`)
+		}
+		buf.WriteString(`"stream":`)
+		ffjson_WriteJsonString(buf, mj.Stream)
+	}
+	if first == true {
+		first = false
+	} else {
+		buf.WriteString(`,`)
+	}
+	buf.WriteString(`"time":`)
+	buf.WriteString(mj.Created)
+	buf.WriteString(`}`)
+	return nil
+}
+
+// This is based on ffjson_WriteJsonString. It has been changed
+// to accept a string passed as a slice of bytes.
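+// As with the string variant, it escapes `"`, `\`, `\n`, `\r` and other
+// control characters, the HTML-sensitive characters `<`, `>` and `&`, the
+// line separators U+2028/U+2029, and it replaces invalid UTF-8 sequences
+// with `\ufffd`.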
+func ffjson_WriteJsonBytesAsString(buf *bytes.Buffer, s []byte) { + const hex = "0123456789abcdef" + + buf.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + buf.Write(s[start:i]) + } + switch b { + case '\\', '"': + buf.WriteByte('\\') + buf.WriteByte(b) + case '\n': + buf.WriteByte('\\') + buf.WriteByte('n') + case '\r': + buf.WriteByte('\\') + buf.WriteByte('r') + default: + + buf.WriteString(`\u00`) + buf.WriteByte(hex[b>>4]) + buf.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRune(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + buf.Write(s[start:i]) + } + buf.WriteString(`\ufffd`) + i += size + start = i + continue + } + + if c == '\u2028' || c == '\u2029' { + if start < i { + buf.Write(s[start:i]) + } + buf.WriteString(`\u202`) + buf.WriteByte(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + buf.Write(s[start:]) + } + buf.WriteByte('"') +} diff --git a/pkg/jsonlog/jsonlogbytes_test.go b/pkg/jsonlog/jsonlogbytes_test.go new file mode 100644 index 00000000..46ec52c6 --- /dev/null +++ b/pkg/jsonlog/jsonlogbytes_test.go @@ -0,0 +1,37 @@ +package jsonlog + +import ( + "bytes" + "regexp" + "testing" +) + +func TestJSONLogBytesMarshalJSONBuf(t *testing.T) { + logs := map[*JSONLogBytes]string{ + &JSONLogBytes{Log: []byte(`"A log line with \\"`)}: `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":}$`, + &JSONLogBytes{Log: []byte("A log line")}: `^{\"log\":\"A log line\",\"time\":}$`, + &JSONLogBytes{Log: []byte("A log line with \r")}: `^{\"log\":\"A log line with \\r\",\"time\":}$`, + &JSONLogBytes{Log: []byte("A log line with & < >")}: `^{\"log\":\"A log line with \\u0026 \\u003c \\u003e\",\"time\":}$`, + &JSONLogBytes{Log: []byte("A log line with utf8 : 🚀 ψ ω β")}: `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":}$`, + &JSONLogBytes{Stream: "stdout"}: `^{\"stream\":\"stdout\",\"time\":}$`, + &JSONLogBytes{Stream: "stdout", Log: []byte("A log line")}: `^{\"log\":\"A log line\",\"stream\":\"stdout\",\"time\":}$`, + &JSONLogBytes{Created: "time"}: `^{\"time\":time}$`, + &JSONLogBytes{}: `^{\"time\":}$`, + // These ones are a little weird + &JSONLogBytes{Log: []byte("\u2028 \u2029")}: `^{\"log\":\"\\u2028 \\u2029\",\"time\":}$`, + &JSONLogBytes{Log: []byte{0xaF}}: `^{\"log\":\"\\ufffd\",\"time\":}$`, + &JSONLogBytes{Log: []byte{0x7F}}: `^{\"log\":\"\x7f\",\"time\":}$`, + } + for jsonLog, expression := range logs { + var buf bytes.Buffer + if err := jsonLog.MarshalJSONBuf(&buf); err != nil { + t.Fatal(err) + } + res := buf.String() + t.Logf("Result of WriteLog: %q", res) + logRe := regexp.MustCompile(expression) + if !logRe.MatchString(res) { + t.Fatalf("Log line not in expected format [%v]: %q", expression, res) + } + } +} diff --git a/pkg/jsonmessage/jsonmessage.go b/pkg/jsonmessage/jsonmessage.go new file mode 100644 index 00000000..7db1626e --- /dev/null +++ b/pkg/jsonmessage/jsonmessage.go @@ -0,0 +1,172 @@ +package jsonmessage + +import ( + "encoding/json" + "fmt" + "io" + "strings" + "time" + + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/pkg/timeutils" + "github.com/docker/docker/pkg/units" +) + +type JSONError struct { + Code int `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (e *JSONError) Error() string { + return e.Message +} + +type JSONProgress struct { + terminalFd 
uintptr
+	Current    int   `json:"current,omitempty"`
+	Total      int   `json:"total,omitempty"`
+	Start      int64 `json:"start,omitempty"`
+}
+
+func (p *JSONProgress) String() string {
+	var (
+		width       = 200
+		pbBox       string
+		numbersBox  string
+		timeLeftBox string
+	)
+
+	ws, err := term.GetWinsize(p.terminalFd)
+	if err == nil {
+		width = int(ws.Width)
+	}
+
+	if p.Current <= 0 && p.Total <= 0 {
+		return ""
+	}
+	current := units.HumanSize(float64(p.Current))
+	if p.Total <= 0 {
+		return fmt.Sprintf("%8v", current)
+	}
+	total := units.HumanSize(float64(p.Total))
+	percentage := int(float64(p.Current)/float64(p.Total)*100) / 2
+	if percentage > 50 {
+		percentage = 50
+	}
+	if width > 110 {
+		// this number can't be negative (gh#7136)
+		numSpaces := 0
+		if 50-percentage > 0 {
+			numSpaces = 50 - percentage
+		}
+		pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces))
+	}
+	numbersBox = fmt.Sprintf("%8v/%v", current, total)
+
+	if p.Current > 0 && p.Start > 0 && percentage < 50 {
+		fromStart := time.Now().UTC().Sub(time.Unix(int64(p.Start), 0))
+		perEntry := fromStart / time.Duration(p.Current)
+		left := time.Duration(p.Total-p.Current) * perEntry
+		left = (left / time.Second) * time.Second
+
+		if width > 50 {
+			timeLeftBox = " " + left.String()
+		}
+	}
+	return pbBox + numbersBox + timeLeftBox
+}
+
+type JSONMessage struct {
+	Stream          string        `json:"stream,omitempty"`
+	Status          string        `json:"status,omitempty"`
+	Progress        *JSONProgress `json:"progressDetail,omitempty"`
+	ProgressMessage string        `json:"progress,omitempty"` // deprecated
+	ID              string        `json:"id,omitempty"`
+	From            string        `json:"from,omitempty"`
+	Time            int64         `json:"time,omitempty"`
+	Error           *JSONError    `json:"errorDetail,omitempty"`
+	ErrorMessage    string        `json:"error,omitempty"` // deprecated
+}
+
+func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error {
+	if jm.Error != nil {
+		if jm.Error.Code == 401 {
+			return fmt.Errorf("Authentication is required.")
+		}
+		return jm.Error
+	}
+	var endl string
+	if isTerminal && jm.Stream == "" && jm.Progress != nil {
+		// [2K = erase entire current line
+		fmt.Fprintf(out, "%c[2K\r", 27)
+		endl = "\r"
+	} else if jm.Progress != nil && jm.Progress.String() != "" { // progress bars are disabled in non-terminal output
+		return nil
+	}
+	if jm.Time != 0 {
+		fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(timeutils.RFC3339NanoFixed))
+	}
+	if jm.ID != "" {
+		fmt.Fprintf(out, "%s: ", jm.ID)
+	}
+	if jm.From != "" {
+		fmt.Fprintf(out, "(from %s) ", jm.From)
+	}
+	if jm.Progress != nil && isTerminal {
+		fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl)
+	} else if jm.ProgressMessage != "" { // deprecated
+		fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl)
+	} else if jm.Stream != "" {
+		fmt.Fprintf(out, "%s%s", jm.Stream, endl)
+	} else {
+		fmt.Fprintf(out, "%s%s\n", jm.Status, endl)
+	}
+	return nil
+}
+
+func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool) error {
+	var (
+		dec  = json.NewDecoder(in)
+		ids  = make(map[string]int)
+		diff = 0
+	)
+	for {
+		var jm JSONMessage
+		if err := dec.Decode(&jm); err != nil {
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+
+		if jm.Progress != nil {
+			jm.Progress.terminalFd = terminalFd
+		}
+		if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") {
+			line, ok := ids[jm.ID]
+			if !ok {
+				line = len(ids)
+				ids[jm.ID] = line
+				if isTerminal {
+					fmt.Fprintf(out, "\n")
+				}
+				diff = 0
+			} else {
+				diff = len(ids) - line
+			}
+			if jm.ID != "" && isTerminal {
+				// 
[{diff}A = move cursor up diff rows + fmt.Fprintf(out, "%c[%dA", 27, diff) + } + } + err := jm.Display(out, isTerminal) + if jm.ID != "" && isTerminal { + // [{diff}B = move cursor down diff rows + fmt.Fprintf(out, "%c[%dB", 27, diff) + } + if err != nil { + return err + } + } + return nil +} diff --git a/pkg/jsonmessage/jsonmessage_test.go b/pkg/jsonmessage/jsonmessage_test.go new file mode 100644 index 00000000..2e78fa7e --- /dev/null +++ b/pkg/jsonmessage/jsonmessage_test.go @@ -0,0 +1,210 @@ +package jsonmessage + +import ( + "bytes" + "fmt" + "testing" + "time" + + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/pkg/timeutils" + "strings" +) + +func TestError(t *testing.T) { + je := JSONError{404, "Not found"} + if je.Error() != "Not found" { + t.Fatalf("Expected 'Not found' got '%s'", je.Error()) + } +} + +func TestProgress(t *testing.T) { + jp := JSONProgress{} + if jp.String() != "" { + t.Fatalf("Expected empty string, got '%s'", jp.String()) + } + + expected := " 1 B" + jp2 := JSONProgress{Current: 1} + if jp2.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp2.String()) + } + + expectedStart := "[==========> ] 20 B/100 B" + jp3 := JSONProgress{Current: 20, Total: 100, Start: time.Now().Unix()} + // Just look at the start of the string + // (the remaining time is really hard to test -_-) + if jp3.String()[:len(expectedStart)] != expectedStart { + t.Fatalf("Expected to start with %q, got %q", expectedStart, jp3.String()) + } + + expected = "[=========================> ] 50 B/100 B" + jp4 := JSONProgress{Current: 50, Total: 100} + if jp4.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp4.String()) + } + + // this number can't be negative gh#7136 + expected = "[==================================================>] 50 B/40 B" + jp5 := JSONProgress{Current: 50, Total: 40} + if jp5.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp5.String()) + } +} + +func TestJSONMessageDisplay(t *testing.T) { + now := time.Now().Unix() + messages := map[JSONMessage][]string{ + // Empty + JSONMessage{}: {"\n", "\n"}, + // Status + JSONMessage{ + Status: "status", + }: { + "status\n", + "status\n", + }, + // General + JSONMessage{ + Time: now, + ID: "ID", + From: "From", + Status: "status", + }: { + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now, 0).Format(timeutils.RFC3339NanoFixed)), + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now, 0).Format(timeutils.RFC3339NanoFixed)), + }, + // Stream over status + JSONMessage{ + Status: "status", + Stream: "stream", + }: { + "stream", + "stream", + }, + // With progress message + JSONMessage{ + Status: "status", + ProgressMessage: "progressMessage", + }: { + "status progressMessage", + "status progressMessage", + }, + // With progress, stream empty + JSONMessage{ + Status: "status", + Stream: "", + Progress: &JSONProgress{Current: 1}, + }: { + "", + fmt.Sprintf("%c[2K\rstatus 1 B\r", 27), + }, + } + + // The tests :) + for jsonMessage, expectedMessages := range messages { + // Without terminal + data := bytes.NewBuffer([]byte{}) + if err := jsonMessage.Display(data, false); err != nil { + t.Fatal(err) + } + if data.String() != expectedMessages[0] { + t.Fatalf("Expected [%v], got [%v]", expectedMessages[0], data.String()) + } + // With terminal + data = bytes.NewBuffer([]byte{}) + if err := jsonMessage.Display(data, true); err != nil { + t.Fatal(err) + } + if data.String() != expectedMessages[1] { + t.Fatalf("Expected [%v], got [%v]", expectedMessages[1], data.String()) + 
}
+	}
+}
+
+// Test JSONMessage with an Error. The error text should be returned as-is;
+// it is not mapped to a message derived from the HTTP code.
+func TestJSONMessageDisplayWithJSONError(t *testing.T) {
+	data := bytes.NewBuffer([]byte{})
+	jsonMessage := JSONMessage{Error: &JSONError{404, "Can't find it"}}
+
+	err := jsonMessage.Display(data, true)
+	if err == nil || err.Error() != "Can't find it" {
+		t.Fatalf("Expected a JSONError 404, got [%v]", err)
+	}
+
+	jsonMessage = JSONMessage{Error: &JSONError{401, "Anything"}}
+	err = jsonMessage.Display(data, true)
+	if err == nil || err.Error() != "Authentication is required." {
+		t.Fatalf("Expected an error [Authentication is required.], got [%v]", err)
+	}
+}
+
+func TestDisplayJSONMessagesStreamInvalidJSON(t *testing.T) {
+	var (
+		inFd uintptr
+	)
+	data := bytes.NewBuffer([]byte{})
+	reader := strings.NewReader("This is not a 'valid' JSON []")
+	inFd, _ = term.GetFdInfo(reader)
+
+	if err := DisplayJSONMessagesStream(reader, data, inFd, false); err == nil || !strings.HasPrefix(err.Error(), "invalid character") {
+		t.Fatalf("Should have returned an error (invalid character in ..), got [%v]", err)
+	}
+}
+
+func TestDisplayJSONMessagesStream(t *testing.T) {
+	var (
+		inFd uintptr
+	)
+
+	messages := map[string][]string{
+		// empty string
+		"": {
+			"",
+			""},
+		// Without progress & ID
+		"{ \"status\": \"status\" }": {
+			"status\n",
+			"status\n",
+		},
+		// Without progress, with ID
+		"{ \"id\": \"ID\",\"status\": \"status\" }": {
+			"ID: status\n",
+			fmt.Sprintf("ID: status\n%c[%dB", 27, 0),
+		},
+		// With progress
+		"{ \"id\": \"ID\", \"status\": \"status\", \"progress\": \"ProgressMessage\" }": {
+			"ID: status ProgressMessage",
+			fmt.Sprintf("\n%c[%dAID: status ProgressMessage%c[%dB", 27, 0, 27, 0),
+		},
+		// With progressDetail
+		"{ \"id\": \"ID\", \"status\": \"status\", \"progressDetail\": { \"Current\": 1} }": {
+			"", // progress bar is disabled in non-terminal output
+			fmt.Sprintf("\n%c[%dA%c[2K\rID: status      1 B\r%c[%dB", 27, 0, 27, 27, 0),
+		},
+	}
+	for jsonMessage, expectedMessages := range messages {
+		data := bytes.NewBuffer([]byte{})
+		reader := strings.NewReader(jsonMessage)
+		inFd, _ = term.GetFdInfo(reader)
+
+		// Without terminal
+		if err := DisplayJSONMessagesStream(reader, data, inFd, false); err != nil {
+			t.Fatal(err)
+		}
+		if data.String() != expectedMessages[0] {
+			t.Fatalf("Expected an [%v], got [%v]", expectedMessages[0], data.String())
+		}
+
+		// With terminal
+		data = bytes.NewBuffer([]byte{})
+		reader = strings.NewReader(jsonMessage)
+		if err := DisplayJSONMessagesStream(reader, data, inFd, true); err != nil {
+			t.Fatal(err)
+		}
+		if data.String() != expectedMessages[1] {
+			t.Fatalf("Expected an [%v], got [%v]", expectedMessages[1], data.String())
+		}
+	}
+}
diff --git a/pkg/listenbuffer/README.md b/pkg/listenbuffer/README.md
new file mode 100644
index 00000000..22735098
--- /dev/null
+++ b/pkg/listenbuffer/README.md
@@ -0,0 +1,27 @@
+# listenbuffer
+
+listenbuffer uses the kernel's listening backlog functionality to queue
+connections, allowing applications to start listening immediately and handle
+connections later. Activation is signaled by closing the activation channel
+passed to the constructor.
+
+The maximum number of queued connections depends on the configuration of your
+kernel (typically called SOMAXCONN) and cannot be configured in Go with the
+net package. See `src/net/sock_platform.go` in the Go tree or consult your
+kernel's manual.
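+
+For example: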
+
+    activator := make(chan struct{})
+    buffer, err := NewListenBuffer("tcp", "localhost:4000", activator)
+    if err != nil {
+        panic(err)
+    }
+
+    // will block until activator has been closed or is sent an event
+    client, err := buffer.Accept()
+
+Somewhere else in your application once it's been booted:
+
+    close(activator)
+
+`buffer.Accept()` will return the first client in the kernel listening queue, or
+continue to block until a client connects or an error occurs.
diff --git a/pkg/listenbuffer/buffer.go b/pkg/listenbuffer/buffer.go
new file mode 100644
index 00000000..aa47471c
--- /dev/null
+++ b/pkg/listenbuffer/buffer.go
@@ -0,0 +1,76 @@
+/*
+Package listenbuffer uses the kernel's listening backlog functionality to queue
+connections, allowing applications to start listening immediately and handle
+connections later. Activation is signaled by closing the activation channel
+passed to the constructor.
+
+The maximum number of queued connections depends on the configuration of your
+kernel (typically called SOMAXCONN) and cannot be configured in Go with the
+net package. See `src/net/sock_platform.go` in the Go tree or consult your
+kernel's manual.
+
+	activator := make(chan struct{})
+	buffer, err := NewListenBuffer("tcp", "localhost:4000", activator)
+	if err != nil {
+		panic(err)
+	}
+
+	// will block until activator has been closed or is sent an event
+	client, err := buffer.Accept()
+
+Somewhere else in your application once it's been booted:
+
+	close(activator)
+
+`buffer.Accept()` will return the first client in the kernel listening queue, or
+continue to block until a client connects or an error occurs.
+*/
+package listenbuffer
+
+import "net"
+
+// NewListenBuffer returns a net.Listener listening on addr with the protocol
+// passed. The channel passed is used to activate the listenbuffer when the
+// caller is ready to accept connections.
+func NewListenBuffer(proto, addr string, activate <-chan struct{}) (net.Listener, error) {
+	wrapped, err := net.Listen(proto, addr)
+	if err != nil {
+		return nil, err
+	}
+
+	return &defaultListener{
+		wrapped:  wrapped,
+		activate: activate,
+	}, nil
+}
+
+// defaultListener is the buffered wrapper around the net.Listener
+type defaultListener struct {
+	wrapped  net.Listener    // The net.Listener wrapped by listenbuffer
+	ready    bool            // Whether the listenbuffer has been activated
+	activate <-chan struct{} // Channel to control activation of the listenbuffer
+}
+
+// Close closes the wrapped socket.
+func (l *defaultListener) Close() error {
+	return l.wrapped.Close()
+}
+
+// Addr returns the listening address of the wrapped socket.
+func (l *defaultListener) Addr() net.Addr {
+	return l.wrapped.Addr()
+}
+
+// Accept returns a client connection on the wrapped socket if the listen buffer
+// has been activated. To activate the listenbuffer the activation channel passed
+// to NewListenBuffer must have been closed or sent an event.
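+// Once activated, the listener stays ready and subsequent calls are passed
+// straight through to the wrapped listener.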
+func (l *defaultListener) Accept() (net.Conn, error) {
+	// if the listener has been told it is ready then we can go ahead and
+	// start returning connections
+	if l.ready {
+		return l.wrapped.Accept()
+	}
+	<-l.activate
+	l.ready = true
+	return l.Accept()
+}
diff --git a/pkg/listenbuffer/listen_buffer_test.go b/pkg/listenbuffer/listen_buffer_test.go
new file mode 100644
index 00000000..6ffd2f79
--- /dev/null
+++ b/pkg/listenbuffer/listen_buffer_test.go
@@ -0,0 +1,41 @@
+package listenbuffer
+
+import (
+	"io/ioutil"
+	"net"
+	"testing"
+)
+
+func TestListenBufferAllowsAcceptingWhenActivated(t *testing.T) {
+	lock := make(chan struct{})
+	buffer, err := NewListenBuffer("tcp", "", lock)
+	if err != nil {
+		t.Fatal("Unable to create listen buffer: ", err)
+	}
+
+	go func() {
+		conn, err := net.Dial("tcp", buffer.Addr().String())
+		if err != nil {
+			// t.Fatal must not be called from a goroutine other than the
+			// test's own; t.Error is safe here.
+			t.Error("Client failed to establish connection to server: ", err)
+			return
+		}
+
+		conn.Write([]byte("ping"))
+		conn.Close()
+	}()
+
+	close(lock)
+
+	client, err := buffer.Accept()
+	if err != nil {
+		t.Fatal("Failed to accept client: ", err)
+	}
+
+	response, err := ioutil.ReadAll(client)
+	if err != nil {
+		t.Fatal("Failed to read from client: ", err)
+	}
+
+	if string(response) != "ping" {
+		t.Fatal("Expected to receive ping from client, received: ", string(response))
+	}
+}
diff --git a/pkg/mflag/LICENSE b/pkg/mflag/LICENSE
new file mode 100644
index 00000000..ac74d8f0
--- /dev/null
+++ b/pkg/mflag/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2014-2015 The Docker & Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/pkg/mflag/README.md b/pkg/mflag/README.md
new file mode 100644
index 00000000..da00efa3
--- /dev/null
+++ b/pkg/mflag/README.md
@@ -0,0 +1,40 @@
+Package mflag (aka multiple-flag) implements command-line flag parsing.
+It's a **hacky** fork of the [official golang package](http://golang.org/pkg/flag/).
+
+It adds:
+
+* both short and long flag versions:
+`./example -s red` `./example --string blue`
+
+* multiple names for the same option:
+```
+$>./example -h
+Usage of example:
+  -s, --string="": a simple string
+```
+
+___
+It is very flexible on purpose, so you can do things like:
+```
+$>./example -h
+Usage of example:
+  -s, -string, --string="": a simple string
+```
+
+Or:
+```
+$>./example -h
+Usage of example:
+  -oldflag, --newflag="": a simple string
+```
+
+You can also hide some flags from the usage, so if we want only `--newflag`:
+```
+$>./example -h
+Usage of example:
+  --newflag="": a simple string
+$>./example -oldflag str
+str
+```
+
+See [example.go](example/example.go) for more details.
diff --git a/pkg/mflag/example/example.go b/pkg/mflag/example/example.go
new file mode 100644
index 00000000..2e766dd1
--- /dev/null
+++ b/pkg/mflag/example/example.go
@@ -0,0 +1,36 @@
+package main
+
+import (
+	"fmt"
+
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+var (
+	i        int
+	str      string
+	b, b2, h bool
+)
+
+func init() {
+	flag.Bool([]string{"#hp", "#-halp"}, false, "display the halp")
+	flag.BoolVar(&b, []string{"b", "#bal", "#bol", "-bal"}, false, "a simple bool")
+	flag.BoolVar(&b, []string{"g", "#gil"}, false, "a simple bool")
+	flag.BoolVar(&b2, []string{"#-bool"}, false, "a simple bool")
+	flag.IntVar(&i, []string{"-integer", "-number"}, -1, "a simple integer")
+	flag.StringVar(&str, []string{"s", "#hidden", "-string"}, "", "a simple string") // -s, -hidden and --string will work, but -hidden won't be in the usage
+	flag.BoolVar(&h, []string{"h", "#help", "-help"}, false, "display the help")
+	flag.StringVar(&str, []string{"mode"}, "mode1", "set the mode\nmode1: use the mode1\nmode2: use the mode2\nmode3: use the mode3")
+	flag.Parse()
+}
+func main() {
+	if h {
+		flag.PrintDefaults()
+	} else {
+		fmt.Printf("s/#hidden/-string: %s\n", str)
+		fmt.Printf("b: %t\n", b)
+		fmt.Printf("-bool: %t\n", b2)
+		fmt.Printf("s/#hidden/-string(via lookup): %s\n", flag.Lookup("s").Value.String())
+		fmt.Printf("ARGS: %v\n", flag.Args())
+	}
+}
diff --git a/pkg/mflag/flag.go b/pkg/mflag/flag.go
new file mode 100644
index 00000000..1c068df5
--- /dev/null
+++ b/pkg/mflag/flag.go
@@ -0,0 +1,1195 @@
+// Copyright 2014-2015 The Docker & Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+	Package flag implements command-line flag parsing.
+
+	Usage:
+
+	Define flags using flag.String(), Bool(), Int(), etc.
+
+	This declares an integer flag, -f or --flagname, stored in the pointer ip, with type *int.
+		import flag "github.com/docker/docker/pkg/mflag"
+		var ip = flag.Int([]string{"f", "-flagname"}, 1234, "help message for flagname")
+	If you like, you can bind the flag to a variable using the Var() functions.
+		var flagvar int
+		func init() {
+			// -flaghidden will work, but will be hidden from the usage
+			flag.IntVar(&flagvar, []string{"f", "#flaghidden", "-flagname"}, 1234, "help message for flagname")
+		}
+	Or you can create custom flags that satisfy the Value interface (with
+	pointer receivers) and couple them to flag parsing by
+		flag.Var(&flagVal, []string{"name"}, "help message for flagname")
+	For such flags, the default value is just the initial value of the variable.
+
+	You can also add "deprecated" flags; they are still usable, but are not shown
+	in the usage and will display a warning when you try to use them. A `#` before
+	an option name marks that name as deprecated; if a following name without `#`
+	exists, that name is the replacement, otherwise the deprecated option will
+	simply be removed:
+		var ip = flag.Int([]string{"#f", "#flagname", "-flagname"}, 1234, "help message for flagname")
+	depending on which deprecated name is used, this will display:
+		`Warning: '-f' is deprecated, it will be replaced by '--flagname' soon. See usage.`
+	or
+		`Warning: '-flagname' is deprecated, it will be replaced by '--flagname' soon. See usage.`
+	while
+		var ip = flag.Int([]string{"f", "#flagname"}, 1234, "help message for flagname")
+	will display:
+		`Warning: '-flagname' is deprecated, it will be removed soon. See usage.`
+	so you can only use `-f`.
+
+	You can also group one-letter flags; if you declare
+		var v = flag.Bool([]string{"v", "-verbose"}, false, "help message for verbose")
+		var s = flag.Bool([]string{"s", "-slow"}, false, "help message for slow")
+	you will be able to use -vs or -sv.
+
+	After all flags are defined, call
+		flag.Parse()
+	to parse the command line into the defined flags.
+
+	Flags may then be used directly. If you're using the flags themselves,
+	they are all pointers; if you bind to variables, they're values.
+		fmt.Println("ip has value ", *ip)
+		fmt.Println("flagvar has value ", flagvar)
+
+	After parsing, the arguments after the flag are available as the
+	slice flag.Args() or individually as flag.Arg(i).
+	The arguments are indexed from 0 through flag.NArg()-1.
+
+	Command line flag syntax:
+		-flag
+		-flag=x
+		-flag="x"
+		-flag='x'
+		-flag x  // non-boolean flags only
+	One or two minus signs may be used; they are equivalent.
+	The last form is not permitted for boolean flags because the
+	meaning of the command
+		cmd -x *
+	will change if there is a file called 0, false, etc. You must
+	use the -flag=false form to turn off a boolean flag.
+
+	Flag parsing stops just before the first non-flag argument
+	("-" is a non-flag argument) or after the terminator "--".
+
+	Integer flags accept 1234, 0664, 0x1234 and may be negative.
+	Boolean flags may be 1, 0, t, f, true, false, TRUE, FALSE, True, False.
+	Duration flags accept any input valid for time.ParseDuration.
+
+	The default set of command-line flags is controlled by
+	top-level functions. The FlagSet type allows one to define
+	independent sets of flags, such as to implement subcommands
+	in a command-line interface. The methods of FlagSet are
+	analogous to the top-level functions for the command-line
+	flag set.
+*/
+package mflag
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+	"text/tabwriter"
+	"time"
+
+	"github.com/docker/docker/pkg/homedir"
+)
+
+// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
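+// parseOne returns it when -h, -help or --help is passed but no such flag
+// has been defined.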
+var ErrHelp = errors.New("flag: help requested") + +// ErrRetry is the error returned if you need to try letter by letter +var ErrRetry = errors.New("flag: retry") + +// -- bool Value +type boolValue bool + +func newBoolValue(val bool, p *bool) *boolValue { + *p = val + return (*boolValue)(p) +} + +func (b *boolValue) Set(s string) error { + v, err := strconv.ParseBool(s) + *b = boolValue(v) + return err +} + +func (b *boolValue) Get() interface{} { return bool(*b) } + +func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) } + +func (b *boolValue) IsBoolFlag() bool { return true } + +// optional interface to indicate boolean flags that can be +// supplied without "=value" text +type boolFlag interface { + Value + IsBoolFlag() bool +} + +// -- int Value +type intValue int + +func newIntValue(val int, p *int) *intValue { + *p = val + return (*intValue)(p) +} + +func (i *intValue) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = intValue(v) + return err +} + +func (i *intValue) Get() interface{} { return int(*i) } + +func (i *intValue) String() string { return fmt.Sprintf("%v", *i) } + +// -- int64 Value +type int64Value int64 + +func newInt64Value(val int64, p *int64) *int64Value { + *p = val + return (*int64Value)(p) +} + +func (i *int64Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = int64Value(v) + return err +} + +func (i *int64Value) Get() interface{} { return int64(*i) } + +func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) } + +// -- uint Value +type uintValue uint + +func newUintValue(val uint, p *uint) *uintValue { + *p = val + return (*uintValue)(p) +} + +func (i *uintValue) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uintValue(v) + return err +} + +func (i *uintValue) Get() interface{} { return uint(*i) } + +func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) } + +// -- uint64 Value +type uint64Value uint64 + +func newUint64Value(val uint64, p *uint64) *uint64Value { + *p = val + return (*uint64Value)(p) +} + +func (i *uint64Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uint64Value(v) + return err +} + +func (i *uint64Value) Get() interface{} { return uint64(*i) } + +func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) } + +// -- string Value +type stringValue string + +func newStringValue(val string, p *string) *stringValue { + *p = val + return (*stringValue)(p) +} + +func (s *stringValue) Set(val string) error { + *s = stringValue(val) + return nil +} + +func (s *stringValue) Get() interface{} { return string(*s) } + +func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) } + +// -- float64 Value +type float64Value float64 + +func newFloat64Value(val float64, p *float64) *float64Value { + *p = val + return (*float64Value)(p) +} + +func (f *float64Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 64) + *f = float64Value(v) + return err +} + +func (f *float64Value) Get() interface{} { return float64(*f) } + +func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) } + +// -- time.Duration Value +type durationValue time.Duration + +func newDurationValue(val time.Duration, p *time.Duration) *durationValue { + *p = val + return (*durationValue)(p) +} + +func (d *durationValue) Set(s string) error { + v, err := time.ParseDuration(s) + *d = durationValue(v) + return err +} + +func (d *durationValue) Get() interface{} { return time.Duration(*d) } + +func (d *durationValue) String() 
string { return (*time.Duration)(d).String() } + +// Value is the interface to the dynamic value stored in a flag. +// (The default value is represented as a string.) +// +// If a Value has an IsBoolFlag() bool method returning true, +// the command-line parser makes -name equivalent to -name=true +// rather than using the next command-line argument. +type Value interface { + String() string + Set(string) error +} + +// Getter is an interface that allows the contents of a Value to be retrieved. +// It wraps the Value interface, rather than being part of it, because it +// appeared after Go 1 and its compatibility rules. All Value types provided +// by this package satisfy the Getter interface. +type Getter interface { + Value + Get() interface{} +} + +// ErrorHandling defines how to handle flag parsing errors. +type ErrorHandling int + +const ( + ContinueOnError ErrorHandling = iota + ExitOnError + PanicOnError +) + +// A FlagSet represents a set of defined flags. The zero value of a FlagSet +// has no name and has ContinueOnError error handling. +type FlagSet struct { + // Usage is the function called when an error occurs while parsing flags. + // The field is a function (not a method) that may be changed to point to + // a custom error handler. + Usage func() + ShortUsage func() + + name string + parsed bool + actual map[string]*Flag + formal map[string]*Flag + args []string // arguments after flags + errorHandling ErrorHandling + output io.Writer // nil means stderr; use Out() accessor + nArgRequirements []nArgRequirement +} + +// A Flag represents the state of a flag. +type Flag struct { + Names []string // name as it appears on command line + Usage string // help message + Value Value // value as set + DefValue string // default value (as text); for usage message +} + +type flagSlice []string + +func (p flagSlice) Len() int { return len(p) } +func (p flagSlice) Less(i, j int) bool { + pi, pj := strings.TrimPrefix(p[i], "-"), strings.TrimPrefix(p[j], "-") + lpi, lpj := strings.ToLower(pi), strings.ToLower(pj) + if lpi != lpj { + return lpi < lpj + } + return pi < pj +} +func (p flagSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// sortFlags returns the flags as a slice in lexicographical sorted order. +func sortFlags(flags map[string]*Flag) []*Flag { + var list flagSlice + + // The sorted list is based on the first name, when flag map might use the other names. + nameMap := make(map[string]string) + + for n, f := range flags { + fName := strings.TrimPrefix(f.Names[0], "#") + nameMap[fName] = n + if len(f.Names) == 1 { + list = append(list, fName) + continue + } + + found := false + for _, name := range list { + if name == fName { + found = true + break + } + } + if !found { + list = append(list, fName) + } + } + sort.Sort(list) + result := make([]*Flag, len(list)) + for i, name := range list { + result[i] = flags[nameMap[name]] + } + return result +} + +// Name returns the name of the FlagSet. +func (f *FlagSet) Name() string { + return f.name +} + +// Out returns the destination for usage and error messages. +func (f *FlagSet) Out() io.Writer { + if f.output == nil { + return os.Stderr + } + return f.output +} + +// SetOutput sets the destination for usage and error messages. +// If output is nil, os.Stderr is used. +func (f *FlagSet) SetOutput(output io.Writer) { + f.output = output +} + +// VisitAll visits the flags in lexicographical order, calling fn for each. +// It visits all flags, even those not set. 
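+// Ordering is based on each flag's first name, with any leading `#`
+// (the deprecation marker) stripped; see sortFlags.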
+func (f *FlagSet) VisitAll(fn func(*Flag)) {
+	for _, flag := range sortFlags(f.formal) {
+		fn(flag)
+	}
+}
+
+// VisitAll visits the command-line flags in lexicographical order, calling
+// fn for each. It visits all flags, even those not set.
+func VisitAll(fn func(*Flag)) {
+	CommandLine.VisitAll(fn)
+}
+
+// Visit visits the flags in lexicographical order, calling fn for each.
+// It visits only those flags that have been set.
+func (f *FlagSet) Visit(fn func(*Flag)) {
+	for _, flag := range sortFlags(f.actual) {
+		fn(flag)
+	}
+}
+
+// Visit visits the command-line flags in lexicographical order, calling fn
+// for each. It visits only those flags that have been set.
+func Visit(fn func(*Flag)) {
+	CommandLine.Visit(fn)
+}
+
+// Lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) Lookup(name string) *Flag {
+	return f.formal[name]
+}
+
+// IsSet indicates whether the named flag was set at all on the command line.
+func (f *FlagSet) IsSet(name string) bool {
+	return f.actual[name] != nil
+}
+
+// Lookup returns the Flag structure of the named command-line flag,
+// returning nil if none exists.
+func Lookup(name string) *Flag {
+	return CommandLine.formal[name]
+}
+
+// IsSet indicates whether the named flag was set at all on the command line.
+func IsSet(name string) bool {
+	return CommandLine.IsSet(name)
+}
+
+type nArgRequirementType int
+
+// Indicators used as the first argument to FlagSet.Require
+const (
+	Exact nArgRequirementType = iota
+	Max
+	Min
+)
+
+type nArgRequirement struct {
+	Type nArgRequirementType
+	N    int
+}
+
+// Require adds a requirement about the number of arguments for the FlagSet.
+// The first parameter can be Exact, Max, or Min to respectively specify the exact,
+// the maximum, or the minimum number of arguments required.
+// The actual check is done in FlagSet.CheckArgs().
+func (f *FlagSet) Require(nArgRequirementType nArgRequirementType, nArg int) {
+	f.nArgRequirements = append(f.nArgRequirements, nArgRequirement{nArgRequirementType, nArg})
+}
+
+// CheckArgs uses the requirements set by FlagSet.Require() to validate
+// the number of arguments. If the requirements are not met,
+// an error message string is returned.
+func (f *FlagSet) CheckArgs() (message string) {
+	for _, req := range f.nArgRequirements {
+		var arguments string
+		if req.N == 1 {
+			arguments = "1 argument"
+		} else {
+			arguments = fmt.Sprintf("%d arguments", req.N)
+		}
+
+		str := func(kind string) string {
+			return fmt.Sprintf("%q requires %s%s", f.name, kind, arguments)
+		}
+
+		switch req.Type {
+		case Exact:
+			if f.NArg() != req.N {
+				return str("")
+			}
+		case Max:
+			if f.NArg() > req.N {
+				return str("a maximum of ")
+			}
+		case Min:
+			if f.NArg() < req.N {
+				return str("a minimum of ")
+			}
+		}
+	}
+	return ""
+}
+
+// Set sets the value of the named flag.
+func (f *FlagSet) Set(name, value string) error {
+	flag, ok := f.formal[name]
+	if !ok {
+		return fmt.Errorf("no such flag -%v", name)
+	}
+	if err := flag.Value.Set(value); err != nil {
+		return err
+	}
+	if f.actual == nil {
+		f.actual = make(map[string]*Flag)
+	}
+	f.actual[name] = flag
+	return nil
+}
+
+// Set sets the value of the named command-line flag.
+func Set(name, value string) error {
+	return CommandLine.Set(name, value)
+}
+
+// PrintDefaults prints, to standard error unless configured
+// otherwise, the default values of all defined flags in the set.
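+// Deprecated flag names (those prefixed with `#`) are omitted, and a default
+// value that begins with the user's home directory is abbreviated using the
+// platform's home shortcut (for example `~` on Unix).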
+func (f *FlagSet) PrintDefaults() {
+	writer := tabwriter.NewWriter(f.Out(), 20, 1, 3, ' ', 0)
+	home := homedir.Get()
+
+	// Don't substitute when HOME is /
+	if runtime.GOOS != "windows" && home == "/" {
+		home = ""
+	}
+
+	// Add a blank line between cmd description and list of options
+	if f.FlagCount() > 0 {
+		fmt.Fprintln(writer, "")
+	}
+
+	f.VisitAll(func(flag *Flag) {
+		format := "  -%s=%s"
+		names := []string{}
+		for _, name := range flag.Names {
+			if name[0] != '#' {
+				names = append(names, name)
+			}
+		}
+		if len(names) > 0 && len(flag.Usage) > 0 {
+			val := flag.DefValue
+
+			if home != "" && strings.HasPrefix(val, home) {
+				val = homedir.GetShortcutString() + val[len(home):]
+			}
+
+			fmt.Fprintf(writer, format, strings.Join(names, ", -"), val)
+			for i, line := range strings.Split(flag.Usage, "\n") {
+				if i != 0 {
+					line = "    " + line
+				}
+				fmt.Fprintln(writer, "\t", line)
+			}
+		}
+	})
+	writer.Flush()
+}
+
+// PrintDefaults prints to standard error the default values of all defined command-line flags.
+func PrintDefaults() {
+	CommandLine.PrintDefaults()
+}
+
+// defaultUsage is the default function to print a usage message.
+func defaultUsage(f *FlagSet) {
+	if f.name == "" {
+		fmt.Fprintf(f.Out(), "Usage:\n")
+	} else {
+		fmt.Fprintf(f.Out(), "Usage of %s:\n", f.name)
+	}
+	f.PrintDefaults()
+}
+
+// NOTE: Usage is not just defaultUsage(CommandLine)
+// because it serves (via godoc flag Usage) as the example
+// for how to write your own usage function.
+
+// Usage prints to standard error a usage message documenting all defined command-line flags.
+// The function is a variable that may be changed to point to a custom function.
+var Usage = func() {
+	fmt.Fprintf(CommandLine.Out(), "Usage of %s:\n", os.Args[0])
+	PrintDefaults()
+}
+
+// ShortUsage prints to standard error a usage message documenting the standard command layout.
+// The function is a variable that may be changed to point to a custom function.
+var ShortUsage = func() {
+	fmt.Fprintf(CommandLine.Out(), "Usage of %s:\n", os.Args[0])
+}
+
+// FlagCount returns the number of flags that have been defined.
+func (f *FlagSet) FlagCount() int { return len(sortFlags(f.formal)) }
+
+// FlagCountUndeprecated returns the number of undeprecated flags that have been defined.
+func (f *FlagSet) FlagCountUndeprecated() int {
+	count := 0
+	for _, flag := range sortFlags(f.formal) {
+		for _, name := range flag.Names {
+			if name[0] != '#' {
+				count++
+				break
+			}
+		}
+	}
+	return count
+}
+
+// NFlag returns the number of flags that have been set.
+func (f *FlagSet) NFlag() int { return len(f.actual) }
+
+// NFlag returns the number of command-line flags that have been set.
+func NFlag() int { return len(CommandLine.actual) }
+
+// Arg returns the i'th argument. Arg(0) is the first remaining argument
+// after flags have been processed.
+func (f *FlagSet) Arg(i int) string {
+	if i < 0 || i >= len(f.args) {
+		return ""
+	}
+	return f.args[i]
+}
+
+// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument
+// after flags have been processed.
+func Arg(i int) string {
+	return CommandLine.Arg(i)
+}
+
+// NArg is the number of arguments remaining after flags have been processed.
+func (f *FlagSet) NArg() int { return len(f.args) }
+
+// NArg is the number of arguments remaining after flags have been processed.
+func NArg() int { return len(CommandLine.args) }
+
+// Args returns the non-flag arguments.
+func (f *FlagSet) Args() []string { return f.args }
+
+// Args returns the non-flag command-line arguments.
+func Args() []string { return CommandLine.args } + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. +func (f *FlagSet) BoolVar(p *bool, names []string, value bool, usage string) { + f.Var(newBoolValue(value, p), names, usage) +} + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. +func BoolVar(p *bool, names []string, value bool, usage string) { + CommandLine.Var(newBoolValue(value, p), names, usage) +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func (f *FlagSet) Bool(names []string, value bool, usage string) *bool { + p := new(bool) + f.BoolVar(p, names, value, usage) + return p +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func Bool(names []string, value bool, usage string) *bool { + return CommandLine.Bool(names, value, usage) +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func (f *FlagSet) IntVar(p *int, names []string, value int, usage string) { + f.Var(newIntValue(value, p), names, usage) +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func IntVar(p *int, names []string, value int, usage string) { + CommandLine.Var(newIntValue(value, p), names, usage) +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func (f *FlagSet) Int(names []string, value int, usage string) *int { + p := new(int) + f.IntVar(p, names, value, usage) + return p +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func Int(names []string, value int, usage string) *int { + return CommandLine.Int(names, value, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func (f *FlagSet) Int64Var(p *int64, names []string, value int64, usage string) { + f.Var(newInt64Value(value, p), names, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func Int64Var(p *int64, names []string, value int64, usage string) { + CommandLine.Var(newInt64Value(value, p), names, usage) +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func (f *FlagSet) Int64(names []string, value int64, usage string) *int64 { + p := new(int64) + f.Int64Var(p, names, value, usage) + return p +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. 
+// The return value is the address of an int64 variable that stores the value of the flag. +func Int64(names []string, value int64, usage string) *int64 { + return CommandLine.Int64(names, value, usage) +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func (f *FlagSet) UintVar(p *uint, names []string, value uint, usage string) { + f.Var(newUintValue(value, p), names, usage) +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func UintVar(p *uint, names []string, value uint, usage string) { + CommandLine.Var(newUintValue(value, p), names, usage) +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func (f *FlagSet) Uint(names []string, value uint, usage string) *uint { + p := new(uint) + f.UintVar(p, names, value, usage) + return p +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func Uint(names []string, value uint, usage string) *uint { + return CommandLine.Uint(names, value, usage) +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func (f *FlagSet) Uint64Var(p *uint64, names []string, value uint64, usage string) { + f.Var(newUint64Value(value, p), names, usage) +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func Uint64Var(p *uint64, names []string, value uint64, usage string) { + CommandLine.Var(newUint64Value(value, p), names, usage) +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. +func (f *FlagSet) Uint64(names []string, value uint64, usage string) *uint64 { + p := new(uint64) + f.Uint64Var(p, names, value, usage) + return p +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. +func Uint64(names []string, value uint64, usage string) *uint64 { + return CommandLine.Uint64(names, value, usage) +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func (f *FlagSet) StringVar(p *string, names []string, value string, usage string) { + f.Var(newStringValue(value, p), names, usage) +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func StringVar(p *string, names []string, value string, usage string) { + CommandLine.Var(newStringValue(value, p), names, usage) +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. 
+func (f *FlagSet) String(names []string, value string, usage string) *string { + p := new(string) + f.StringVar(p, names, value, usage) + return p +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func String(names []string, value string, usage string) *string { + return CommandLine.String(names, value, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func (f *FlagSet) Float64Var(p *float64, names []string, value float64, usage string) { + f.Var(newFloat64Value(value, p), names, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func Float64Var(p *float64, names []string, value float64, usage string) { + CommandLine.Var(newFloat64Value(value, p), names, usage) +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func (f *FlagSet) Float64(names []string, value float64, usage string) *float64 { + p := new(float64) + f.Float64Var(p, names, value, usage) + return p +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func Float64(names []string, value float64, usage string) *float64 { + return CommandLine.Float64(names, value, usage) +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +func (f *FlagSet) DurationVar(p *time.Duration, names []string, value time.Duration, usage string) { + f.Var(newDurationValue(value, p), names, usage) +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +func DurationVar(p *time.Duration, names []string, value time.Duration, usage string) { + CommandLine.Var(newDurationValue(value, p), names, usage) +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +func (f *FlagSet) Duration(names []string, value time.Duration, usage string) *time.Duration { + p := new(time.Duration) + f.DurationVar(p, names, value, usage) + return p +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +func Duration(names []string, value time.Duration, usage string) *time.Duration { + return CommandLine.Duration(names, value, usage) +} + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. 
For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. +func (f *FlagSet) Var(value Value, names []string, usage string) { + // Remember the default value as a string; it won't change. + flag := &Flag{names, usage, value, value.String()} + for _, name := range names { + name = strings.TrimPrefix(name, "#") + _, alreadythere := f.formal[name] + if alreadythere { + var msg string + if f.name == "" { + msg = fmt.Sprintf("flag redefined: %s", name) + } else { + msg = fmt.Sprintf("%s flag redefined: %s", f.name, name) + } + fmt.Fprintln(f.Out(), msg) + panic(msg) // Happens only if flags are declared with identical names + } + if f.formal == nil { + f.formal = make(map[string]*Flag) + } + f.formal[name] = flag + } +} + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. +func Var(value Value, names []string, usage string) { + CommandLine.Var(value, names, usage) +} + +// failf prints to standard error a formatted error and usage message and +// returns the error. +func (f *FlagSet) failf(format string, a ...interface{}) error { + err := fmt.Errorf(format, a...) + fmt.Fprintln(f.Out(), err) + if os.Args[0] == f.name { + fmt.Fprintf(f.Out(), "See '%s --help'.\n", os.Args[0]) + } else { + fmt.Fprintf(f.Out(), "See '%s %s --help'.\n", os.Args[0], f.name) + } + return err +} + +// usage calls the Usage method for the flag set, or the usage function if +// the flag set is CommandLine. +func (f *FlagSet) usage() { + if f == CommandLine { + Usage() + } else if f.Usage == nil { + defaultUsage(f) + } else { + f.Usage() + } +} + +func trimQuotes(str string) string { + if len(str) == 0 { + return str + } + type quote struct { + start, end byte + } + + // All valid quote types. + quotes := []quote{ + // Double quotes + { + start: '"', + end: '"', + }, + + // Single quotes + { + start: '\'', + end: '\'', + }, + } + + for _, quote := range quotes { + // Only strip if outermost match. + if str[0] == quote.start && str[len(str)-1] == quote.end { + str = str[1 : len(str)-1] + break + } + } + + return str +} + +// parseOne parses one flag. It reports whether a flag was seen. +func (f *FlagSet) parseOne() (bool, string, error) { + if len(f.args) == 0 { + return false, "", nil + } + s := f.args[0] + if len(s) == 0 || s[0] != '-' || len(s) == 1 { + return false, "", nil + } + if s[1] == '-' && len(s) == 2 { // "--" terminates the flags + f.args = f.args[1:] + return false, "", nil + } + name := s[1:] + if len(name) == 0 || name[0] == '=' { + return false, "", f.failf("bad flag syntax: %s", s) + } + + // it's a flag. does it have an argument? + f.args = f.args[1:] + hasValue := false + value := "" + if i := strings.Index(name, "="); i != -1 { + value = trimQuotes(name[i+1:]) + hasValue = true + name = name[:i] + } + + m := f.formal + flag, alreadythere := m[name] // BUG + if !alreadythere { + if name == "-help" || name == "help" || name == "h" { // special case for nice help message. 
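+			// print the usage and surface ErrHelp so Parse can stop cleanly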
+ f.usage()
+ return false, "", ErrHelp
+ }
+ if len(name) > 0 && name[0] == '-' {
+ return false, "", f.failf("flag provided but not defined: -%s", name)
+ }
+ return false, name, ErrRetry
+ }
+ if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg
+ if hasValue {
+ if err := fv.Set(value); err != nil {
+ return false, "", f.failf("invalid boolean value %q for -%s: %v", value, name, err)
+ }
+ } else {
+ fv.Set("true")
+ }
+ } else {
+ // It must have a value, which might be the next argument.
+ if !hasValue && len(f.args) > 0 {
+ // value is the next arg
+ hasValue = true
+ value, f.args = f.args[0], f.args[1:]
+ }
+ if !hasValue {
+ return false, "", f.failf("flag needs an argument: -%s", name)
+ }
+ if err := flag.Value.Set(value); err != nil {
+ return false, "", f.failf("invalid value %q for flag -%s: %v", value, name, err)
+ }
+ }
+ if f.actual == nil {
+ f.actual = make(map[string]*Flag)
+ }
+ f.actual[name] = flag
+ for i, n := range flag.Names {
+ if n == fmt.Sprintf("#%s", name) {
+ replacement := ""
+ for j := i; j < len(flag.Names); j++ {
+ if flag.Names[j][0] != '#' {
+ replacement = flag.Names[j]
+ break
+ }
+ }
+ if replacement != "" {
+ fmt.Fprintf(f.Out(), "Warning: '-%s' is deprecated, it will be replaced by '-%s' soon. See usage.\n", name, replacement)
+ } else {
+ fmt.Fprintf(f.Out(), "Warning: '-%s' is deprecated, it will be removed soon. See usage.\n", name)
+ }
+ }
+ }
+ return true, "", nil
+}
+
+// Parse parses flag definitions from the argument list, which should not
+// include the command name. Must be called after all flags in the FlagSet
+// are defined and before flags are accessed by the program.
+// The return value will be ErrHelp if -help was set but not defined.
+func (f *FlagSet) Parse(arguments []string) error {
+ f.parsed = true
+ f.args = arguments
+ for {
+ seen, name, err := f.parseOne()
+ if seen {
+ continue
+ }
+ if err == nil {
+ break
+ }
+ if err == ErrRetry {
+ // ErrRetry on a multi-letter name may mean a run of combined
+ // single-letter flags (e.g. -fd); re-queue them one at a time.
+ if len(name) > 1 {
+ err = nil
+ for _, letter := range strings.Split(name, "") {
+ f.args = append([]string{"-" + letter}, f.args...)
+ seen2, _, err2 := f.parseOne()
+ if seen2 {
+ continue
+ }
+ if err2 != nil {
+ err = f.failf("flag provided but not defined: -%s", name)
+ break
+ }
+ }
+ if err == nil {
+ continue
+ }
+ } else {
+ err = f.failf("flag provided but not defined: -%s", name)
+ }
+ }
+ switch f.errorHandling {
+ case ContinueOnError:
+ return err
+ case ExitOnError:
+ os.Exit(2)
+ case PanicOnError:
+ panic(err)
+ }
+ }
+ return nil
+}
+
+// ParseFlags is a utility function that adds a help flag if withHelp is true,
+// calls cmd.Parse(args) and prints a relevant error message if an incorrect
+// number of arguments is given. It returns an error only if error handling is
+// set to ContinueOnError and parsing fails. If error handling is set to
+// ExitOnError, it's safe to ignore the return value.
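+//
+// A minimal usage sketch (the command name and flags here are illustrative
+// only, not part of this package):
+//
+//	cmd := NewFlagSet("inspect", ContinueOnError)
+//	size := cmd.Bool([]string{"s", "-size"}, false, "display sizes")
+//	if err := cmd.ParseFlags(args, true); err != nil {
+//		return err
+//	}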
+func (cmd *FlagSet) ParseFlags(args []string, withHelp bool) error {
+ var help *bool
+ if withHelp {
+ help = cmd.Bool([]string{"#help", "-help"}, false, "Print usage")
+ }
+ if err := cmd.Parse(args); err != nil {
+ return err
+ }
+ if help != nil && *help {
+ cmd.SetOutput(os.Stdout)
+ cmd.Usage()
+ os.Exit(0)
+ }
+ if str := cmd.CheckArgs(); str != "" {
+ cmd.SetOutput(os.Stderr)
+ cmd.ReportError(str, withHelp)
+ cmd.ShortUsage()
+ os.Exit(1)
+ }
+ return nil
+}
+
+// ReportError prints str to the flag set's output, followed by a pointer to
+// the command's --help when withHelp is true.
+func (cmd *FlagSet) ReportError(str string, withHelp bool) {
+ if withHelp {
+ if os.Args[0] == cmd.Name() {
+ str += ".\nSee '" + os.Args[0] + " --help'"
+ } else {
+ str += ".\nSee '" + os.Args[0] + " " + cmd.Name() + " --help'"
+ }
+ }
+ fmt.Fprintf(cmd.Out(), "docker: %s.\n", str)
+}
+
+// Parsed reports whether f.Parse has been called.
+func (f *FlagSet) Parsed() bool {
+ return f.parsed
+}
+
+// Parse parses the command-line flags from os.Args[1:]. Must be called
+// after all flags are defined and before flags are accessed by the program.
+func Parse() {
+ // Ignore errors; CommandLine is set for ExitOnError.
+ CommandLine.Parse(os.Args[1:])
+}
+
+// Parsed returns true if the command-line flags have been parsed.
+func Parsed() bool {
+ return CommandLine.Parsed()
+}
+
+// CommandLine is the default set of command-line flags, parsed from os.Args.
+// The top-level functions such as BoolVar, Arg, and so on are wrappers for the
+// methods of CommandLine.
+var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
+
+// NewFlagSet returns a new, empty flag set with the specified name and
+// error handling property.
+func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
+ f := &FlagSet{
+ name: name,
+ errorHandling: errorHandling,
+ }
+ return f
+}
+
+// Init sets the name and error handling property for a flag set.
+// By default, the zero FlagSet uses an empty name and the
+// ContinueOnError error handling policy.
+func (f *FlagSet) Init(name string, errorHandling ErrorHandling) {
+ f.name = name
+ f.errorHandling = errorHandling
+}
+
+// mergeVal wraps a Value from another flag set so that setting it through
+// the destination set is forwarded to the originating set.
+type mergeVal struct {
+ Value
+ key string
+ fset *FlagSet
+}
+
+func (v mergeVal) Set(s string) error {
+ return v.fset.Set(v.key, s)
+}
+
+func (v mergeVal) IsBoolFlag() bool {
+ if b, ok := v.Value.(boolFlag); ok {
+ return b.IsBoolFlag()
+ }
+ return false
+}
+
+// Merge copies the flags defined in the given flag sets into dest, honoring
+// dest's error handling policy when a name is redefined.
+func Merge(dest *FlagSet, flagsets ...*FlagSet) error {
+ for _, fset := range flagsets {
+ for k, f := range fset.formal {
+ if _, ok := dest.formal[k]; ok {
+ var err error
+ if fset.name == "" {
+ err = fmt.Errorf("flag redefined: %s", k)
+ } else {
+ err = fmt.Errorf("%s flag redefined: %s", fset.name, k)
+ }
+ fmt.Fprintln(fset.Out(), err.Error())
+ // Happens only if flags are declared with identical names
+ switch dest.errorHandling {
+ case ContinueOnError:
+ return err
+ case ExitOnError:
+ os.Exit(2)
+ case PanicOnError:
+ panic(err)
+ }
+ }
+ newF := *f
+ newF.Value = mergeVal{f.Value, k, fset}
+ dest.formal[k] = &newF
+ }
+ }
+ return nil
+}
+
+// IsEmpty reports whether no flag in f has been set.
+func (f *FlagSet) IsEmpty() bool {
+ return len(f.actual) == 0
+}
diff --git a/pkg/mflag/flag_test.go b/pkg/mflag/flag_test.go
new file mode 100644
index 00000000..85f32c8a
--- /dev/null
+++ b/pkg/mflag/flag_test.go
@@ -0,0 +1,516 @@
+// Copyright 2014-2015 The Docker & Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
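+
+// As an illustration of the Value interface described in flag.go's Var
+// documentation, a comma-separated list flag could be implemented roughly
+// like this (a sketch only; listValue is not part of this package):
+//
+//	type listValue []string
+//
+//	func (l *listValue) String() string { return strings.Join(*l, ",") }
+//
+//	func (l *listValue) Set(s string) error {
+//		*l = append(*l, strings.Split(s, ",")...)
+//		return nil
+//	}
+//
+//	var list listValue
+//	CommandLine.Var(&list, []string{"l", "-label"}, "set labels")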
+ +package mflag + +import ( + "bytes" + "fmt" + "os" + "sort" + "strings" + "testing" + "time" +) + +// ResetForTesting clears all flag state and sets the usage function as directed. +// After calling ResetForTesting, parse errors in flag handling will not +// exit the program. +func ResetForTesting(usage func()) { + CommandLine = NewFlagSet(os.Args[0], ContinueOnError) + Usage = usage +} +func boolString(s string) string { + if s == "0" { + return "false" + } + return "true" +} + +func TestEverything(t *testing.T) { + ResetForTesting(nil) + Bool([]string{"test_bool"}, false, "bool value") + Int([]string{"test_int"}, 0, "int value") + Int64([]string{"test_int64"}, 0, "int64 value") + Uint([]string{"test_uint"}, 0, "uint value") + Uint64([]string{"test_uint64"}, 0, "uint64 value") + String([]string{"test_string"}, "0", "string value") + Float64([]string{"test_float64"}, 0, "float64 value") + Duration([]string{"test_duration"}, 0, "time.Duration value") + + m := make(map[string]*Flag) + desired := "0" + visitor := func(f *Flag) { + for _, name := range f.Names { + if len(name) > 5 && name[0:5] == "test_" { + m[name] = f + ok := false + switch { + case f.Value.String() == desired: + ok = true + case name == "test_bool" && f.Value.String() == boolString(desired): + ok = true + case name == "test_duration" && f.Value.String() == desired+"s": + ok = true + } + if !ok { + t.Error("Visit: bad value", f.Value.String(), "for", name) + } + } + } + } + VisitAll(visitor) + if len(m) != 8 { + t.Error("VisitAll misses some flags") + for k, v := range m { + t.Log(k, *v) + } + } + m = make(map[string]*Flag) + Visit(visitor) + if len(m) != 0 { + t.Errorf("Visit sees unset flags") + for k, v := range m { + t.Log(k, *v) + } + } + // Now set all flags + Set("test_bool", "true") + Set("test_int", "1") + Set("test_int64", "1") + Set("test_uint", "1") + Set("test_uint64", "1") + Set("test_string", "1") + Set("test_float64", "1") + Set("test_duration", "1s") + desired = "1" + Visit(visitor) + if len(m) != 8 { + t.Error("Visit fails after set") + for k, v := range m { + t.Log(k, *v) + } + } + // Now test they're visited in sort order. 
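+ // Visit walks flags in lexicographical order of their names, so the
+ // collected names must already be sorted.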
+ var flagNames []string + Visit(func(f *Flag) { + for _, name := range f.Names { + flagNames = append(flagNames, name) + } + }) + if !sort.StringsAreSorted(flagNames) { + t.Errorf("flag names not sorted: %v", flagNames) + } +} + +func TestGet(t *testing.T) { + ResetForTesting(nil) + Bool([]string{"test_bool"}, true, "bool value") + Int([]string{"test_int"}, 1, "int value") + Int64([]string{"test_int64"}, 2, "int64 value") + Uint([]string{"test_uint"}, 3, "uint value") + Uint64([]string{"test_uint64"}, 4, "uint64 value") + String([]string{"test_string"}, "5", "string value") + Float64([]string{"test_float64"}, 6, "float64 value") + Duration([]string{"test_duration"}, 7, "time.Duration value") + + visitor := func(f *Flag) { + for _, name := range f.Names { + if len(name) > 5 && name[0:5] == "test_" { + g, ok := f.Value.(Getter) + if !ok { + t.Errorf("Visit: value does not satisfy Getter: %T", f.Value) + return + } + switch name { + case "test_bool": + ok = g.Get() == true + case "test_int": + ok = g.Get() == int(1) + case "test_int64": + ok = g.Get() == int64(2) + case "test_uint": + ok = g.Get() == uint(3) + case "test_uint64": + ok = g.Get() == uint64(4) + case "test_string": + ok = g.Get() == "5" + case "test_float64": + ok = g.Get() == float64(6) + case "test_duration": + ok = g.Get() == time.Duration(7) + } + if !ok { + t.Errorf("Visit: bad value %T(%v) for %s", g.Get(), g.Get(), name) + } + } + } + } + VisitAll(visitor) +} + +func testParse(f *FlagSet, t *testing.T) { + if f.Parsed() { + t.Error("f.Parse() = true before Parse") + } + boolFlag := f.Bool([]string{"bool"}, false, "bool value") + bool2Flag := f.Bool([]string{"bool2"}, false, "bool2 value") + f.Bool([]string{"bool3"}, false, "bool3 value") + bool4Flag := f.Bool([]string{"bool4"}, false, "bool4 value") + intFlag := f.Int([]string{"-int"}, 0, "int value") + int64Flag := f.Int64([]string{"-int64"}, 0, "int64 value") + uintFlag := f.Uint([]string{"uint"}, 0, "uint value") + uint64Flag := f.Uint64([]string{"-uint64"}, 0, "uint64 value") + stringFlag := f.String([]string{"string"}, "0", "string value") + f.String([]string{"string2"}, "0", "string2 value") + singleQuoteFlag := f.String([]string{"squote"}, "", "single quoted value") + doubleQuoteFlag := f.String([]string{"dquote"}, "", "double quoted value") + mixedQuoteFlag := f.String([]string{"mquote"}, "", "mixed quoted value") + mixed2QuoteFlag := f.String([]string{"mquote2"}, "", "mixed2 quoted value") + nestedQuoteFlag := f.String([]string{"nquote"}, "", "nested quoted value") + nested2QuoteFlag := f.String([]string{"nquote2"}, "", "nested2 quoted value") + float64Flag := f.Float64([]string{"float64"}, 0, "float64 value") + durationFlag := f.Duration([]string{"duration"}, 5*time.Second, "time.Duration value") + extra := "one-extra-argument" + args := []string{ + "-bool", + "-bool2=true", + "-bool4=false", + "--int", "22", + "--int64", "0x23", + "-uint", "24", + "--uint64", "25", + "-string", "hello", + "-squote='single'", + `-dquote="double"`, + `-mquote='mixed"`, + `-mquote2="mixed2'`, + `-nquote="'single nested'"`, + `-nquote2='"double nested"'`, + "-float64", "2718e28", + "-duration", "2m", + extra, + } + if err := f.Parse(args); err != nil { + t.Fatal(err) + } + if !f.Parsed() { + t.Error("f.Parse() = false after Parse") + } + if *boolFlag != true { + t.Error("bool flag should be true, is ", *boolFlag) + } + if *bool2Flag != true { + t.Error("bool2 flag should be true, is ", *bool2Flag) + } + if !f.IsSet("bool2") { + t.Error("bool2 should be marked as set") + } + if 
f.IsSet("bool3") { + t.Error("bool3 should not be marked as set") + } + if !f.IsSet("bool4") { + t.Error("bool4 should be marked as set") + } + if *bool4Flag != false { + t.Error("bool4 flag should be false, is ", *bool4Flag) + } + if *intFlag != 22 { + t.Error("int flag should be 22, is ", *intFlag) + } + if *int64Flag != 0x23 { + t.Error("int64 flag should be 0x23, is ", *int64Flag) + } + if *uintFlag != 24 { + t.Error("uint flag should be 24, is ", *uintFlag) + } + if *uint64Flag != 25 { + t.Error("uint64 flag should be 25, is ", *uint64Flag) + } + if *stringFlag != "hello" { + t.Error("string flag should be `hello`, is ", *stringFlag) + } + if !f.IsSet("string") { + t.Error("string flag should be marked as set") + } + if f.IsSet("string2") { + t.Error("string2 flag should not be marked as set") + } + if *singleQuoteFlag != "single" { + t.Error("single quote string flag should be `single`, is ", *singleQuoteFlag) + } + if *doubleQuoteFlag != "double" { + t.Error("double quote string flag should be `double`, is ", *doubleQuoteFlag) + } + if *mixedQuoteFlag != `'mixed"` { + t.Error("mixed quote string flag should be `'mixed\"`, is ", *mixedQuoteFlag) + } + if *mixed2QuoteFlag != `"mixed2'` { + t.Error("mixed2 quote string flag should be `\"mixed2'`, is ", *mixed2QuoteFlag) + } + if *nestedQuoteFlag != "'single nested'" { + t.Error("nested quote string flag should be `'single nested'`, is ", *nestedQuoteFlag) + } + if *nested2QuoteFlag != `"double nested"` { + t.Error("double quote string flag should be `\"double nested\"`, is ", *nested2QuoteFlag) + } + if *float64Flag != 2718e28 { + t.Error("float64 flag should be 2718e28, is ", *float64Flag) + } + if *durationFlag != 2*time.Minute { + t.Error("duration flag should be 2m, is ", *durationFlag) + } + if len(f.Args()) != 1 { + t.Error("expected one argument, got", len(f.Args())) + } else if f.Args()[0] != extra { + t.Errorf("expected argument %q got %q", extra, f.Args()[0]) + } +} + +func testPanic(f *FlagSet, t *testing.T) { + f.Int([]string{"-int"}, 0, "int value") + if f.Parsed() { + t.Error("f.Parse() = true before Parse") + } + args := []string{ + "-int", "21", + } + f.Parse(args) +} + +func TestParsePanic(t *testing.T) { + ResetForTesting(func() {}) + testPanic(CommandLine, t) +} + +func TestParse(t *testing.T) { + ResetForTesting(func() { t.Error("bad parse") }) + testParse(CommandLine, t) +} + +func TestFlagSetParse(t *testing.T) { + testParse(NewFlagSet("test", ContinueOnError), t) +} + +// Declare a user-defined flag type. +type flagVar []string + +func (f *flagVar) String() string { + return fmt.Sprint([]string(*f)) +} + +func (f *flagVar) Set(value string) error { + *f = append(*f, value) + return nil +} + +func TestUserDefined(t *testing.T) { + var flags FlagSet + flags.Init("test", ContinueOnError) + var v flagVar + flags.Var(&v, []string{"v"}, "usage") + if err := flags.Parse([]string{"-v", "1", "-v", "2", "-v=3"}); err != nil { + t.Error(err) + } + if len(v) != 3 { + t.Fatal("expected 3 args; got ", len(v)) + } + expect := "[1 2 3]" + if v.String() != expect { + t.Errorf("expected value %q got %q", expect, v.String()) + } +} + +// Declare a user-defined boolean flag type. 
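+// boolFlagVar counts how many times it is set to "true". Its IsBoolFlag
+// method reports false once the count reaches 4, at which point the parser
+// stops treating -b as a boolean and demands an explicit argument; that is
+// the error path TestUserDefinedBool exercises below.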
+type boolFlagVar struct { + count int +} + +func (b *boolFlagVar) String() string { + return fmt.Sprintf("%d", b.count) +} + +func (b *boolFlagVar) Set(value string) error { + if value == "true" { + b.count++ + } + return nil +} + +func (b *boolFlagVar) IsBoolFlag() bool { + return b.count < 4 +} + +func TestUserDefinedBool(t *testing.T) { + var flags FlagSet + flags.Init("test", ContinueOnError) + var b boolFlagVar + var err error + flags.Var(&b, []string{"b"}, "usage") + if err = flags.Parse([]string{"-b", "-b", "-b", "-b=true", "-b=false", "-b", "barg", "-b"}); err != nil { + if b.count < 4 { + t.Error(err) + } + } + + if b.count != 4 { + t.Errorf("want: %d; got: %d", 4, b.count) + } + + if err == nil { + t.Error("expected error; got none") + } +} + +func TestSetOutput(t *testing.T) { + var flags FlagSet + var buf bytes.Buffer + flags.SetOutput(&buf) + flags.Init("test", ContinueOnError) + flags.Parse([]string{"-unknown"}) + if out := buf.String(); !strings.Contains(out, "-unknown") { + t.Logf("expected output mentioning unknown; got %q", out) + } +} + +// This tests that one can reset the flags. This still works but not well, and is +// superseded by FlagSet. +func TestChangingArgs(t *testing.T) { + ResetForTesting(func() { t.Fatal("bad parse") }) + oldArgs := os.Args + defer func() { os.Args = oldArgs }() + os.Args = []string{"cmd", "-before", "subcmd", "-after", "args"} + before := Bool([]string{"before"}, false, "") + if err := CommandLine.Parse(os.Args[1:]); err != nil { + t.Fatal(err) + } + cmd := Arg(0) + os.Args = Args() + after := Bool([]string{"after"}, false, "") + Parse() + args := Args() + + if !*before || cmd != "subcmd" || !*after || len(args) != 1 || args[0] != "args" { + t.Fatalf("expected true subcmd true [args] got %v %v %v %v", *before, cmd, *after, args) + } +} + +// Test that -help invokes the usage message and returns ErrHelp. +func TestHelp(t *testing.T) { + var helpCalled = false + fs := NewFlagSet("help test", ContinueOnError) + fs.Usage = func() { helpCalled = true } + var flag bool + fs.BoolVar(&flag, []string{"flag"}, false, "regular flag") + // Regular flag invocation should work + err := fs.Parse([]string{"-flag=true"}) + if err != nil { + t.Fatal("expected no error; got ", err) + } + if !flag { + t.Error("flag was not set by -flag") + } + if helpCalled { + t.Error("help called for regular flag") + helpCalled = false // reset for next test + } + // Help flag should work as expected. + err = fs.Parse([]string{"-help"}) + if err == nil { + t.Fatal("error expected") + } + if err != ErrHelp { + t.Fatal("expected ErrHelp; got ", err) + } + if !helpCalled { + t.Fatal("help was not called") + } + // If we define a help flag, that should override. + var help bool + fs.BoolVar(&help, []string{"help"}, false, "help flag") + helpCalled = false + err = fs.Parse([]string{"-help"}) + if err != nil { + t.Fatal("expected no error for defined -help; got ", err) + } + if helpCalled { + t.Fatal("help was called; should not have been for defined help flag") + } +} + +// Test the flag count functions. 
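+// FlagCount counts every registered flag, FlagCountUndeprecated skips flags
+// whose names all carry the "#" deprecation marker, and NFlag counts only
+// the flags actually set on the command line.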
+func TestFlagCounts(t *testing.T) {
+ fs := NewFlagSet("help test", ContinueOnError)
+ var flag bool
+ fs.BoolVar(&flag, []string{"flag1"}, false, "regular flag")
+ fs.BoolVar(&flag, []string{"#deprecated1"}, false, "regular flag")
+ fs.BoolVar(&flag, []string{"f", "flag2"}, false, "regular flag")
+ fs.BoolVar(&flag, []string{"#d", "#deprecated2"}, false, "regular flag")
+ fs.BoolVar(&flag, []string{"flag3"}, false, "regular flag")
+ fs.BoolVar(&flag, []string{"g", "#flag4", "-flag4"}, false, "regular flag")
+
+ if fs.FlagCount() != 6 {
+ t.Fatal("FlagCount wrong. ", fs.FlagCount())
+ }
+ if fs.FlagCountUndeprecated() != 4 {
+ t.Fatal("FlagCountUndeprecated wrong. ", fs.FlagCountUndeprecated())
+ }
+ if fs.NFlag() != 0 {
+ t.Fatal("NFlag wrong. ", fs.NFlag())
+ }
+ err := fs.Parse([]string{"-fd", "-g", "-flag4"})
+ if err != nil {
+ t.Fatal("expected no error; got ", err)
+ }
+ if fs.NFlag() != 4 {
+ t.Fatal("NFlag wrong. ", fs.NFlag())
+ }
+}
+
+// Regression test for a bug in sortFlags
+func TestSortFlags(t *testing.T) {
+ fs := NewFlagSet("help TestSortFlags", ContinueOnError)
+
+ var err error
+
+ var b bool
+ fs.BoolVar(&b, []string{"b", "-banana"}, false, "usage")
+
+ err = fs.Parse([]string{"--banana=true"})
+ if err != nil {
+ t.Fatal("expected no error; got ", err)
+ }
+
+ count := 0
+
+ fs.VisitAll(func(flag *Flag) {
+ count++
+ if flag == nil {
+ t.Fatal("VisitAll should not return a nil flag")
+ }
+ })
+ flagcount := fs.FlagCount()
+ if flagcount != count {
+ t.Fatalf("FlagCount (%d) != number (%d) of elements visited", flagcount, count)
+ }
+ // Make sure it's idempotent
+ if flagcount != fs.FlagCount() {
+ t.Fatalf("FlagCount (%d) != fs.FlagCount() (%d) of elements visited", flagcount, fs.FlagCount())
+ }
+
+ count = 0
+ fs.Visit(func(flag *Flag) {
+ count++
+ if flag == nil {
+ t.Fatal("Visit should not return a nil flag")
+ }
+ })
+ nflag := fs.NFlag()
+ if nflag != count {
+ t.Fatalf("NFlag (%d) != number (%d) of elements visited", nflag, count)
+ }
+ if nflag != fs.NFlag() {
+ t.Fatalf("NFlag (%d) != fs.NFlag() (%d) of elements visited", nflag, fs.NFlag())
+ }
+}
diff --git a/pkg/mount/flags.go b/pkg/mount/flags.go
new file mode 100644
index 00000000..17dbd7a6
--- /dev/null
+++ b/pkg/mount/flags.go
@@ -0,0 +1,69 @@
+package mount
+
+import (
+ "strings"
+)
+
+// Parse fstab-style mount options into mount() flags
+// and device-specific data
+func parseOptions(options string) (int, string) {
+ var (
+ flag int
+ data []string
+ )
+
+ flags := map[string]struct {
+ clear bool
+ flag int
+ }{
+ "defaults": {false, 0},
+ "ro": {false, RDONLY},
+ "rw": {true, RDONLY},
+ "suid": {true, NOSUID},
+ "nosuid": {false, NOSUID},
+ "dev": {true, NODEV},
+ "nodev": {false, NODEV},
+ "exec": {true, NOEXEC},
+ "noexec": {false, NOEXEC},
+ "sync": {false, SYNCHRONOUS},
+ "async": {true, SYNCHRONOUS},
+ "dirsync": {false, DIRSYNC},
+ "remount": {false, REMOUNT},
+ "mand": {false, MANDLOCK},
+ "nomand": {true, MANDLOCK},
+ "atime": {true, NOATIME},
+ "noatime": {false, NOATIME},
+ "diratime": {true, NODIRATIME},
+ "nodiratime": {false, NODIRATIME},
+ "bind": {false, BIND},
+ "rbind": {false, RBIND},
+ "unbindable": {false, UNBINDABLE},
+ "runbindable": {false, RUNBINDABLE},
+ "private": {false, PRIVATE},
+ "rprivate": {false, RPRIVATE},
+ "shared": {false, SHARED},
+ "rshared": {false, RSHARED},
+ "slave": {false, SLAVE},
+ "rslave": {false, RSLAVE},
+ "relatime": {false, RELATIME},
+ "norelatime": {true, RELATIME},
+ "strictatime": {false, STRICTATIME},
+ "nostrictatime": {true, STRICTATIME},
+ }
+
+ for _, o := range strings.Split(options, ",") {
+ // If the option does not exist in the flags table or the flag
+ // is not supported on the platform,
+ // then it is a data value for a specific fs type
+ if f, exists := flags[o]; exists && f.flag != 0 {
+ if f.clear {
+ flag &= ^f.flag
+ } else {
+ flag |= f.flag
+ }
+ } else {
+ data = append(data, o)
+ }
+ }
+ return flag, strings.Join(data, ",")
+}
diff --git a/pkg/mount/flags_freebsd.go b/pkg/mount/flags_freebsd.go
new file mode 100644
index 00000000..f166cb2f
--- /dev/null
+++ b/pkg/mount/flags_freebsd.go
@@ -0,0 +1,48 @@
+// +build freebsd,cgo
+
+package mount
+
+/*
+#include <sys/mount.h>
+*/
+import "C"
+
+const (
+ // RDONLY will mount the filesystem as read-only.
+ RDONLY = C.MNT_RDONLY
+
+ // NOSUID will not allow set-user-identifier or set-group-identifier bits to
+ // take effect.
+ NOSUID = C.MNT_NOSUID
+
+ // NOEXEC will not allow execution of any binaries on the mounted file system.
+ NOEXEC = C.MNT_NOEXEC
+
+ // SYNCHRONOUS will allow any I/O to the file system to be done synchronously.
+ SYNCHRONOUS = C.MNT_SYNCHRONOUS
+
+ // NOATIME will not update the file access time when reading from a file.
+ NOATIME = C.MNT_NOATIME
+)
+
+// These flags are unsupported.
+const (
+ BIND = 0
+ DIRSYNC = 0
+ MANDLOCK = 0
+ NODEV = 0
+ NODIRATIME = 0
+ UNBINDABLE = 0
+ RUNBINDABLE = 0
+ PRIVATE = 0
+ RPRIVATE = 0
+ SHARED = 0
+ RSHARED = 0
+ SLAVE = 0
+ RSLAVE = 0
+ RBIND = 0
+ RELATIVE = 0
+ RELATIME = 0
+ REMOUNT = 0
+ STRICTATIME = 0
+)
diff --git a/pkg/mount/flags_linux.go b/pkg/mount/flags_linux.go
new file mode 100644
index 00000000..2f9f5c58
--- /dev/null
+++ b/pkg/mount/flags_linux.go
@@ -0,0 +1,85 @@
+package mount
+
+import (
+ "syscall"
+)
+
+const (
+ // RDONLY will mount the file system read-only.
+ RDONLY = syscall.MS_RDONLY
+
+ // NOSUID will not allow set-user-identifier or set-group-identifier bits to
+ // take effect.
+ NOSUID = syscall.MS_NOSUID
+
+ // NODEV will not interpret character or block special devices on the file
+ // system.
+ NODEV = syscall.MS_NODEV
+
+ // NOEXEC will not allow execution of any binaries on the mounted file system.
+ NOEXEC = syscall.MS_NOEXEC
+
+ // SYNCHRONOUS will allow I/O to the file system to be done synchronously.
+ SYNCHRONOUS = syscall.MS_SYNCHRONOUS
+
+ // DIRSYNC will force all directory updates within the file system to be done
+ // synchronously. This affects the following system calls: creat, link,
+ // unlink, symlink, mkdir, rmdir, mknod and rename.
+ DIRSYNC = syscall.MS_DIRSYNC
+
+ // REMOUNT will attempt to remount an already-mounted file system. This is
+ // commonly used to change the mount flags for a file system, especially to
+ // make a readonly file system writable. It does not change device or mount
+ // point.
+ REMOUNT = syscall.MS_REMOUNT
+
+ // MANDLOCK will force mandatory locks on a filesystem.
+ MANDLOCK = syscall.MS_MANDLOCK
+
+ // NOATIME will not update the file access time when reading from a file.
+ NOATIME = syscall.MS_NOATIME
+
+ // NODIRATIME will not update the directory access time.
+ NODIRATIME = syscall.MS_NODIRATIME
+
+ // BIND remounts a subtree somewhere else.
+ BIND = syscall.MS_BIND
+
+ // RBIND remounts a subtree and all possible submounts somewhere else.
+ RBIND = syscall.MS_BIND | syscall.MS_REC
+
+ // UNBINDABLE creates a mount which cannot be cloned through a bind operation.
+ UNBINDABLE = syscall.MS_UNBINDABLE
+
+ // RUNBINDABLE marks the entire mount tree as UNBINDABLE.
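+ // (For this and the other R-prefixed flags below, syscall.MS_REC makes
+ // the kernel apply the change recursively to every mount in the subtree.)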
+ RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC
+
+ // PRIVATE creates a mount which carries no propagation abilities.
+ PRIVATE = syscall.MS_PRIVATE
+
+ // RPRIVATE marks the entire mount tree as PRIVATE.
+ RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC
+
+ // SLAVE creates a mount which receives propagation from its master, but not
+ // vice versa.
+ SLAVE = syscall.MS_SLAVE
+
+ // RSLAVE marks the entire mount tree as SLAVE.
+ RSLAVE = syscall.MS_SLAVE | syscall.MS_REC
+
+ // SHARED creates a mount which provides the ability to create mirrors of
+ // that mount such that mounts and unmounts within any of the mirrors
+ // propagate to the other mirrors.
+ SHARED = syscall.MS_SHARED
+
+ // RSHARED marks the entire mount tree as SHARED.
+ RSHARED = syscall.MS_SHARED | syscall.MS_REC
+
+ // RELATIME updates inode access times relative to modify or change time.
+ RELATIME = syscall.MS_RELATIME
+
+ // STRICTATIME allows explicitly requesting full atime updates. This makes
+ // it possible for the kernel to default to relatime or noatime but still
+ // allow userspace to override it.
+ STRICTATIME = syscall.MS_STRICTATIME
+)
diff --git a/pkg/mount/flags_unsupported.go b/pkg/mount/flags_unsupported.go
new file mode 100644
index 00000000..a90d3d11
--- /dev/null
+++ b/pkg/mount/flags_unsupported.go
@@ -0,0 +1,30 @@
+// +build !linux,!freebsd freebsd,!cgo
+
+package mount
+
+// These flags are unsupported.
+const (
+ BIND = 0
+ DIRSYNC = 0
+ MANDLOCK = 0
+ NOATIME = 0
+ NODEV = 0
+ NODIRATIME = 0
+ NOEXEC = 0
+ NOSUID = 0
+ UNBINDABLE = 0
+ RUNBINDABLE = 0
+ PRIVATE = 0
+ RPRIVATE = 0
+ SHARED = 0
+ RSHARED = 0
+ SLAVE = 0
+ RSLAVE = 0
+ RBIND = 0
+ RELATIME = 0
+ RELATIVE = 0
+ REMOUNT = 0
+ STRICTATIME = 0
+ SYNCHRONOUS = 0
+ RDONLY = 0
+)
diff --git a/pkg/mount/mount.go b/pkg/mount/mount.go
new file mode 100644
index 00000000..ed7216e5
--- /dev/null
+++ b/pkg/mount/mount.go
@@ -0,0 +1,74 @@
+package mount
+
+import (
+ "time"
+)
+
+// GetMounts retrieves a list of mounts for the current running process.
+func GetMounts() ([]*Info, error) {
+ return parseMountTable()
+}
+
+// Mounted looks at /proc/self/mountinfo to determine whether the specified
+// mountpoint has been mounted.
+func Mounted(mountpoint string) (bool, error) {
+ entries, err := parseMountTable()
+ if err != nil {
+ return false, err
+ }
+
+ // Search the table for the mountpoint
+ for _, e := range entries {
+ if e.Mountpoint == mountpoint {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// Mount will mount a filesystem according to the specified configuration, on
+// the condition that the target path is *not* already mounted. Options must be
+// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
+// flags.go for supported option flags.
+func Mount(device, target, mType, options string) error {
+ flag, _ := parseOptions(options)
+ if flag&REMOUNT != REMOUNT {
+ if mounted, err := Mounted(target); err != nil || mounted {
+ return err
+ }
+ }
+ return ForceMount(device, target, mType, options)
+}
+
+// ForceMount will mount a filesystem according to the specified configuration,
+// *regardless* of whether the target path is already mounted. Options must be
+// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
+// flags.go for supported option flags.
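+//
+// A sketch of the contrast with Mount (device and paths are illustrative
+// only):
+//
+//	// Returns nil without action if /mnt/data is already mounted:
+//	err := Mount("/dev/sdb1", "/mnt/data", "ext4", "noatime")
+//	// Always issues the mount call:
+//	err = ForceMount("/dev/sdb1", "/mnt/data", "ext4", "noatime")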
+func ForceMount(device, target, mType, options string) error {
+ flag, data := parseOptions(options)
+ if err := mount(device, target, mType, uintptr(flag), data); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Unmount will unmount the target filesystem, so long as it is mounted.
+func Unmount(target string) error {
+ if mounted, err := Mounted(target); err != nil || !mounted {
+ return err
+ }
+ return ForceUnmount(target)
+}
+
+// ForceUnmount will force an unmount of the target filesystem, regardless of
+// whether it is mounted.
+func ForceUnmount(target string) (err error) {
+ // Simple retry logic for unmount
+ for i := 0; i < 10; i++ {
+ if err = unmount(target, 0); err == nil {
+ return nil
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+ return
+}
diff --git a/pkg/mount/mount_test.go b/pkg/mount/mount_test.go
new file mode 100644
index 00000000..5c7f1b86
--- /dev/null
+++ b/pkg/mount/mount_test.go
@@ -0,0 +1,137 @@
+package mount
+
+import (
+ "os"
+ "path"
+ "testing"
+)
+
+func TestMountOptionsParsing(t *testing.T) {
+ options := "noatime,ro,size=10k"
+
+ flag, data := parseOptions(options)
+
+ if data != "size=10k" {
+ t.Fatalf("Expected size=10k got %s", data)
+ }
+
+ expectedFlag := NOATIME | RDONLY
+
+ if flag != expectedFlag {
+ t.Fatalf("Expected %d got %d", expectedFlag, flag)
+ }
+}
+
+func TestMounted(t *testing.T) {
+ tmp := path.Join(os.TempDir(), "mount-tests")
+ if err := os.MkdirAll(tmp, 0777); err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmp)
+
+ var (
+ sourceDir = path.Join(tmp, "source")
+ targetDir = path.Join(tmp, "target")
+ sourcePath = path.Join(sourceDir, "file.txt")
+ targetPath = path.Join(targetDir, "file.txt")
+ )
+
+ os.Mkdir(sourceDir, 0777)
+ os.Mkdir(targetDir, 0777)
+
+ f, err := os.Create(sourcePath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.WriteString("hello")
+ f.Close()
+
+ f, err = os.Create(targetPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.Close()
+
+ if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ if err := Unmount(targetDir); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ mounted, err := Mounted(targetDir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !mounted {
+ t.Fatalf("Expected %s to be mounted", targetDir)
+ }
+ if _, err := os.Stat(targetDir); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestMountReadonly(t *testing.T) {
+ tmp := path.Join(os.TempDir(), "mount-tests")
+ if err := os.MkdirAll(tmp, 0777); err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmp)
+
+ var (
+ sourceDir = path.Join(tmp, "source")
+ targetDir = path.Join(tmp, "target")
+ sourcePath = path.Join(sourceDir, "file.txt")
+ targetPath = path.Join(targetDir, "file.txt")
+ )
+
+ os.Mkdir(sourceDir, 0777)
+ os.Mkdir(targetDir, 0777)
+
+ f, err := os.Create(sourcePath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.WriteString("hello")
+ f.Close()
+
+ f, err = os.Create(targetPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.Close()
+
+ if err := Mount(sourceDir, targetDir, "none", "bind,ro"); err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ if err := Unmount(targetDir); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ f, err = os.OpenFile(targetPath, os.O_RDWR, 0777)
+ if err == nil {
+ t.Fatal("Should not be able to open a ro file as rw")
+ }
+}
+
+func TestGetMounts(t *testing.T) {
+ mounts, err := GetMounts()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ root := false
+ for _, entry := range mounts {
+ if entry.Mountpoint == "/" {
+ root = true
+ }
+ }
+
+ if !root {
+ t.Fatal("Expected / to be mounted")
+ }
+}
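+
+// Note that TestMounted and TestMountReadonly create real bind mounts, so
+// they need to run as root on a kernel with bind-mount support; expect
+// failures otherwise.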
diff --git a/pkg/mount/mounter_freebsd.go b/pkg/mount/mounter_freebsd.go
new file mode 100644
index 00000000..bb870e6f
--- /dev/null
+++ b/pkg/mount/mounter_freebsd.go
@@ -0,0 +1,59 @@
+package mount
+
+/*
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/_iovec.h>
+#include <sys/mount.h>
+#include <sys/param.h>
+*/
+import "C"
+
+import (
+ "fmt"
+ "strings"
+ "syscall"
+ "unsafe"
+)
+
+func allocateIOVecs(options []string) []C.struct_iovec {
+ out := make([]C.struct_iovec, len(options))
+ for i, option := range options {
+ out[i].iov_base = unsafe.Pointer(C.CString(option))
+ out[i].iov_len = C.size_t(len(option) + 1)
+ }
+ return out
+}
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+ isNullFS := false
+
+ xs := strings.Split(data, ",")
+ for _, x := range xs {
+ if x == "bind" {
+ isNullFS = true
+ }
+ }
+
+ options := []string{"fspath", target}
+ if isNullFS {
+ options = append(options, "fstype", "nullfs", "target", device)
+ } else {
+ options = append(options, "fstype", mType, "from", device)
+ }
+ rawOptions := allocateIOVecs(options)
+ for _, rawOption := range rawOptions {
+ defer C.free(rawOption.iov_base)
+ }
+
+ if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
+ reason := C.GoString(C.strerror(*C.__error()))
+ return fmt.Errorf("Failed to call nmount: %s", reason)
+ }
+ return nil
+}
+
+func unmount(target string, flag int) error {
+ return syscall.Unmount(target, flag)
+}
diff --git a/pkg/mount/mounter_linux.go b/pkg/mount/mounter_linux.go
new file mode 100644
index 00000000..dd4280c7
--- /dev/null
+++ b/pkg/mount/mounter_linux.go
@@ -0,0 +1,21 @@
+package mount
+
+import (
+ "syscall"
+)
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+ if err := syscall.Mount(device, target, mType, flag, data); err != nil {
+ return err
+ }
+
+ // If we have a bind mount or remount, remount...
+ if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY {
+ return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data)
+ }
+ return nil
+}
+
+func unmount(target string, flag int) error {
+ return syscall.Unmount(target, flag)
+}
diff --git a/pkg/mount/mounter_unsupported.go b/pkg/mount/mounter_unsupported.go
new file mode 100644
index 00000000..eb93365e
--- /dev/null
+++ b/pkg/mount/mounter_unsupported.go
@@ -0,0 +1,11 @@
+// +build !linux,!freebsd freebsd,!cgo
+
+package mount
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+ panic("Not implemented")
+}
+
+func unmount(target string, flag int) error {
+ panic("Not implemented")
+}
diff --git a/pkg/mount/mountinfo.go b/pkg/mount/mountinfo.go
new file mode 100644
index 00000000..e3fc3535
--- /dev/null
+++ b/pkg/mount/mountinfo.go
@@ -0,0 +1,40 @@
+package mount
+
+// Info reveals information about a particular mounted filesystem. This
+// struct is populated from the content in the /proc/<pid>/mountinfo file.
+type Info struct {
+ // ID is a unique identifier of the mount (may be reused after umount).
+ ID int
+
+ // Parent indicates the ID of the mount parent (or of self for the top of the
+ // mount tree).
+ Parent int
+
+ // Major indicates one half of the device ID which identifies the device class.
+ Major int
+
+ // Minor indicates one half of the device ID which identifies a specific
+ // instance of device.
+ Minor int
+
+ // Root of the mount within the filesystem.
+ Root string
+
+ // Mountpoint indicates the mount point relative to the process's root.
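+ // In the sample line documented in mountinfo_linux.go this is "/mnt2".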
+ Mountpoint string
+
+ // Opts represents mount-specific options.
+ Opts string
+
+ // Optional represents optional fields.
+ Optional string
+
+ // Fstype indicates the type of filesystem, such as EXT3.
+ Fstype string
+
+ // Source indicates filesystem specific information or "none".
+ Source string
+
+ // VfsOpts represents per super block options.
+ VfsOpts string
+}
diff --git a/pkg/mount/mountinfo_freebsd.go b/pkg/mount/mountinfo_freebsd.go
new file mode 100644
index 00000000..4f32edcd
--- /dev/null
+++ b/pkg/mount/mountinfo_freebsd.go
@@ -0,0 +1,41 @@
+package mount
+
+/*
+#include <sys/param.h>
+#include <sys/ucred.h>
+#include <sys/mount.h>
+*/
+import "C"
+
+import (
+ "fmt"
+ "reflect"
+ "unsafe"
+)
+
+// parseMountTable returns the current mount table as reported by
+// getmntinfo(3).
+func parseMountTable() ([]*Info, error) {
+ var rawEntries *C.struct_statfs
+
+ count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
+ if count == 0 {
+ return nil, fmt.Errorf("Failed to call getmntinfo")
+ }
+
+ var entries []C.struct_statfs
+ header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
+ header.Cap = count
+ header.Len = count
+ header.Data = uintptr(unsafe.Pointer(rawEntries))
+
+ var out []*Info
+ for _, entry := range entries {
+ var mountinfo Info
+ mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
+ mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
+ mountinfo.Fstype = C.GoString(&entry.f_fstypename[0])
+ out = append(out, &mountinfo)
+ }
+ return out, nil
+}
diff --git a/pkg/mount/mountinfo_linux.go b/pkg/mount/mountinfo_linux.go
new file mode 100644
index 00000000..be69fee1
--- /dev/null
+++ b/pkg/mount/mountinfo_linux.go
@@ -0,0 +1,95 @@
+// +build linux
+
+package mount
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+const (
+ /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
+ (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)
+
+ (1) mount ID: unique identifier of the mount (may be reused after umount)
+ (2) parent ID: ID of parent (or of self for the top of the mount tree)
+ (3) major:minor: value of st_dev for files on filesystem
+ (4) root: root of the mount within the filesystem
+ (5) mount point: mount point relative to the process's root
+ (6) mount options: per mount options
+ (7) optional fields: zero or more fields of the form "tag[:value]"
+ (8) separator: marks the end of the optional fields
+ (9) filesystem type: name of filesystem of the form "type[.subtype]"
+ (10) mount source: filesystem specific information or "none"
+ (11) super options: per super block options*/
+ mountinfoFormat = "%d %d %d:%d %s %s %s %s"
+)
+
+// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
+// bind mounts.
+func parseMountTable() ([]*Info, error) {
+ f, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseInfoFile(f)
+}
+
+func parseInfoFile(r io.Reader) ([]*Info, error) {
+ var (
+ s = bufio.NewScanner(r)
+ out = []*Info{}
+ )
+
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ var (
+ p = &Info{}
+ text = s.Text()
+ optionalFields string
+ )
+
+ if _, err := fmt.Sscanf(text, mountinfoFormat,
+ &p.ID, &p.Parent, &p.Major, &p.Minor,
+ &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil {
+ return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err)
+ }
+ // Safe as mountinfo encodes mountpoints with spaces as \040.
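+ // Sscanf above consumed fields (1)-(7); everything after the " - "
+ // separator (8) is fstype (9), source (10) and super options (11).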
+ index := strings.Index(text, " - ") + postSeparatorFields := strings.Fields(text[index+3:]) + if len(postSeparatorFields) < 3 { + return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) + } + + if optionalFields != "-" { + p.Optional = optionalFields + } + + p.Fstype = postSeparatorFields[0] + p.Source = postSeparatorFields[1] + p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") + out = append(out, p) + } + return out, nil +} + +// PidMountInfo collects the mounts for a specific process ID. If the process +// ID is unknown, it is better to use `GetMounts` which will inspect +// "/proc/self/mountinfo" instead. +func PidMountInfo(pid int) ([]*Info, error) { + f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} diff --git a/pkg/mount/mountinfo_linux_test.go b/pkg/mount/mountinfo_linux_test.go new file mode 100644 index 00000000..812d12e8 --- /dev/null +++ b/pkg/mount/mountinfo_linux_test.go @@ -0,0 +1,477 @@ +// +build linux + +package mount + +import ( + "bytes" + "testing" +) + +const ( + fedoraMountinfo = `15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw + 16 35 0:14 / /sys rw,nosuid,nodev,noexec,relatime shared:6 - sysfs sysfs rw,seclabel + 17 35 0:5 / /dev rw,nosuid shared:2 - devtmpfs devtmpfs rw,seclabel,size=8056484k,nr_inodes=2014121,mode=755 + 18 16 0:15 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:7 - securityfs securityfs rw + 19 16 0:13 / /sys/fs/selinux rw,relatime shared:8 - selinuxfs selinuxfs rw + 20 17 0:16 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs rw,seclabel + 21 17 0:10 / /dev/pts rw,nosuid,noexec,relatime shared:4 - devpts devpts rw,seclabel,gid=5,mode=620,ptmxmode=000 + 22 35 0:17 / /run rw,nosuid,nodev shared:21 - tmpfs tmpfs rw,seclabel,mode=755 + 23 16 0:18 / /sys/fs/cgroup rw,nosuid,nodev,noexec shared:9 - tmpfs tmpfs rw,seclabel,mode=755 + 24 23 0:19 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd + 25 16 0:20 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw + 26 23 0:21 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,cpuset,clone_children + 27 23 0:22 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuacct,cpu,clone_children + 28 23 0:23 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,memory,clone_children + 29 23 0:24 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,devices,clone_children + 30 23 0:25 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,freezer,clone_children + 31 23 0:26 / /sys/fs/cgroup/net_cls rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,net_cls,clone_children + 32 23 0:27 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,blkio,clone_children + 33 23 0:28 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,perf_event,clone_children + 34 23 0:29 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb,clone_children + 35 1 253:2 / / rw,relatime shared:1 - ext4 /dev/mapper/ssd-root--f20 rw,seclabel,data=ordered + 36 15 0:30 / /proc/sys/fs/binfmt_misc rw,relatime shared:22 - autofs systemd-1 
rw,fd=38,pgrp=1,timeout=300,minproto=5,maxproto=5,direct + 37 17 0:12 / /dev/mqueue rw,relatime shared:23 - mqueue mqueue rw,seclabel + 38 35 0:31 / /tmp rw shared:24 - tmpfs tmpfs rw,seclabel + 39 17 0:32 / /dev/hugepages rw,relatime shared:25 - hugetlbfs hugetlbfs rw,seclabel + 40 16 0:7 / /sys/kernel/debug rw,relatime shared:26 - debugfs debugfs rw + 41 16 0:33 / /sys/kernel/config rw,relatime shared:27 - configfs configfs rw + 42 35 0:34 / /var/lib/nfs/rpc_pipefs rw,relatime shared:28 - rpc_pipefs sunrpc rw + 43 15 0:35 / /proc/fs/nfsd rw,relatime shared:29 - nfsd sunrpc rw + 45 35 8:17 / /boot rw,relatime shared:30 - ext4 /dev/sdb1 rw,seclabel,data=ordered + 46 35 253:4 / /home rw,relatime shared:31 - ext4 /dev/mapper/ssd-home rw,seclabel,data=ordered + 47 35 253:5 / /var/lib/libvirt/images rw,noatime,nodiratime shared:32 - ext4 /dev/mapper/ssd-virt rw,seclabel,discard,data=ordered + 48 35 253:12 / /mnt/old rw,relatime shared:33 - ext4 /dev/mapper/HelpDeskRHEL6-FedoraRoot rw,seclabel,data=ordered + 121 22 0:36 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:104 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000 + 124 16 0:37 / /sys/fs/fuse/connections rw,relatime shared:107 - fusectl fusectl rw + 165 38 253:3 / /tmp/mnt rw,relatime shared:147 - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered + 167 35 253:15 / /var/lib/docker/devicemapper/mnt/aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,relatime shared:149 - ext4 /dev/mapper/docker-253:2-425882-aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,seclabel,discard,stripe=16,data=ordered + 171 35 253:16 / /var/lib/docker/devicemapper/mnt/c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,relatime shared:153 - ext4 /dev/mapper/docker-253:2-425882-c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,seclabel,discard,stripe=16,data=ordered + 175 35 253:17 / /var/lib/docker/devicemapper/mnt/1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,relatime shared:157 - ext4 /dev/mapper/docker-253:2-425882-1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,seclabel,discard,stripe=16,data=ordered + 179 35 253:18 / /var/lib/docker/devicemapper/mnt/d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,relatime shared:161 - ext4 /dev/mapper/docker-253:2-425882-d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,seclabel,discard,stripe=16,data=ordered + 183 35 253:19 / /var/lib/docker/devicemapper/mnt/6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,relatime shared:165 - ext4 /dev/mapper/docker-253:2-425882-6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,seclabel,discard,stripe=16,data=ordered + 187 35 253:20 / /var/lib/docker/devicemapper/mnt/8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,relatime shared:169 - ext4 /dev/mapper/docker-253:2-425882-8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,seclabel,discard,stripe=16,data=ordered + 191 35 253:21 / /var/lib/docker/devicemapper/mnt/c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,relatime shared:173 - ext4 /dev/mapper/docker-253:2-425882-c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,seclabel,discard,stripe=16,data=ordered + 195 35 253:22 / /var/lib/docker/devicemapper/mnt/2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,relatime shared:177 - ext4 
/dev/mapper/docker-253:2-425882-2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,seclabel,discard,stripe=16,data=ordered + 199 35 253:23 / /var/lib/docker/devicemapper/mnt/37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,relatime shared:181 - ext4 /dev/mapper/docker-253:2-425882-37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,seclabel,discard,stripe=16,data=ordered + 203 35 253:24 / /var/lib/docker/devicemapper/mnt/aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,relatime shared:185 - ext4 /dev/mapper/docker-253:2-425882-aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,seclabel,discard,stripe=16,data=ordered + 207 35 253:25 / /var/lib/docker/devicemapper/mnt/928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,relatime shared:189 - ext4 /dev/mapper/docker-253:2-425882-928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,seclabel,discard,stripe=16,data=ordered + 211 35 253:26 / /var/lib/docker/devicemapper/mnt/0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,relatime shared:193 - ext4 /dev/mapper/docker-253:2-425882-0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,seclabel,discard,stripe=16,data=ordered + 215 35 253:27 / /var/lib/docker/devicemapper/mnt/d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,relatime shared:197 - ext4 /dev/mapper/docker-253:2-425882-d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,seclabel,discard,stripe=16,data=ordered + 219 35 253:28 / /var/lib/docker/devicemapper/mnt/bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,relatime shared:201 - ext4 /dev/mapper/docker-253:2-425882-bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,seclabel,discard,stripe=16,data=ordered + 223 35 253:29 / /var/lib/docker/devicemapper/mnt/7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,relatime shared:205 - ext4 /dev/mapper/docker-253:2-425882-7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,seclabel,discard,stripe=16,data=ordered + 227 35 253:30 / /var/lib/docker/devicemapper/mnt/c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,relatime shared:209 - ext4 /dev/mapper/docker-253:2-425882-c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,seclabel,discard,stripe=16,data=ordered + 231 35 253:31 / /var/lib/docker/devicemapper/mnt/8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,relatime shared:213 - ext4 /dev/mapper/docker-253:2-425882-8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,seclabel,discard,stripe=16,data=ordered + 235 35 253:32 / /var/lib/docker/devicemapper/mnt/1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,relatime shared:217 - ext4 /dev/mapper/docker-253:2-425882-1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,seclabel,discard,stripe=16,data=ordered + 239 35 253:33 / /var/lib/docker/devicemapper/mnt/e9aa60c60128cad1 rw,relatime shared:221 - ext4 /dev/mapper/docker-253:2-425882-e9aa60c60128cad1 rw,seclabel,discard,stripe=16,data=ordered + 243 35 253:34 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,relatime shared:225 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,seclabel,discard,stripe=16,data=ordered + 247 35 253:35 / 
/var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,relatime shared:229 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,seclabel,discard,stripe=16,data=ordered + 31 21 0:23 / /DATA/foo_bla_bla rw,relatime - cifs //foo/BLA\040BLA\040BLA/ rw,sec=ntlm,cache=loose,unc=\\foo\BLA BLA BLA,username=my_login,domain=mydomain.com,uid=12345678,forceuid,gid=12345678,forcegid,addr=10.1.30.10,file_mode=0755,dir_mode=0755,nounix,rsize=61440,wsize=65536,actimeo=1` + + ubuntuMountInfo = `15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=1015140k,nr_inodes=253785,mode=755 +18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +19 20 0:15 / /run rw,nosuid,noexec,relatime - tmpfs tmpfs rw,size=205044k,mode=755 +20 1 253:0 / / rw,relatime - ext4 /dev/disk/by-label/DOROOT rw,errors=remount-ro,data=ordered +21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs none rw,size=4k,mode=755 +22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw +23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw +24 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw +25 19 0:18 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k +26 21 0:19 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset,clone_children +27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw +28 21 0:21 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu +29 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755 +30 15 0:23 / /sys/fs/pstore rw,relatime - pstore none rw +31 21 0:24 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct +32 21 0:25 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory +33 21 0:26 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices +34 21 0:27 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer +35 21 0:28 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio +36 21 0:29 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event +37 21 0:30 / /sys/fs/cgroup/hugetlb rw,relatime - cgroup cgroup rw,hugetlb +38 21 0:31 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd +39 20 0:32 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=caafa54fdc06525 +40 20 0:33 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8-init rw,relatime - aufs none rw,si=caafa54f882b525 +41 20 0:34 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8 rw,relatime - aufs none rw,si=caafa54f8829525 +42 20 0:35 / /var/lib/docker/aufs/mnt/16f4d7e96dd612903f425bfe856762f291ff2e36a8ecd55a2209b7d7cd81c30b rw,relatime - aufs none rw,si=caafa54f882d525 +43 20 0:36 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e-init rw,relatime - aufs none rw,si=caafa54f882f525 +44 20 0:37 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e rw,relatime - aufs none rw,si=caafa54f88ba525 +45 20 0:38 / /var/lib/docker/aufs/mnt/283f35a910233c756409313be71ecd8fcfef0df57108b8d740b61b3e88860452 rw,relatime - aufs none rw,si=caafa54f88b8525 +46 20 0:39 / 
/var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1-init rw,relatime - aufs none rw,si=caafa54f88be525 +47 20 0:40 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1 rw,relatime - aufs none rw,si=caafa54f882c525 +48 20 0:41 / /var/lib/docker/aufs/mnt/de2b538c97d6366cc80e8658547c923ea1d042f85580df379846f36a4df7049d rw,relatime - aufs none rw,si=caafa54f85bb525 +49 20 0:42 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49-init rw,relatime - aufs none rw,si=caafa54fdc00525 +50 20 0:43 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49 rw,relatime - aufs none rw,si=caafa54fbaec525 +51 20 0:44 / /var/lib/docker/aufs/mnt/6ac1cace985c9fc9bea32234de8b36dba49bdd5e29a2972b327ff939d78a6274 rw,relatime - aufs none rw,si=caafa54f8e1a525 +52 20 0:45 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b-init rw,relatime - aufs none rw,si=caafa54f8e1d525 +53 20 0:46 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b rw,relatime - aufs none rw,si=caafa54f8e1b525 +54 20 0:47 / /var/lib/docker/aufs/mnt/cabb117d997f0f93519185aea58389a9762770b7496ed0b74a3e4a083fa45902 rw,relatime - aufs none rw,si=caafa54f810a525 +55 20 0:48 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33-init rw,relatime - aufs none rw,si=caafa54f8529525 +56 20 0:49 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33 rw,relatime - aufs none rw,si=caafa54f852f525 +57 20 0:50 / /var/lib/docker/aufs/mnt/16a1526fa445b84ce84f89506d219e87fa488a814063baf045d88b02f21166b3 rw,relatime - aufs none rw,si=caafa54f9e1d525 +58 20 0:51 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f-init rw,relatime - aufs none rw,si=caafa54f854d525 +59 20 0:52 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f rw,relatime - aufs none rw,si=caafa54f854e525 +60 20 0:53 / /var/lib/docker/aufs/mnt/e370c3e286bea027917baa0e4d251262681a472a87056e880dfd0513516dffd9 rw,relatime - aufs none rw,si=caafa54f840a525 +61 20 0:54 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e-init rw,relatime - aufs none rw,si=caafa54f8408525 +62 20 0:55 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e rw,relatime - aufs none rw,si=caafa54f8409525 +63 20 0:56 / /var/lib/docker/aufs/mnt/abd0b5ea5d355a67f911475e271924a5388ee60c27185fcd60d095afc4a09dc7 rw,relatime - aufs none rw,si=caafa54f9eb1525 +64 20 0:57 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2-init rw,relatime - aufs none rw,si=caafa54f85bf525 +65 20 0:58 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2 rw,relatime - aufs none rw,si=caafa54f85b8525 +66 20 0:59 / /var/lib/docker/aufs/mnt/912e1bf28b80a09644503924a8a1a4fb8ed10b808ca847bda27a369919aa52fa rw,relatime - aufs none rw,si=caafa54fbaea525 +67 20 0:60 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576-init rw,relatime - aufs none rw,si=caafa54f8472525 +68 20 0:61 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576 rw,relatime - aufs none rw,si=caafa54f8474525 +69 20 0:62 / 
/var/lib/docker/aufs/mnt/5aaebb79ef3097dfca377889aeb61a0c9d5e3795117d2b08d0751473c671dfb2 rw,relatime - aufs none rw,si=caafa54f8c5e525 +70 20 0:63 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2-init rw,relatime - aufs none rw,si=caafa54f8c3b525 +71 20 0:64 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2 rw,relatime - aufs none rw,si=caafa54f8c3d525 +72 20 0:65 / /var/lib/docker/aufs/mnt/2777f0763da4de93f8bebbe1595cc77f739806a158657b033eca06f827b6028a rw,relatime - aufs none rw,si=caafa54f8c3e525 +73 20 0:66 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e-init rw,relatime - aufs none rw,si=caafa54f8c39525 +74 20 0:67 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e rw,relatime - aufs none rw,si=caafa54f854f525 +75 20 0:68 / /var/lib/docker/aufs/mnt/06400b526ec18b66639c96efc41a84f4ae0b117cb28dafd56be420651b4084a0 rw,relatime - aufs none rw,si=caafa54f840b525 +76 20 0:69 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785-init rw,relatime - aufs none rw,si=caafa54fdddf525 +77 20 0:70 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785 rw,relatime - aufs none rw,si=caafa54f854b525 +78 20 0:71 / /var/lib/docker/aufs/mnt/1ff414fa93fd61ec81b0ab7b365a841ff6545accae03cceac702833aaeaf718f rw,relatime - aufs none rw,si=caafa54f8d85525 +79 20 0:72 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8-init rw,relatime - aufs none rw,si=caafa54f8da3525 +80 20 0:73 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8 rw,relatime - aufs none rw,si=caafa54f8da2525 +81 20 0:74 / /var/lib/docker/aufs/mnt/b68b1d4fe4d30016c552398e78b379a39f651661d8e1fa5f2460c24a5e723420 rw,relatime - aufs none rw,si=caafa54f8d81525 +82 20 0:75 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739-init rw,relatime - aufs none rw,si=caafa54f8da1525 +83 20 0:76 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739 rw,relatime - aufs none rw,si=caafa54f8da0525 +84 20 0:77 / /var/lib/docker/aufs/mnt/53e10b0329afc0e0d3322d31efaed4064139dc7027fe6ae445cffd7104bcc94f rw,relatime - aufs none rw,si=caafa54f8c35525 +85 20 0:78 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494-init rw,relatime - aufs none rw,si=caafa54f8db8525 +86 20 0:79 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494 rw,relatime - aufs none rw,si=caafa54f8dba525 +87 20 0:80 / /var/lib/docker/aufs/mnt/90fdd2c03eeaf65311f88f4200e18aef6d2772482712d9aea01cd793c64781b5 rw,relatime - aufs none rw,si=caafa54f8315525 +88 20 0:81 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f-init rw,relatime - aufs none rw,si=caafa54f8fc6525 +89 20 0:82 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f rw,relatime - aufs none rw,si=caafa54f8468525 +90 20 0:83 / /var/lib/docker/aufs/mnt/8cf9a993f50f3305abad3da268c0fc44ff78a1e7bba595ef9de963497496c3f9 rw,relatime - aufs none rw,si=caafa54f8c59525 +91 20 0:84 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173-init rw,relatime - aufs none rw,si=caafa54f846a525 +92 20 0:85 / 
/var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173 rw,relatime - aufs none rw,si=caafa54f846b525 +93 20 0:86 / /var/lib/docker/aufs/mnt/d8c8288ec920439a48b5796bab5883ee47a019240da65e8d8f33400c31bac5df rw,relatime - aufs none rw,si=caafa54f8dbf525 +94 20 0:87 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6-init rw,relatime - aufs none rw,si=caafa54f810f525 +95 20 0:88 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6 rw,relatime - aufs none rw,si=caafa54fbae9525 +96 20 0:89 / /var/lib/docker/aufs/mnt/befc1c67600df449dddbe796c0d06da7caff1d2bbff64cde1f0ba82d224996b5 rw,relatime - aufs none rw,si=caafa54f8dab525 +97 20 0:90 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562-init rw,relatime - aufs none rw,si=caafa54fdc02525 +98 20 0:91 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562 rw,relatime - aufs none rw,si=caafa54f9eb0525 +99 20 0:92 / /var/lib/docker/aufs/mnt/2a31f10029f04ff9d4381167a9b739609853d7220d55a56cb654779a700ee246 rw,relatime - aufs none rw,si=caafa54f8c37525 +100 20 0:93 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927-init rw,relatime - aufs none rw,si=caafa54fd173525 +101 20 0:94 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927 rw,relatime - aufs none rw,si=caafa54f8108525 +102 20 0:95 / /var/lib/docker/aufs/mnt/eaa0f57403a3dc685268f91df3fbcd7a8423cee50e1a9ee5c3e1688d9d676bb4 rw,relatime - aufs none rw,si=caafa54f852d525 +103 20 0:96 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b-init rw,relatime - aufs none rw,si=caafa54f8d80525 +104 20 0:97 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b rw,relatime - aufs none rw,si=caafa54f8fc3525 +105 20 0:98 / /var/lib/docker/aufs/mnt/d1b322ae17613c6adee84e709641a9244ac56675244a89a64dc0075075fcbb83 rw,relatime - aufs none rw,si=caafa54f8c58525 +106 20 0:99 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd-init rw,relatime - aufs none rw,si=caafa54f8c63525 +107 20 0:100 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd rw,relatime - aufs none rw,si=caafa54f8c67525 +108 20 0:101 / /var/lib/docker/aufs/mnt/bc9d2a264158f83a617a069bf17cbbf2a2ba453db7d3951d9dc63cc1558b1c2b rw,relatime - aufs none rw,si=caafa54f8dbe525 +109 20 0:102 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99-init rw,relatime - aufs none rw,si=caafa54f9e0d525 +110 20 0:103 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99 rw,relatime - aufs none rw,si=caafa54f9e1b525 +111 20 0:104 / /var/lib/docker/aufs/mnt/d4dca7b02569c732e740071e1c654d4ad282de5c41edb619af1f0aafa618be26 rw,relatime - aufs none rw,si=caafa54f8dae525 +112 20 0:105 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7-init rw,relatime - aufs none rw,si=caafa54f8c5c525 +113 20 0:106 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7 rw,relatime - aufs none rw,si=caafa54fd172525 +114 20 0:107 / /var/lib/docker/aufs/mnt/e60c57499c0b198a6734f77f660cdbbd950a5b78aa23f470ca4f0cfcc376abef rw,relatime - aufs none rw,si=caafa54909c4525 +115 20 0:108 / 
/var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35-init rw,relatime - aufs none rw,si=caafa54909c3525 +116 20 0:109 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35 rw,relatime - aufs none rw,si=caafa54909c7525 +117 20 0:110 / /var/lib/docker/aufs/mnt/2997be666d58b9e71469759bcb8bd9608dad0e533a1a7570a896919ba3388825 rw,relatime - aufs none rw,si=caafa54f8557525 +118 20 0:111 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93-init rw,relatime - aufs none rw,si=caafa54c6e88525 +119 20 0:112 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93 rw,relatime - aufs none rw,si=caafa54c6e8e525 +120 20 0:113 / /var/lib/docker/aufs/mnt/a672a1e2f2f051f6e19ed1dfbe80860a2d774174c49f7c476695f5dd1d5b2f67 rw,relatime - aufs none rw,si=caafa54c6e15525 +121 20 0:114 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420-init rw,relatime - aufs none rw,si=caafa54f8dad525 +122 20 0:115 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420 rw,relatime - aufs none rw,si=caafa54f8d84525 +123 20 0:116 / /var/lib/docker/aufs/mnt/2abc86007aca46fb4a817a033e2a05ccacae40b78ea4b03f8ea616b9ada40e2e rw,relatime - aufs none rw,si=caafa54c6e8b525 +124 20 0:117 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374-init rw,relatime - aufs none rw,si=caafa54c6e8d525 +125 20 0:118 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374 rw,relatime - aufs none rw,si=caafa54f8c34525 +126 20 0:119 / /var/lib/docker/aufs/mnt/2f95ca1a629cea8363b829faa727dd52896d5561f2c96ddee4f697ea2fc872c2 rw,relatime - aufs none rw,si=caafa54c6e8a525 +127 20 0:120 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2-init rw,relatime - aufs none rw,si=caafa54f8e19525 +128 20 0:121 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2 rw,relatime - aufs none rw,si=caafa54fa8c6525 +129 20 0:122 / /var/lib/docker/aufs/mnt/c1d04dfdf8cccb3676d5a91e84e9b0781ce40623d127d038bcfbe4c761b27401 rw,relatime - aufs none rw,si=caafa54f8c30525 +130 20 0:123 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a-init rw,relatime - aufs none rw,si=caafa54c6e1a525 +131 20 0:124 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a rw,relatime - aufs none rw,si=caafa54c6e1c525 +132 20 0:125 / /var/lib/docker/aufs/mnt/5ae3b6fccb1539fc02d420e86f3e9637bef5b711fed2ca31a2f426c8f5deddbf rw,relatime - aufs none rw,si=caafa54c4fea525 +133 20 0:126 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0-init rw,relatime - aufs none rw,si=caafa54c6e1e525 +134 20 0:127 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0 rw,relatime - aufs none rw,si=caafa54fa8c0525 +135 20 0:128 / /var/lib/docker/aufs/mnt/f382bd5aaccaf2d04a59089ac7cb12ec87efd769fd0c14d623358fbfd2a3f896 rw,relatime - aufs none rw,si=caafa54c4fec525 +136 20 0:129 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735-init rw,relatime - aufs none rw,si=caafa54c4fef525 +137 20 0:130 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735 rw,relatime - aufs none rw,si=caafa54c4feb525 +138 20 0:131 / 
/var/lib/docker/aufs/mnt/a9c5ee0854dc083b6bf62b7eb1e5291aefbb10702289a446471ce73aba0d5d7d rw,relatime - aufs none rw,si=caafa54909c6525 +139 20 0:134 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0-init rw,relatime - aufs none rw,si=caafa54804fe525 +140 20 0:135 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0 rw,relatime - aufs none rw,si=caafa54804fa525 +141 20 0:136 / /var/lib/docker/aufs/mnt/7ec3277e5c04c907051caf9c9c35889f5fcd6463e5485971b25404566830bb70 rw,relatime - aufs none rw,si=caafa54804f9525 +142 20 0:139 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8-init rw,relatime - aufs none rw,si=caafa54c6ef6525 +143 20 0:140 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8 rw,relatime - aufs none rw,si=caafa54c6ef5525 +144 20 0:356 / /var/lib/docker/aufs/mnt/e6ecde9e2c18cd3c75f424c67b6d89685cfee0fc67abf2cb6bdc0867eb998026 rw,relatime - aufs none rw,si=caafa548068e525` + + gentooMountinfo = `15 1 8:6 / / rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +16 15 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +17 15 0:14 / /run rw,nosuid,nodev,relatime - tmpfs tmpfs rw,size=3292172k,mode=755 +18 15 0:5 / /dev rw,nosuid,relatime - devtmpfs udev rw,size=10240k,nr_inodes=4106451,mode=755 +19 18 0:12 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw +20 18 0:10 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +21 18 0:15 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw +22 15 0:16 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +23 22 0:7 / /sys/kernel/debug rw,nosuid,nodev,noexec,relatime - debugfs debugfs rw +24 22 0:17 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs cgroup_root rw,size=10240k,mode=755 +25 24 0:18 / /sys/fs/cgroup/openrc rw,nosuid,nodev,noexec,relatime - cgroup openrc rw,release_agent=/lib64/rc/sh/cgroup-release-agent.sh,name=openrc +26 24 0:19 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cpuset rw,cpuset,clone_children +27 24 0:20 / /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cpu rw,cpu,clone_children +28 24 0:21 / /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cpuacct rw,cpuacct,clone_children +29 24 0:22 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup memory rw,memory,clone_children +30 24 0:23 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup devices rw,devices,clone_children +31 24 0:24 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup freezer rw,freezer,clone_children +32 24 0:25 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup blkio rw,blkio,clone_children +33 15 8:1 / /boot rw,noatime,nodiratime - vfat /dev/sda1 rw,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro +34 15 8:18 / /mnt/xfs rw,noatime,nodiratime - xfs /dev/sdb2 rw,attr2,inode64,noquota +35 15 0:26 / /tmp rw,relatime - tmpfs tmpfs rw +36 16 0:27 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw +42 15 0:33 / /var/lib/nfs/rpc_pipefs rw,relatime - rpc_pipefs rpc_pipefs rw +43 16 0:34 / /proc/fs/nfsd rw,nosuid,nodev,noexec,relatime - nfsd nfsd rw +44 15 0:35 / /home/tianon/.gvfs rw,nosuid,nodev,relatime - fuse.gvfs-fuse-daemon gvfs-fuse-daemon rw,user_id=1000,group_id=1000 +68 15 0:3336 / 
/var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd rw,relatime - aufs none rw,si=9b4a7640128db39c +85 68 8:6 /var/lib/docker/init/dockerinit-0.7.2-dev//deleted /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerinit rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +86 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/config.env /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerenv rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +87 68 8:6 /etc/resolv.conf /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/resolv.conf rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +88 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hostname /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hostname rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +89 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hosts /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hosts rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +38 15 0:3384 / /var/lib/docker/aufs/mnt/0292005a9292401bb5197657f2b682d97d8edcb3b72b5e390d2a680139985b55 rw,relatime - aufs none rw,si=9b4a7642b584939c +39 15 0:3385 / /var/lib/docker/aufs/mnt/59db98c889de5f71b70cfb82c40cbe47b64332f0f56042a2987a9e5df6e5e3aa rw,relatime - aufs none rw,si=9b4a7642b584e39c +40 15 0:3386 / /var/lib/docker/aufs/mnt/0545f0f2b6548eb9601d08f35a08f5a0a385407d36027a28f58e06e9f61e0278 rw,relatime - aufs none rw,si=9b4a7642b584b39c +41 15 0:3387 / /var/lib/docker/aufs/mnt/d882cfa16d1aa8fe0331a36e79be3d80b151e49f24fc39a39c3fed1735d5feb5 rw,relatime - aufs none rw,si=9b4a76453040039c +45 15 0:3388 / /var/lib/docker/aufs/mnt/055ca3befcb1626e74f5344b3398724ff05c0de0e20021683d04305c9e70a3f6 rw,relatime - aufs none rw,si=9b4a76453040739c +46 15 0:3389 / /var/lib/docker/aufs/mnt/b899e4567a351745d4285e7f1c18fdece75d877deb3041981cd290be348b7aa6 rw,relatime - aufs none rw,si=9b4a7647def4039c +47 15 0:3390 / /var/lib/docker/aufs/mnt/067ca040292c58954c5129f953219accfae0d40faca26b4d05e76ca76a998f16 rw,relatime - aufs none rw,si=9b4a7647def4239c +48 15 0:3391 / /var/lib/docker/aufs/mnt/8c995e7cb6e5082742daeea720e340b021d288d25d92e0412c03d200df308a11 rw,relatime - aufs none rw,si=9b4a764479c1639c +49 15 0:3392 / /var/lib/docker/aufs/mnt/07cc54dfae5b45300efdacdd53cc72c01b9044956a86ce7bff42d087e426096d rw,relatime - aufs none rw,si=9b4a764479c1739c +50 15 0:3393 / /var/lib/docker/aufs/mnt/0a9c95cf4c589c05b06baa79150b0cc1d8e7102759fe3ce4afaabb8247ca4f85 rw,relatime - aufs none rw,si=9b4a7644059c839c +51 15 0:3394 / /var/lib/docker/aufs/mnt/468fa98cececcf4e226e8370f18f4f848d63faf287fb8321a07f73086441a3a0 rw,relatime - aufs none rw,si=9b4a7644059ca39c +52 15 0:3395 / /var/lib/docker/aufs/mnt/0b826192231c5ce066fffb5beff4397337b5fc19a377aa7c6282c7c0ce7f111f rw,relatime - aufs none rw,si=9b4a764479c1339c +53 15 0:3396 / /var/lib/docker/aufs/mnt/93b8ba1b772fbe79709b909c43ea4b2c30d712e53548f467db1ffdc7a384f196 rw,relatime - aufs none rw,si=9b4a7640798a739c +54 15 0:3397 / /var/lib/docker/aufs/mnt/0c0d0acfb506859b12ef18cdfef9ebed0b43a611482403564224bde9149d373c rw,relatime - aufs none rw,si=9b4a7640798a039c +55 15 0:3398 / 
/var/lib/docker/aufs/mnt/33648c39ab6c7c74af0243d6d6a81b052e9e25ad1e04b19892eb2dde013e358b rw,relatime - aufs none rw,si=9b4a7644b439b39c +56 15 0:3399 / /var/lib/docker/aufs/mnt/0c12bea97a1c958a3c739fb148536c1c89351d48e885ecda8f0499b5cc44407e rw,relatime - aufs none rw,si=9b4a7640798a239c +57 15 0:3400 / /var/lib/docker/aufs/mnt/ed443988ce125f172d7512e84a4de2627405990fd767a16adefa8ce700c19ce8 rw,relatime - aufs none rw,si=9b4a7644c8ed339c +59 15 0:3402 / /var/lib/docker/aufs/mnt/f61612c324ff3c924d3f7a82fb00a0f8d8f73c248c41897061949e9f5ab7e3b1 rw,relatime - aufs none rw,si=9b4a76442810c39c +60 15 0:3403 / /var/lib/docker/aufs/mnt/0f1ee55c6c4e25027b80de8e64b8b6fb542b3b41aa0caab9261da75752e22bfd rw,relatime - aufs none rw,si=9b4a76442810e39c +61 15 0:3404 / /var/lib/docker/aufs/mnt/956f6cc4af5785cb3ee6963dcbca668219437d9b28f513290b1453ac64a34f97 rw,relatime - aufs none rw,si=9b4a7644303ec39c +62 15 0:3405 / /var/lib/docker/aufs/mnt/1099769158c4b4773e2569e38024e8717e400f87a002c41d8cf47cb81b051ba6 rw,relatime - aufs none rw,si=9b4a7644303ee39c +63 15 0:3406 / /var/lib/docker/aufs/mnt/11890ceb98d4442595b676085cd7b21550ab85c5df841e0fba997ff54e3d522d rw,relatime - aufs none rw,si=9b4a7644303ed39c +64 15 0:3407 / /var/lib/docker/aufs/mnt/acdb90dc378e8ed2420b43a6d291f1c789a081cd1904018780cc038fcd7aae53 rw,relatime - aufs none rw,si=9b4a76434be2139c +65 15 0:3408 / /var/lib/docker/aufs/mnt/120e716f19d4714fbe63cc1ed246204f2c1106eefebc6537ba2587d7e7711959 rw,relatime - aufs none rw,si=9b4a76434be2339c +66 15 0:3409 / /var/lib/docker/aufs/mnt/b197b7fffb61d89e0ba1c40de9a9fc0d912e778b3c1bd828cf981ff37c1963bc rw,relatime - aufs none rw,si=9b4a76434be2039c +70 15 0:3412 / /var/lib/docker/aufs/mnt/1434b69d2e1bb18a9f0b96b9cdac30132b2688f5d1379f68a39a5e120c2f93eb rw,relatime - aufs none rw,si=9b4a76434be2639c +71 15 0:3413 / /var/lib/docker/aufs/mnt/16006e83caf33ab5eb0cd6afc92ea2ee8edeff897496b0bb3ec3a75b767374b3 rw,relatime - aufs none rw,si=9b4a7644d790439c +72 15 0:3414 / /var/lib/docker/aufs/mnt/55bfa5f44e94d27f91f79ba901b118b15098449165c87abf1b53ffff147ff164 rw,relatime - aufs none rw,si=9b4a7644d790239c +73 15 0:3415 / /var/lib/docker/aufs/mnt/1912b97a07ab21ccd98a2a27bc779bf3cf364a3138afa3c3e6f7f169a3c3eab5 rw,relatime - aufs none rw,si=9b4a76441822739c +76 15 0:3418 / /var/lib/docker/aufs/mnt/1a7c3292e8879bd91ffd9282e954f643b1db5683093574c248ff14a9609f2f56 rw,relatime - aufs none rw,si=9b4a76438cb7239c +77 15 0:3419 / /var/lib/docker/aufs/mnt/bb1faaf0d076ddba82c2318305a85f490dafa4e8a8640a8db8ed657c439120cc rw,relatime - aufs none rw,si=9b4a76438cb7339c +78 15 0:3420 / /var/lib/docker/aufs/mnt/1ab869f21d2241a73ac840c7f988490313f909ac642eba71d092204fec66dd7c rw,relatime - aufs none rw,si=9b4a76438cb7639c +79 15 0:3421 / /var/lib/docker/aufs/mnt/fd7245b2cfe3890fa5f5b452260e4edf9e7fb7746532ed9d83f7a0d7dbaa610e rw,relatime - aufs none rw,si=9b4a7644bdc0139c +80 15 0:3422 / /var/lib/docker/aufs/mnt/1e5686c5301f26b9b3cd24e322c608913465cc6c5d0dcd7c5e498d1314747d61 rw,relatime - aufs none rw,si=9b4a7644bdc0639c +81 15 0:3423 / /var/lib/docker/aufs/mnt/52edf6ee6e40bfec1e9301a4d4a92ab83d144e2ae4ce5099e99df6138cb844bf rw,relatime - aufs none rw,si=9b4a7644bdc0239c +82 15 0:3424 / /var/lib/docker/aufs/mnt/1ea10fb7085d28cda4904657dff0454e52598d28e1d77e4f2965bbc3666e808f rw,relatime - aufs none rw,si=9b4a76438cb7139c +83 15 0:3425 / /var/lib/docker/aufs/mnt/9c03e98c3593946dbd4087f8d83f9ca262f4a2efdc952ce60690838b9ba6c526 rw,relatime - aufs none rw,si=9b4a76443020639c +84 15 0:3426 / 
/var/lib/docker/aufs/mnt/220a2344d67437602c6d2cee9a98c46be13f82c2a8063919dd2fad52bf2fb7dd rw,relatime - aufs none rw,si=9b4a76434bff339c +94 15 0:3427 / /var/lib/docker/aufs/mnt/3b32876c5b200312c50baa476ff342248e88c8ea96e6a1032cd53a88738a1cf2 rw,relatime - aufs none rw,si=9b4a76434bff139c +95 15 0:3428 / /var/lib/docker/aufs/mnt/23ee2b8b0d4ae8db6f6d1e168e2c6f79f8a18f953b09f65e0d22cc1e67a3a6fa rw,relatime - aufs none rw,si=9b4a7646c305c39c +96 15 0:3429 / /var/lib/docker/aufs/mnt/e86e6daa70b61b57945fa178222615f3c3d6bcef12c9f28e9f8623d44dc2d429 rw,relatime - aufs none rw,si=9b4a7646c305f39c +97 15 0:3430 / /var/lib/docker/aufs/mnt/2413d07623e80860bb2e9e306fbdee699afd07525785c025c591231e864aa162 rw,relatime - aufs none rw,si=9b4a76434bff039c +98 15 0:3431 / /var/lib/docker/aufs/mnt/adfd622eb22340fc80b429e5564b125668e260bf9068096c46dd59f1386a4b7d rw,relatime - aufs none rw,si=9b4a7646a7a1039c +102 15 0:3435 / /var/lib/docker/aufs/mnt/27cd92e7a91d02e2d6b44d16679a00fb6d169b19b88822891084e7fd1a84882d rw,relatime - aufs none rw,si=9b4a7646f25ec39c +103 15 0:3436 / /var/lib/docker/aufs/mnt/27dfdaf94cfbf45055c748293c37dd68d9140240bff4c646cb09216015914a88 rw,relatime - aufs none rw,si=9b4a7646732f939c +104 15 0:3437 / /var/lib/docker/aufs/mnt/5ed7524aff68dfbf0fc601cbaeac01bab14391850a973dabf3653282a627920f rw,relatime - aufs none rw,si=9b4a7646732f839c +105 15 0:3438 / /var/lib/docker/aufs/mnt/2a0d4767e536beb5785b60e071e3ac8e5e812613ab143a9627bee77d0c9ab062 rw,relatime - aufs none rw,si=9b4a7646732fe39c +106 15 0:3439 / /var/lib/docker/aufs/mnt/dea3fc045d9f4ae51ba952450b948a822cf85c39411489ca5224f6d9a8d02bad rw,relatime - aufs none rw,si=9b4a764012ad839c +107 15 0:3440 / /var/lib/docker/aufs/mnt/2d140a787160798da60cb67c21b1210054ad4dafecdcf832f015995b9aa99cfd rw,relatime - aufs none rw,si=9b4a764012add39c +108 15 0:3441 / /var/lib/docker/aufs/mnt/cb190b2a8e984475914430fbad2382e0d20b9b659f8ef83ae8d170cc672e519c rw,relatime - aufs none rw,si=9b4a76454d9c239c +109 15 0:3442 / /var/lib/docker/aufs/mnt/2f4a012d5a7ffd90256a6e9aa479054b3dddbc3c6a343f26dafbf3196890223b rw,relatime - aufs none rw,si=9b4a76454d9c439c +110 15 0:3443 / /var/lib/docker/aufs/mnt/63cc77904b80c4ffbf49cb974c5d8733dc52ad7640d3ae87554b325d7312d87f rw,relatime - aufs none rw,si=9b4a76454d9c339c +111 15 0:3444 / /var/lib/docker/aufs/mnt/30333e872c451482ea2d235ff2192e875bd234006b238ae2bdde3b91a86d7522 rw,relatime - aufs none rw,si=9b4a76422cebf39c +112 15 0:3445 / /var/lib/docker/aufs/mnt/6c54fc1125da3925cae65b5c9a98f3be55b0a2c2666082e5094a4ba71beb5bff rw,relatime - aufs none rw,si=9b4a7646dd5a439c +113 15 0:3446 / /var/lib/docker/aufs/mnt/3087d48cb01cda9d0a83a9ca301e6ea40e8593d18c4921be4794c91a420ab9a3 rw,relatime - aufs none rw,si=9b4a7646dd5a739c +114 15 0:3447 / /var/lib/docker/aufs/mnt/cc2607462a8f55b179a749b144c3fdbb50678e1a4f3065ea04e283e9b1f1d8e2 rw,relatime - aufs none rw,si=9b4a7646dd5a239c +117 15 0:3450 / /var/lib/docker/aufs/mnt/310c5e8392b29e8658a22e08d96d63936633b7e2c38e8d220047928b00a03d24 rw,relatime - aufs none rw,si=9b4a7647932d739c +118 15 0:3451 / /var/lib/docker/aufs/mnt/38a1f0029406ba9c3b6058f2f406d8a1d23c855046cf355c91d87d446fcc1460 rw,relatime - aufs none rw,si=9b4a76445abc939c +119 15 0:3452 / /var/lib/docker/aufs/mnt/42e109ab7914ae997a11ccd860fd18e4d488c50c044c3240423ce15774b8b62e rw,relatime - aufs none rw,si=9b4a76445abca39c +120 15 0:3453 / /var/lib/docker/aufs/mnt/365d832af0402d052b389c1e9c0d353b48487533d20cd4351df8e24ec4e4f9d8 rw,relatime - aufs none rw,si=9b4a7644066aa39c +121 15 0:3454 / 
/var/lib/docker/aufs/mnt/d3fa8a24d695b6cda9b64f96188f701963d28bef0473343f8b212df1a2cf1d2b rw,relatime - aufs none rw,si=9b4a7644066af39c +122 15 0:3455 / /var/lib/docker/aufs/mnt/37d4f491919abc49a15d0c7a7cc8383f087573525d7d288accd14f0b4af9eae0 rw,relatime - aufs none rw,si=9b4a7644066ad39c +123 15 0:3456 / /var/lib/docker/aufs/mnt/93902707fe12cbdd0068ce73f2baad4b3a299189b1b19cb5f8a2025e106ae3f5 rw,relatime - aufs none rw,si=9b4a76444445f39c +126 15 0:3459 / /var/lib/docker/aufs/mnt/3b49291670a625b9bbb329ffba99bf7fa7abff80cefef040f8b89e2b3aad4f9f rw,relatime - aufs none rw,si=9b4a7640798a339c +127 15 0:3460 / /var/lib/docker/aufs/mnt/8d9c7b943cc8f854f4d0d4ec19f7c16c13b0cc4f67a41472a072648610cecb59 rw,relatime - aufs none rw,si=9b4a76427383039c +128 15 0:3461 / /var/lib/docker/aufs/mnt/3b6c90036526c376307df71d49c9f5fce334c01b926faa6a78186842de74beac rw,relatime - aufs none rw,si=9b4a7644badd439c +130 15 0:3463 / /var/lib/docker/aufs/mnt/7b24158eeddfb5d31b7e932e406ea4899fd728344335ff8e0765e89ddeb351dd rw,relatime - aufs none rw,si=9b4a7644badd539c +131 15 0:3464 / /var/lib/docker/aufs/mnt/3ead6dd5773765c74850cf6c769f21fe65c29d622ffa712664f9f5b80364ce27 rw,relatime - aufs none rw,si=9b4a7642f469939c +132 15 0:3465 / /var/lib/docker/aufs/mnt/3f825573b29547744a37b65597a9d6d15a8350be4429b7038d126a4c9a8e178f rw,relatime - aufs none rw,si=9b4a7642f469c39c +133 15 0:3466 / /var/lib/docker/aufs/mnt/f67aaaeb3681e5dcb99a41f847087370bd1c206680cb8c7b6a9819fd6c97a331 rw,relatime - aufs none rw,si=9b4a7647cc25939c +134 15 0:3467 / /var/lib/docker/aufs/mnt/41afe6cfb3c1fc2280b869db07699da88552786e28793f0bc048a265c01bd942 rw,relatime - aufs none rw,si=9b4a7647cc25c39c +135 15 0:3468 / /var/lib/docker/aufs/mnt/b8092ea59da34a40b120e8718c3ae9fa8436996edc4fc50e4b99c72dfd81e1af rw,relatime - aufs none rw,si=9b4a76445abc439c +136 15 0:3469 / /var/lib/docker/aufs/mnt/42c69d2cc179e2684458bb8596a9da6dad182c08eae9b74d5f0e615b399f75a5 rw,relatime - aufs none rw,si=9b4a76455ddbe39c +137 15 0:3470 / /var/lib/docker/aufs/mnt/ea0871954acd2d62a211ac60e05969622044d4c74597870c4f818fbb0c56b09b rw,relatime - aufs none rw,si=9b4a76455ddbf39c +138 15 0:3471 / /var/lib/docker/aufs/mnt/4307906b275ab3fc971786b3841ae3217ac85b6756ddeb7ad4ba09cd044c2597 rw,relatime - aufs none rw,si=9b4a76455ddb839c +139 15 0:3472 / /var/lib/docker/aufs/mnt/4390b872928c53500a5035634f3421622ed6299dc1472b631fc45de9f56dc180 rw,relatime - aufs none rw,si=9b4a76402f2fd39c +140 15 0:3473 / /var/lib/docker/aufs/mnt/6bb41e78863b85e4aa7da89455314855c8c3bda64e52a583bab15dc1fa2e80c2 rw,relatime - aufs none rw,si=9b4a76402f2fa39c +141 15 0:3474 / /var/lib/docker/aufs/mnt/4444f583c2a79c66608f4673a32c9c812154f027045fbd558c2d69920c53f835 rw,relatime - aufs none rw,si=9b4a764479dbd39c +142 15 0:3475 / /var/lib/docker/aufs/mnt/6f11883af4a05ea362e0c54df89058da4859f977efd07b6f539e1f55c1d2a668 rw,relatime - aufs none rw,si=9b4a76402f30b39c +143 15 0:3476 / /var/lib/docker/aufs/mnt/453490dd32e7c2e9ef906f995d8fb3c2753923d1a5e0ba3fd3296e2e4dc238e7 rw,relatime - aufs none rw,si=9b4a76402f30c39c +144 15 0:3477 / /var/lib/docker/aufs/mnt/45e5945735ee102b5e891c91650c57ec4b52bb53017d68f02d50ea8a6e230610 rw,relatime - aufs none rw,si=9b4a76423260739c +147 15 0:3480 / /var/lib/docker/aufs/mnt/4727a64a5553a1125f315b96bed10d3073d6988225a292cce732617c925b56ab rw,relatime - aufs none rw,si=9b4a76443030339c +150 15 0:3483 / /var/lib/docker/aufs/mnt/4e348b5187b9a567059306afc72d42e0ec5c893b0d4abd547526d5f9b6fb4590 rw,relatime - aufs none rw,si=9b4a7644f5d8c39c +151 15 0:3484 / 
/var/lib/docker/aufs/mnt/4efc616bfbc3f906718b052da22e4335f8e9f91ee9b15866ed3a8029645189ef rw,relatime - aufs none rw,si=9b4a7644f5d8939c +152 15 0:3485 / /var/lib/docker/aufs/mnt/83e730ae9754d5adb853b64735472d98dfa17136b8812ac9cfcd1eba7f4e7d2d rw,relatime - aufs none rw,si=9b4a76469aa7139c +153 15 0:3486 / /var/lib/docker/aufs/mnt/4fc5ba8a5b333be2b7eefacccb626772eeec0ae8a6975112b56c9fb36c0d342f rw,relatime - aufs none rw,si=9b4a7640128dc39c +154 15 0:3487 / /var/lib/docker/aufs/mnt/50200d5edff5dfe8d1ef3c78b0bbd709793ac6e936aa16d74ff66f7ea577b6f9 rw,relatime - aufs none rw,si=9b4a7640128da39c +155 15 0:3488 / /var/lib/docker/aufs/mnt/51e5e51604361448f0b9777f38329f414bc5ba9cf238f26d465ff479bd574b61 rw,relatime - aufs none rw,si=9b4a76444f68939c +156 15 0:3489 / /var/lib/docker/aufs/mnt/52a142149aa98bba83df8766bbb1c629a97b9799944ead90dd206c4bdf0b8385 rw,relatime - aufs none rw,si=9b4a76444f68b39c +157 15 0:3490 / /var/lib/docker/aufs/mnt/52dd21a94a00f58a1ed489312fcfffb91578089c76c5650364476f1d5de031bc rw,relatime - aufs none rw,si=9b4a76444f68f39c +158 15 0:3491 / /var/lib/docker/aufs/mnt/ee562415ddaad353ed22c88d0ca768a0c74bfba6333b6e25c46849ee22d990da rw,relatime - aufs none rw,si=9b4a7640128d839c +159 15 0:3492 / /var/lib/docker/aufs/mnt/db47a9e87173f7554f550c8a01891de79cf12acdd32e01f95c1a527a08bdfb2c rw,relatime - aufs none rw,si=9b4a764405a1d39c +160 15 0:3493 / /var/lib/docker/aufs/mnt/55e827bf6d44d930ec0b827c98356eb8b68c3301e2d60d1429aa72e05b4c17df rw,relatime - aufs none rw,si=9b4a764405a1a39c +162 15 0:3495 / /var/lib/docker/aufs/mnt/578dc4e0a87fc37ec081ca098430499a59639c09f6f12a8f48de29828a091aa6 rw,relatime - aufs none rw,si=9b4a76406d7d439c +163 15 0:3496 / /var/lib/docker/aufs/mnt/728cc1cb04fa4bc6f7bf7a90980beda6d8fc0beb71630874c0747b994efb0798 rw,relatime - aufs none rw,si=9b4a76444f20e39c +164 15 0:3497 / /var/lib/docker/aufs/mnt/5850cc4bd9b55aea46c7ad598f1785117607974084ea643580f58ce3222e683a rw,relatime - aufs none rw,si=9b4a7644a824239c +165 15 0:3498 / /var/lib/docker/aufs/mnt/89443b3f766d5a37bc8b84e29da8b84e6a3ea8486d3cf154e2aae1816516e4a8 rw,relatime - aufs none rw,si=9b4a7644a824139c +166 15 0:3499 / /var/lib/docker/aufs/mnt/f5ae8fd5a41a337907d16515bc3162525154b59c32314c695ecd092c3b47943d rw,relatime - aufs none rw,si=9b4a7644a824439c +167 15 0:3500 / /var/lib/docker/aufs/mnt/5a430854f2a03a9e5f7cbc9f3fb46a8ebca526a5b3f435236d8295e5998798f5 rw,relatime - aufs none rw,si=9b4a7647fc82439c +168 15 0:3501 / /var/lib/docker/aufs/mnt/eda16901ae4cead35070c39845cbf1e10bd6b8cb0ffa7879ae2d8a186e460f91 rw,relatime - aufs none rw,si=9b4a76441e0df39c +169 15 0:3502 / /var/lib/docker/aufs/mnt/5a593721430c2a51b119ff86a7e06ea2b37e3b4131f8f1344d402b61b0c8d868 rw,relatime - aufs none rw,si=9b4a764248bad39c +170 15 0:3503 / /var/lib/docker/aufs/mnt/d662ad0a30fbfa902e0962108685b9330597e1ee2abb16dc9462eb5a67fdd23f rw,relatime - aufs none rw,si=9b4a764248bae39c +171 15 0:3504 / /var/lib/docker/aufs/mnt/5bc9de5c79812843fb36eee96bef1ddba812407861f572e33242f4ee10da2c15 rw,relatime - aufs none rw,si=9b4a764248ba839c +172 15 0:3505 / /var/lib/docker/aufs/mnt/5e763de8e9b0f7d58d2e12a341e029ab4efb3b99788b175090d8209e971156c1 rw,relatime - aufs none rw,si=9b4a764248baa39c +173 15 0:3506 / /var/lib/docker/aufs/mnt/b4431dc2739936f1df6387e337f5a0c99cf051900c896bd7fd46a870ce61c873 rw,relatime - aufs none rw,si=9b4a76401263539c +174 15 0:3507 / /var/lib/docker/aufs/mnt/5f37830e5a02561ab8c67ea3113137ba69f67a60e41c05cb0e7a0edaa1925b24 rw,relatime - aufs none rw,si=9b4a76401263639c +184 15 0:3508 / 
/var/lib/docker/aufs/mnt/62ea10b957e6533538a4633a1e1d678502f50ddcdd354b2ca275c54dd7a7793a rw,relatime - aufs none rw,si=9b4a76401263039c +187 15 0:3509 / /var/lib/docker/aufs/mnt/d56ee9d44195fe390e042fda75ec15af5132adb6d5c69468fa8792f4e54a6953 rw,relatime - aufs none rw,si=9b4a76401263239c +188 15 0:3510 / /var/lib/docker/aufs/mnt/6a300930673174549c2b62f36c933f0332a20735978c007c805a301f897146c5 rw,relatime - aufs none rw,si=9b4a76455d4c539c +189 15 0:3511 / /var/lib/docker/aufs/mnt/64496c45c84d348c24d410015456d101601c30cab4d1998c395591caf7e57a70 rw,relatime - aufs none rw,si=9b4a76455d4c639c +190 15 0:3512 / /var/lib/docker/aufs/mnt/65a6a645883fe97a7422cd5e71ebe0bc17c8e6302a5361edf52e89747387e908 rw,relatime - aufs none rw,si=9b4a76455d4c039c +191 15 0:3513 / /var/lib/docker/aufs/mnt/672be40695f7b6e13b0a3ed9fc996c73727dede3481f58155950fcfad57ed616 rw,relatime - aufs none rw,si=9b4a76455d4c239c +192 15 0:3514 / /var/lib/docker/aufs/mnt/d42438acb2bfb2169e1c0d8e917fc824f7c85d336dadb0b0af36dfe0f001b3ba rw,relatime - aufs none rw,si=9b4a7642bfded39c +193 15 0:3515 / /var/lib/docker/aufs/mnt/b48a54abf26d01cb2ddd908b1ed6034d17397c1341bf0eb2b251a3e5b79be854 rw,relatime - aufs none rw,si=9b4a7642bfdee39c +194 15 0:3516 / /var/lib/docker/aufs/mnt/76f27134491f052bfb87f59092126e53ef875d6851990e59195a9da16a9412f8 rw,relatime - aufs none rw,si=9b4a7642bfde839c +195 15 0:3517 / /var/lib/docker/aufs/mnt/6bd626a5462b4f8a8e1cc7d10351326dca97a59b2758e5ea549a4f6350ce8a90 rw,relatime - aufs none rw,si=9b4a7642bfdea39c +196 15 0:3518 / /var/lib/docker/aufs/mnt/f1fe3549dbd6f5ca615e9139d9b53f0c83a3b825565df37628eacc13e70cbd6d rw,relatime - aufs none rw,si=9b4a7642bfdf539c +197 15 0:3519 / /var/lib/docker/aufs/mnt/6d0458c8426a9e93d58d0625737e6122e725c9408488ed9e3e649a9984e15c34 rw,relatime - aufs none rw,si=9b4a7642bfdf639c +198 15 0:3520 / /var/lib/docker/aufs/mnt/6e4c97db83aa82145c9cf2bafc20d500c0b5389643b689e3ae84188c270a48c5 rw,relatime - aufs none rw,si=9b4a7642bfdf039c +199 15 0:3521 / /var/lib/docker/aufs/mnt/eb94d6498f2c5969eaa9fa11ac2934f1ab90ef88e2d002258dca08e5ba74ea27 rw,relatime - aufs none rw,si=9b4a7642bfdf239c +200 15 0:3522 / /var/lib/docker/aufs/mnt/fe3f88f0c511608a2eec5f13a98703aa16e55dbf930309723d8a37101f539fe1 rw,relatime - aufs none rw,si=9b4a7642bfc3539c +201 15 0:3523 / /var/lib/docker/aufs/mnt/6f40c229fb9cad85fabf4b64a2640a5403ec03fe5ac1a57d0609fb8b606b9c83 rw,relatime - aufs none rw,si=9b4a7642bfc3639c +202 15 0:3524 / /var/lib/docker/aufs/mnt/7513e9131f7a8acf58ff15248237feb767c78732ca46e159f4d791e6ef031dbc rw,relatime - aufs none rw,si=9b4a7642bfc3039c +203 15 0:3525 / /var/lib/docker/aufs/mnt/79f48b00aa713cdf809c6bb7c7cb911b66e9a8076c81d6c9d2504139984ea2da rw,relatime - aufs none rw,si=9b4a7642bfc3239c +204 15 0:3526 / /var/lib/docker/aufs/mnt/c3680418350d11358f0a96c676bc5aa74fa00a7c89e629ef5909d3557b060300 rw,relatime - aufs none rw,si=9b4a7642f47cd39c +205 15 0:3527 / /var/lib/docker/aufs/mnt/7a1744dd350d7fcc0cccb6f1757ca4cbe5453f203a5888b0f1014d96ad5a5ef9 rw,relatime - aufs none rw,si=9b4a7642f47ce39c +206 15 0:3528 / /var/lib/docker/aufs/mnt/7fa99662db046be9f03c33c35251afda9ccdc0085636bbba1d90592cec3ff68d rw,relatime - aufs none rw,si=9b4a7642f47c839c +207 15 0:3529 / /var/lib/docker/aufs/mnt/f815021ef20da9c9b056bd1d52d8aaf6e2c0c19f11122fc793eb2b04eb995e35 rw,relatime - aufs none rw,si=9b4a7642f47ca39c +208 15 0:3530 / /var/lib/docker/aufs/mnt/801086ae3110192d601dfcebdba2db92e86ce6b6a9dba6678ea04488e4513669 rw,relatime - aufs none rw,si=9b4a7642dc6dd39c +209 15 0:3531 / 
/var/lib/docker/aufs/mnt/822ba7db69f21daddda87c01cfbfbf73013fc03a879daf96d16cdde6f9b1fbd6 rw,relatime - aufs none rw,si=9b4a7642dc6de39c +210 15 0:3532 / /var/lib/docker/aufs/mnt/834227c1a950fef8cae3827489129d0dd220541e60c6b731caaa765bf2e6a199 rw,relatime - aufs none rw,si=9b4a7642dc6d839c +211 15 0:3533 / /var/lib/docker/aufs/mnt/83dccbc385299bd1c7cf19326e791b33a544eea7b4cdfb6db70ea94eed4389fb rw,relatime - aufs none rw,si=9b4a7642dc6da39c +212 15 0:3534 / /var/lib/docker/aufs/mnt/f1b8e6f0e7c8928b5dcdab944db89306ebcae3e0b32f9ff40d2daa8329f21600 rw,relatime - aufs none rw,si=9b4a7645a126039c +213 15 0:3535 / /var/lib/docker/aufs/mnt/970efb262c7a020c2404cbcc5b3259efba0d110a786079faeef05bc2952abf3a rw,relatime - aufs none rw,si=9b4a7644c8ed139c +214 15 0:3536 / /var/lib/docker/aufs/mnt/84b6d73af7450f3117a77e15a5ca1255871fea6182cd8e8a7be6bc744be18c2c rw,relatime - aufs none rw,si=9b4a76406559139c +215 15 0:3537 / /var/lib/docker/aufs/mnt/88be2716e026bc681b5e63fe7942068773efbd0b6e901ca7ba441412006a96b6 rw,relatime - aufs none rw,si=9b4a76406559339c +216 15 0:3538 / /var/lib/docker/aufs/mnt/c81939aa166ce50cd8bca5cfbbcc420a78e0318dd5cd7c755209b9166a00a752 rw,relatime - aufs none rw,si=9b4a76406559239c +217 15 0:3539 / /var/lib/docker/aufs/mnt/e0f241645d64b7dc5ff6a8414087cca226be08fb54ce987d1d1f6350c57083aa rw,relatime - aufs none rw,si=9b4a7647cfc0f39c +218 15 0:3540 / /var/lib/docker/aufs/mnt/e10e2bf75234ed51d8a6a4bb39e465404fecbe318e54400d3879cdb2b0679c78 rw,relatime - aufs none rw,si=9b4a7647cfc0939c +219 15 0:3541 / /var/lib/docker/aufs/mnt/8f71d74c8cfc3228b82564aa9f09b2e576cff0083ddfb6aa5cb350346063f080 rw,relatime - aufs none rw,si=9b4a7647cfc0a39c +220 15 0:3542 / /var/lib/docker/aufs/mnt/9159f1eba2aef7f5205cc18d015cda7f5933cd29bba3b1b8aed5ccb5824c69ee rw,relatime - aufs none rw,si=9b4a76468cedd39c +221 15 0:3543 / /var/lib/docker/aufs/mnt/932cad71e652e048e500d9fbb5b8ea4fc9a269d42a3134ce527ceef42a2be56b rw,relatime - aufs none rw,si=9b4a76468cede39c +222 15 0:3544 / /var/lib/docker/aufs/mnt/bf1e1b5f529e8943cc0144ee86dbaaa37885c1ddffcef29537e0078ee7dd316a rw,relatime - aufs none rw,si=9b4a76468ced839c +223 15 0:3545 / /var/lib/docker/aufs/mnt/949d93ecf3322e09f858ce81d5f4b434068ec44ff84c375de03104f7b45ee955 rw,relatime - aufs none rw,si=9b4a76468ceda39c +224 15 0:3546 / /var/lib/docker/aufs/mnt/d65c6087f92dc2a3841b5251d2fe9ca07d4c6e5b021597692479740816e4e2a1 rw,relatime - aufs none rw,si=9b4a7645a126239c +225 15 0:3547 / /var/lib/docker/aufs/mnt/98a0153119d0651c193d053d254f6e16a68345a141baa80c87ae487e9d33f290 rw,relatime - aufs none rw,si=9b4a7640787cf39c +226 15 0:3548 / /var/lib/docker/aufs/mnt/99daf7fe5847c017392f6e59aa9706b3dfdd9e6d1ba11dae0f7fffde0a60b5e5 rw,relatime - aufs none rw,si=9b4a7640787c839c +227 15 0:3549 / /var/lib/docker/aufs/mnt/9ad1f2fe8a5599d4e10c5a6effa7f03d932d4e92ee13149031a372087a359079 rw,relatime - aufs none rw,si=9b4a7640787ca39c +228 15 0:3550 / /var/lib/docker/aufs/mnt/c26d64494da782ddac26f8370d86ac93e7c1666d88a7b99110fc86b35ea6a85d rw,relatime - aufs none rw,si=9b4a7642fc6b539c +229 15 0:3551 / /var/lib/docker/aufs/mnt/a49e4a8275133c230ec640997f35f172312eb0ea5bd2bbe10abf34aae98f30eb rw,relatime - aufs none rw,si=9b4a7642fc6b639c +230 15 0:3552 / /var/lib/docker/aufs/mnt/b5e2740c867ed843025f49d84e8d769de9e8e6039b3c8cb0735b5bf358994bc7 rw,relatime - aufs none rw,si=9b4a7642fc6b039c +231 15 0:3553 / /var/lib/docker/aufs/mnt/a826fdcf3a7039b30570054579b65763db605a314275d7aef31b872c13311b4b rw,relatime - aufs none rw,si=9b4a7642fc6b239c +232 15 0:3554 / 
/var/lib/docker/aufs/mnt/addf3025babf5e43b5a3f4a0da7ad863dda3c01fb8365c58fd8d28bb61dc11bc rw,relatime - aufs none rw,si=9b4a76407871d39c +233 15 0:3555 / /var/lib/docker/aufs/mnt/c5b6c6813ab3e5ebdc6d22cb2a3d3106a62095f2c298be52b07a3b0fa20ff690 rw,relatime - aufs none rw,si=9b4a76407871e39c +234 15 0:3556 / /var/lib/docker/aufs/mnt/af0609eaaf64e2392060cb46f5a9f3d681a219bb4c651d4f015bf573fbe6c4cf rw,relatime - aufs none rw,si=9b4a76407871839c +235 15 0:3557 / /var/lib/docker/aufs/mnt/e7f20e3c37ecad39cd90a97cd3549466d0d106ce4f0a930b8495442634fa4a1f rw,relatime - aufs none rw,si=9b4a76407871a39c +237 15 0:3559 / /var/lib/docker/aufs/mnt/b57a53d440ffd0c1295804fa68cdde35d2fed5409484627e71b9c37e4249fd5c rw,relatime - aufs none rw,si=9b4a76444445a39c +238 15 0:3560 / /var/lib/docker/aufs/mnt/b5e7d7b8f35e47efbba3d80c5d722f5e7bd43e54c824e54b4a4b351714d36d42 rw,relatime - aufs none rw,si=9b4a7647932d439c +239 15 0:3561 / /var/lib/docker/aufs/mnt/f1b136def157e9465640658f277f3347de593c6ae76412a2e79f7002f091cae2 rw,relatime - aufs none rw,si=9b4a76445abcd39c +240 15 0:3562 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=9b4a7644403b339c +241 15 0:3563 / /var/lib/docker/aufs/mnt/b89b140cdbc95063761864e0a23346207fa27ee4c5c63a1ae85c9069a9d9cf1d rw,relatime - aufs none rw,si=9b4a7644aa19739c +242 15 0:3564 / /var/lib/docker/aufs/mnt/bc6a69ed51c07f5228f6b4f161c892e6a949c0e7e86a9c3432049d4c0e5cd298 rw,relatime - aufs none rw,si=9b4a7644aa19139c +243 15 0:3565 / /var/lib/docker/aufs/mnt/be4e2ba3f136933e239f7cf3d136f484fb9004f1fbdfee24a62a2c7b0ab30670 rw,relatime - aufs none rw,si=9b4a7644aa19339c +244 15 0:3566 / /var/lib/docker/aufs/mnt/e04ca1a4a5171e30d20f0c92f90a50b8b6f8600af5459c4b4fb25e42e864dfe1 rw,relatime - aufs none rw,si=9b4a7647932d139c +245 15 0:3567 / /var/lib/docker/aufs/mnt/be61576b31db893129aaffcd3dcb5ce35e49c4b71b30c392a78609a45c7323d8 rw,relatime - aufs none rw,si=9b4a7642d85f739c +246 15 0:3568 / /var/lib/docker/aufs/mnt/dda42c191e56becf672327658ab84fcb563322db3764b91c2fefe4aaef04c624 rw,relatime - aufs none rw,si=9b4a7642d85f139c +247 15 0:3569 / /var/lib/docker/aufs/mnt/c0a7995053330f3d88969247a2e72b07e2dd692133f5668a4a35ea3905561072 rw,relatime - aufs none rw,si=9b4a7642d85f339c +249 15 0:3571 / /var/lib/docker/aufs/mnt/c3594b2e5f08c59ff5ed338a1ba1eceeeb1f7fc5d180068338110c00b1eb8502 rw,relatime - aufs none rw,si=9b4a7642738c739c +250 15 0:3572 / /var/lib/docker/aufs/mnt/c58dce03a0ab0a7588393880379dc3bce9f96ec08ed3f99cf1555260ff0031e8 rw,relatime - aufs none rw,si=9b4a7642738c139c +251 15 0:3573 / /var/lib/docker/aufs/mnt/c73e9f1d109c9d14cb36e1c7489df85649be3911116d76c2fd3648ec8fd94e23 rw,relatime - aufs none rw,si=9b4a7642738c339c +252 15 0:3574 / /var/lib/docker/aufs/mnt/c9eef28c344877cd68aa09e543c0710ab2b305a0ff96dbb859bfa7808c3e8d01 rw,relatime - aufs none rw,si=9b4a7642d85f439c +253 15 0:3575 / /var/lib/docker/aufs/mnt/feb67148f548d70cb7484f2aaad2a86051cd6867a561741a2f13b552457d666e rw,relatime - aufs none rw,si=9b4a76468c55739c +254 15 0:3576 / /var/lib/docker/aufs/mnt/cdf1f96c36d35a96041a896bf398ec0f7dc3b0fb0643612a0f4b6ff96e04e1bb rw,relatime - aufs none rw,si=9b4a76468c55139c +255 15 0:3577 / /var/lib/docker/aufs/mnt/ec6e505872353268451ac4bc034c1df00f3bae4a3ea2261c6e48f7bd5417c1b3 rw,relatime - aufs none rw,si=9b4a76468c55339c +256 15 0:3578 / /var/lib/docker/aufs/mnt/d6dc8aca64efd90e0bc10274001882d0efb310d42ccbf5712b99b169053b8b1a rw,relatime - aufs none rw,si=9b4a7642738c439c +257 15 0:3579 / 
/var/lib/docker/aufs/mnt/d712594e2ff6eaeb895bfd150d694bd1305fb927e7a186b2dab7df2ea95f8f81 rw,relatime - aufs none rw,si=9b4a76401268f39c +259 15 0:3581 / /var/lib/docker/aufs/mnt/dbfa1174cd78cde2d7410eae442af0b416c4a0e6f87ed4ff1e9f169a0029abc0 rw,relatime - aufs none rw,si=9b4a76401268b39c +260 15 0:3582 / /var/lib/docker/aufs/mnt/e883f5a82316d7856fbe93ee8c0af5a920b7079619dd95c4ffd88bbd309d28dd rw,relatime - aufs none rw,si=9b4a76468c55439c +261 15 0:3583 / /var/lib/docker/aufs/mnt/fdec3eff581c4fc2b09f87befa2fa021f3f2d373bea636a87f1fb5b367d6347a rw,relatime - aufs none rw,si=9b4a7644aa1af39c +262 15 0:3584 / /var/lib/docker/aufs/mnt/ef764e26712184653067ecf7afea18a80854c41331ca0f0ef03e1bacf90a6ffc rw,relatime - aufs none rw,si=9b4a7644aa1a939c +263 15 0:3585 / /var/lib/docker/aufs/mnt/f3176b40c41fce8ce6942936359a2001a6f1b5c1bb40ee224186db0789ec2f76 rw,relatime - aufs none rw,si=9b4a7644aa1ab39c +264 15 0:3586 / /var/lib/docker/aufs/mnt/f5daf06785d3565c6dd18ea7d953d9a8b9606107781e63270fe0514508736e6a rw,relatime - aufs none rw,si=9b4a76401268c39c +58 15 0:3587 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8-init rw,relatime - aufs none rw,si=9b4a76444445839c +67 15 0:3588 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8 rw,relatime - aufs none rw,si=9b4a7644badd339c +265 15 0:3610 / /var/lib/docker/aufs/mnt/e812472cd2c8c4748d1ef71fac4e77e50d661b9349abe66ce3e23511ed44f414 rw,relatime - aufs none rw,si=9b4a76427937d39c +270 15 0:3615 / /var/lib/docker/aufs/mnt/997636e7c5c9d0d1376a217e295c14c205350b62bc12052804fb5f90abe6f183 rw,relatime - aufs none rw,si=9b4a76406540739c +273 15 0:3618 / /var/lib/docker/aufs/mnt/d5794d080417b6e52e69227c3873e0e4c1ff0d5a845ebe3860ec2f89a47a2a1e rw,relatime - aufs none rw,si=9b4a76454814039c +278 15 0:3623 / /var/lib/docker/aufs/mnt/586bdd48baced671bb19bc4d294ec325f26c55545ae267db426424f157d59c48 rw,relatime - aufs none rw,si=9b4a7644b439f39c +281 15 0:3626 / /var/lib/docker/aufs/mnt/69739d022f89f8586908bbd5edbbdd95ea5256356f177f9ffcc6ef9c0ea752d2 rw,relatime - aufs none rw,si=9b4a7644a0f1b39c +286 15 0:3631 / /var/lib/docker/aufs/mnt/ff28c27d5f894363993622de26d5dd352dba072f219e4691d6498c19bbbc15a9 rw,relatime - aufs none rw,si=9b4a7642265b339c +289 15 0:3634 / /var/lib/docker/aufs/mnt/aa128fe0e64fdede333aa48fd9de39530c91a9244a0f0649a3c411c61e372daa rw,relatime - aufs none rw,si=9b4a764012ada39c +99 15 8:33 / /media/REMOVE\040ME rw,nosuid,nodev,relatime - fuseblk /dev/sdc1 rw,user_id=0,group_id=0,allow_other,blksize=4096` +) + +func TestParseFedoraMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(fedoraMountinfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} + +func TestParseUbuntuMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(ubuntuMountInfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} + +func TestParseGentooMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(gentooMountinfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} + +func TestParseFedoraMountinfoFields(t *testing.T) { + r := bytes.NewBuffer([]byte(fedoraMountinfo)) + infos, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } + expectedLength := 58 + if len(infos) != expectedLength { + t.Fatalf("Expected %d entries, got %d", expectedLength, len(infos)) + } + mi := Info{ + ID: 15, + Parent: 35, + Major: 0, + Minor: 3, + Root: "/", + Mountpoint: "/proc", + Opts: "rw,nosuid,nodev,noexec,relatime", + Optional: "shared:5", + 
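+ // the remaining fields correspond to the tail of the mountinfo line ("- proc proc rw"): + // filesystem type, mount source, and per-superblock (vfs) options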
Fstype: "proc", + Source: "proc", + VfsOpts: "rw", + } + + if *infos[0] != mi { + t.Fatalf("expected %#v, got %#v", mi, infos[0]) + } +} diff --git a/pkg/mount/mountinfo_unsupported.go b/pkg/mount/mountinfo_unsupported.go new file mode 100644 index 00000000..8245f01d --- /dev/null +++ b/pkg/mount/mountinfo_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux,!freebsd freebsd,!cgo + +package mount + +import ( + "fmt" + "runtime" +) + +func parseMountTable() ([]*Info, error) { + return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} diff --git a/pkg/mount/sharedsubtree_linux.go b/pkg/mount/sharedsubtree_linux.go new file mode 100644 index 00000000..47303bbc --- /dev/null +++ b/pkg/mount/sharedsubtree_linux.go @@ -0,0 +1,70 @@ +// +build linux + +package mount + +// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "shared") +} + +// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "rshared") +} + +// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. +// See the supported options in flags.go for further reference. +func MakePrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "private") +} + +// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeRPrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "rprivate") +} + +// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "slave") +} + +// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "rslave") +} + +// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "unbindable") +} + +// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount +// option enabled. See the supported options in flags.go for further reference. 
+func MakeRUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "runbindable") +} + +func ensureMountedAs(mountPoint, options string) error { + mounted, err := Mounted(mountPoint) + if err != nil { + return err + } + + if !mounted { + if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { + return err + } + } + mounted, err = Mounted(mountPoint) + if err != nil { + return err + } + + return ForceMount("", mountPoint, "none", options) +} diff --git a/pkg/mount/sharedsubtree_linux_test.go b/pkg/mount/sharedsubtree_linux_test.go new file mode 100644 index 00000000..4a8d22f0 --- /dev/null +++ b/pkg/mount/sharedsubtree_linux_test.go @@ -0,0 +1,331 @@ +// +build linux + +package mount + +import ( + "os" + "path" + "syscall" + "testing" +) + +// nothing is propagated in or out +func TestSubtreePrivate(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + outside1Dir = path.Join(tmp, "outside1") + outside2Dir = path.Join(tmp, "outside2") + + outside1Path = path.Join(outside1Dir, "file.txt") + outside2Path = path.Join(outside2Dir, "file.txt") + outside1CheckPath = path.Join(targetDir, "a", "file.txt") + outside2CheckPath = path.Join(sourceDir, "b", "file.txt") + ) + if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside1Dir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside2Dir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outside1Path); err != nil { + t.Fatal(err) + } + if err := createFile(outside2Path); err != nil { + t.Fatal(err) + } + + // mount the shared directory to a target + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // next, make the target private + if err := MakePrivate(targetDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an outside path to a mounted path inside the _source_ + if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(sourceDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _target_ + if _, err := os.Stat(outside1CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not be visible, but is", outside1CheckPath) + } + + // next mount outside2Dir into the _target_ + if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(targetDir, "b")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _source_ + if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not be visible, but is", outside2CheckPath) + } +} + +// Testing that when a target is a shared mount, +// then child mounts propagate to the source +func TestSubtreeShared(t *testing.T) { + tmp 
:= path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + outsideDir = path.Join(tmp, "outside") + + outsidePath = path.Join(outsideDir, "file.txt") + sourceCheckPath = path.Join(sourceDir, "a", "file.txt") + ) + + if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outsideDir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outsidePath); err != nil { + t.Fatal(err) + } + + // mount the source as shared + if err := MakeShared(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // mount the shared directory to a target + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an outside path to a mounted path inside the target + if err := Mount(outsideDir, path.Join(targetDir, "a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(targetDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // NOW, check that the file from the outside directory is available in the source directory + if _, err := os.Stat(sourceCheckPath); err != nil { + t.Fatal(err) + } +} + +// testing that mounts to a shared source show up in the slave target, +// and that mounts into a slave target do _not_ show up in the shared source +func TestSubtreeSharedSlave(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + outside1Dir = path.Join(tmp, "outside1") + outside2Dir = path.Join(tmp, "outside2") + + outside1Path = path.Join(outside1Dir, "file.txt") + outside2Path = path.Join(outside2Dir, "file.txt") + outside1CheckPath = path.Join(targetDir, "a", "file.txt") + outside2CheckPath = path.Join(sourceDir, "b", "file.txt") + ) + if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside1Dir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside2Dir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outside1Path); err != nil { + t.Fatal(err) + } + if err := createFile(outside2Path); err != nil { + t.Fatal(err) + } + + // mount the source as shared + if err := MakeShared(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // mount the shared directory to a target + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // next, make the target slave + if err := MakeSlave(targetDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an outside path to a mounted path inside the _source_ + if err := Mount(outside1Dir, path.Join(sourceDir, 
"a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(sourceDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_ show in the _target_ + if _, err := os.Stat(outside1CheckPath); err != nil { + t.Fatal(err) + } + + // next mount outside2Dir into the _target_ + if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(targetDir, "b")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _source_ + if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not be visible, but is", outside2CheckPath) + } +} + +func TestSubtreeUnbindable(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + ) + if err := os.MkdirAll(sourceDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(targetDir, 0777); err != nil { + t.Fatal(err) + } + + // next, make the source unbindable + if err := MakeUnbindable(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // then attempt to mount it to target. It should fail + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil && err != syscall.EINVAL { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not have been bindable", sourceDir) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() +} + +func createFile(path string) error { + f, err := os.Create(path) + if err != nil { + return err + } + f.WriteString("hello world!") + return f.Close() +} diff --git a/pkg/namesgenerator/names-generator.go b/pkg/namesgenerator/names-generator.go new file mode 100644 index 00000000..105a122a --- /dev/null +++ b/pkg/namesgenerator/names-generator.go @@ -0,0 +1,375 @@ +package namesgenerator + +import ( + "fmt" + "math/rand" + + "github.com/docker/docker/pkg/random" +) + +var ( + left = [...]string{ + "admiring", + "adoring", + "agitated", + "angry", + "backstabbing", + "berserk", + "boring", + "clever", + "cocky", + "compassionate", + "condescending", + "cranky", + "desperate", + "determined", + "distracted", + "dreamy", + "drunk", + "ecstatic", + "elated", + "elegant", + "evil", + "fervent", + "focused", + "furious", + "gloomy", + "goofy", + "grave", + "happy", + "high", + "hopeful", + "hungry", + "insane", + "jolly", + "jovial", + "kickass", + "lonely", + "loving", + "mad", + "modest", + "naughty", + "nostalgic", + "pensive", + "prickly", + "reverent", + "romantic", + "sad", + "serene", + "sharp", + "sick", + "silly", + "sleepy", + "stoic", + "stupefied", + "suspicious", + "tender", + "thirsty", + "trusting", + } + + // Docker, starting from 0.7.x, generates names from notable scientists and hackers. + right = [...]string{ + // Muhammad ibn Jābir al-ḤarrānÄ« al-BattānÄ« was a founding father of astronomy. 
https://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB + "albattani", + + // June Almeida - Scottish virologist who took the first pictures of the rubella virus - https://en.wikipedia.org/wiki/June_Almeida + "almeida", + + // Archimedes was a physicist, engineer and mathematician who invented too many things to list them here. https://en.wikipedia.org/wiki/Archimedes + "archimedes", + + // Maria Ardinghelli - Italian translator, mathematician and physicist - https://en.wikipedia.org/wiki/Maria_Ardinghelli + "ardinghelli", + + // Aryabhata - Ancient Indian mathematician-astronomer during 476-550 CE https://en.wikipedia.org/wiki/Aryabhata + "aryabhata", + + // Charles Babbage invented the concept of a programmable computer. https://en.wikipedia.org/wiki/Charles_Babbage. + "babbage", + + // Stefan Banach - Polish mathematician, was one of the founders of modern functional analysis. https://en.wikipedia.org/wiki/Stefan_Banach + "banach", + + // William Shockley, Walter Houser Brattain and John Bardeen co-invented the transistor (thanks Brian Goff). + // - https://en.wikipedia.org/wiki/John_Bardeen + // - https://en.wikipedia.org/wiki/Walter_Houser_Brattain + // - https://en.wikipedia.org/wiki/William_Shockley + "bardeen", + "brattain", + "shockley", + + // Jean Bartik, born Betty Jean Jennings, was one of the original programmers for the ENIAC computer. https://en.wikipedia.org/wiki/Jean_Bartik + "bartik", + + // Alexander Graham Bell - an eminent Scottish-born scientist, inventor, engineer and innovator who is credited with inventing the first practical telephone - https://en.wikipedia.org/wiki/Alexander_Graham_Bell + "bell", + + // Homi J Bhabha - Indian nuclear physicist, founding director, and professor of physics at the Tata Institute of Fundamental Research. Colloquially known as "father of Indian nuclear programme" - https://en.wikipedia.org/wiki/Homi_J._Bhabha + "bhabha", + + // Bhaskara II - Ancient Indian mathematician-astronomer whose work on calculus predates Newton and Leibniz by over half a millennium - https://en.wikipedia.org/wiki/Bh%C4%81skara_II#Calculus + "bhaskara", + + // Elizabeth Blackwell - American doctor and first American woman to receive a medical degree - https://en.wikipedia.org/wiki/Elizabeth_Blackwell + "blackwell", + + // Niels Bohr is the father of quantum theory. https://en.wikipedia.org/wiki/Niels_Bohr. + "bohr", + + // Satyendra Nath Bose - He provided the foundation for Bose–Einstein statistics and the theory of the Bose–Einstein condensate. - https://en.wikipedia.org/wiki/Satyendra_Nath_Bose + "bose", + + // Brahmagupta - Ancient Indian mathematician during 598-670 CE who gave rules to compute with zero - https://en.wikipedia.org/wiki/Brahmagupta#Zero + "brahmagupta", + + // Emmett Brown invented time travel. https://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff) + "brown", + + // Rachel Carson - American marine biologist and conservationist, her book Silent Spring and other writings are credited with advancing the global environmental movement. https://en.wikipedia.org/wiki/Rachel_Carson + "carson", + + // Subrahmanyan Chandrasekhar - Astrophysicist known for his mathematical theory on different stages and evolution in structures of the stars. 
He has won nobel prize for physics - https://en.wikipedia.org/wiki/Subrahmanyan_Chandrasekhar + "chandrasekhar", + + // Jane Colden - American botanist widely considered the first female American botanist - https://en.wikipedia.org/wiki/Jane_Colden + "colden", + + // Gerty Theresa Cori - American biochemist who became the third woman—and first American woman—to win a Nobel Prize in science, and the first woman to be awarded the Nobel Prize in Physiology or Medicine. Cori was born in Prague. https://en.wikipedia.org/wiki/Gerty_Cori + "cori", + + // Seymour Roger Cray was an American electrical engineer and supercomputer architect who designed a series of computers that were the fastest in the world for decades. https://en.wikipedia.org/wiki/Seymour_Cray + "cray", + + // Marie Curie discovered radioactivity. https://en.wikipedia.org/wiki/Marie_Curie. + "curie", + + // Charles Darwin established the principles of natural evolution. https://en.wikipedia.org/wiki/Charles_Darwin. + "darwin", + + // Leonardo Da Vinci invented too many things to list here. https://en.wikipedia.org/wiki/Leonardo_da_Vinci. + "davinci", + + // Albert Einstein invented the general theory of relativity. https://en.wikipedia.org/wiki/Albert_Einstein + "einstein", + + // Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - https://en.wikipedia.org/wiki/Gertrude_Elion + "elion", + + // Douglas Engelbart gave the mother of all demos: https://en.wikipedia.org/wiki/Douglas_Engelbart + "engelbart", + + // Euclid invented geometry. https://en.wikipedia.org/wiki/Euclid + "euclid", + + // Pierre de Fermat pioneered several aspects of modern mathematics. https://en.wikipedia.org/wiki/Pierre_de_Fermat + "fermat", + + // Enrico Fermi invented the first nuclear reactor. https://en.wikipedia.org/wiki/Enrico_Fermi. + "fermi", + + // Richard Feynman was a key contributor to quantum mechanics and particle physics. https://en.wikipedia.org/wiki/Richard_Feynman + "feynman", + + // Benjamin Franklin is famous for his experiments in electricity and the invention of the lightning rod. + "franklin", + + // Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. https://en.wikipedia.org/wiki/Galileo_Galilei + "galileo", + + // Adele Goldstine, born Adele Katz, wrote the complete technical description for the first electronic digital computer, ENIAC. https://en.wikipedia.org/wiki/Adele_Goldstine + "goldstine", + + // Jane Goodall - British primatologist, ethologist, and anthropologist who is considered to be the world's foremost expert on chimpanzees - https://en.wikipedia.org/wiki/Jane_Goodall + "goodall", + + // Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. https://en.wikipedia.org/wiki/Stephen_Hawking + "hawking", + + // Werner Heisenberg was a founding father of quantum mechanics. https://en.wikipedia.org/wiki/Werner_Heisenberg + "heisenberg", + + // Dorothy Hodgkin was a British biochemist, credited with the development of protein crystallography. She was awarded the Nobel Prize in Chemistry in 1964. https://en.wikipedia.org/wiki/Dorothy_Hodgkin + "hodgkin", + + // Erna Schneider Hoover revolutionized modern communication by inventing a computerized telephon switching method. 
https://en.wikipedia.org/wiki/Erna_Schneider_Hoover + "hoover", + + // Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing the term "debugging" for fixing computer glitches. https://en.wikipedia.org/wiki/Grace_Hopper + "hopper", + + // Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - https://en.wikipedia.org/wiki/Hypatia + "hypatia", + + // Yeong-Sil Jang was a Korean scientist and astronomer during the Joseon Dynasty; he invented the first metal printing press and water gauge. https://en.wikipedia.org/wiki/Jang_Yeong-sil + "jang", + + // Karen Spärck Jones came up with the concept of inverse document frequency, which is used in most search engines today. https://en.wikipedia.org/wiki/Karen_Sp%C3%A4rck_Jones + "jones", + + // Jack Kilby and Robert Noyce invented silicon integrated circuits and gave Silicon Valley its name. + // - https://en.wikipedia.org/wiki/Jack_Kilby + // - https://en.wikipedia.org/wiki/Robert_Noyce + "kilby", + "noyce", + + // A. P. J. Abdul Kalam - Indian scientist known as the Missile Man of India for his work on the development of ballistic missile and launch vehicle technology - https://en.wikipedia.org/wiki/A._P._J._Abdul_Kalam + "kalam", + + // Har Gobind Khorana - Indian-American biochemist who shared the 1968 Nobel Prize in Physiology or Medicine - https://en.wikipedia.org/wiki/Har_Gobind_Khorana + "khorana", + + // Maria Kirch - German astronomer and first woman to discover a comet - https://en.wikipedia.org/wiki/Maria_Margarethe_Kirch + "kirch", + + // Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - https://en.wikipedia.org/wiki/Sofia_Kovalevskaya + "kowalevski", + + // Marie-Jeanne de Lalande - French astronomer, mathematician and cataloguer of stars - https://en.wikipedia.org/wiki/Marie-Jeanne_de_Lalande + "lalande", + + // Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - https://en.wikipedia.org/wiki/Mary_Leakey + "leakey", + + // Ada Lovelace invented the first algorithm. https://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull) + "lovelace", + + // Auguste and Louis Lumière - the first filmmakers in history - https://en.wikipedia.org/wiki/Auguste_and_Louis_Lumi%C3%A8re + "lumiere", + + // Mahavira - Ancient Indian mathematician during 9th century AD who discovered basic algebraic identities - https://en.wikipedia.org/wiki/Mah%C4%81v%C4%ABra_(mathematician) + "mahavira", + + // Maria Mayer - American theoretical physicist and Nobel laureate in Physics for proposing the nuclear shell model of the atomic nucleus - https://en.wikipedia.org/wiki/Maria_Mayer + "mayer", + + // John McCarthy invented LISP: https://en.wikipedia.org/wiki/John_McCarthy_(computer_scientist) + "mccarthy", + + // Barbara McClintock - a distinguished American cytogeneticist, 1983 Nobel Laureate in Physiology or Medicine for discovering transposons. https://en.wikipedia.org/wiki/Barbara_McClintock + "mcclintock", + + // Malcolm McLean invented the modern shipping container: https://en.wikipedia.org/wiki/Malcom_McLean + "mclean", + + // Lise Meitner - Austrian/Swedish physicist who was involved in the discovery of nuclear fission.
The element meitnerium is named after her - https://en.wikipedia.org/wiki/Lise_Meitner + "meitner", + + // Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - https://en.wikipedia.org/wiki/Johanna_Mestorf + "mestorf", + + // Samuel Morse - contributed to the invention of a single-wire telegraph system based on European telegraphs and was a co-developer of the Morse code - https://en.wikipedia.org/wiki/Samuel_Morse + "morse", + + // Isaac Newton invented classical mechanics and modern optics. https://en.wikipedia.org/wiki/Isaac_Newton + "newton", + + // Alfred Nobel - a Swedish chemist, engineer, innovator, and armaments manufacturer (inventor of dynamite) - https://en.wikipedia.org/wiki/Alfred_Nobel + "nobel", + + // Panini - Ancient Indian linguist and grammarian from the 4th century BCE who worked on the world's first formal system - https://en.wikipedia.org/wiki/P%C4%81%E1%B9%87ini#Comparison_with_modern_formal_systems + "panini", + + // Cecilia Payne-Gaposchkin was an astronomer and astrophysicist who, in 1925, proposed in her Ph.D. thesis an explanation for the composition of stars in terms of the relative abundances of hydrogen and helium. https://en.wikipedia.org/wiki/Cecilia_Payne-Gaposchkin + "payne", + + // Ambroise Pare invented modern surgery. https://en.wikipedia.org/wiki/Ambroise_Par%C3%A9 + "pare", + + // Louis Pasteur discovered vaccination, fermentation and pasteurization. https://en.wikipedia.org/wiki/Louis_Pasteur. + "pasteur", + + // Radia Perlman is a software designer and network engineer, most famous for her invention of the spanning-tree protocol (STP). https://en.wikipedia.org/wiki/Radia_Perlman + "perlman", + + // Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. https://en.wikipedia.org/wiki/Rob_Pike + "pike", + + // Henri Poincaré made fundamental contributions in several fields of mathematics. https://en.wikipedia.org/wiki/Henri_Poincar%C3%A9 + "poincare", + + // Laura Poitras is a director and producer whose work, made possible by open source crypto tools, advances the causes of truth and freedom of information by reporting disclosures by whistleblowers such as Edward Snowden. https://en.wikipedia.org/wiki/Laura_Poitras + "poitras", + + // Claudius Ptolemy - a Greco-Egyptian writer of Alexandria, known as a mathematician, astronomer, geographer, astrologer, and poet of a single epigram in the Greek Anthology - https://en.wikipedia.org/wiki/Ptolemy + "ptolemy", + + // C. V. Raman - Indian physicist who won the Nobel Prize in 1930 for his discovery of the Raman effect. - https://en.wikipedia.org/wiki/C._V._Raman + "raman", + + // Srinivasa Ramanujan - Indian mathematician and autodidact who made extraordinary contributions to mathematical analysis, number theory, infinite series, and continued fractions. - https://en.wikipedia.org/wiki/Srinivasa_Ramanujan + "ramanujan", + + // Dennis Ritchie and Ken Thompson created UNIX and the C programming language.
+ // - https://en.wikipedia.org/wiki/Dennis_Ritchie + // - https://en.wikipedia.org/wiki/Ken_Thompson + "ritchie", + "thompson", + + // Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - https://en.wikipedia.org/wiki/Rosalind_Franklin + "rosalind", + + // Meghnad Saha - Indian astrophysicist best known for his development of the Saha equation, used to describe chemical and physical conditions in stars - https://en.wikipedia.org/wiki/Meghnad_Saha + "saha", + + // Jean E. Sammet developed FORMAC, the first widely used computer language for symbolic manipulation of mathematical formulas. https://en.wikipedia.org/wiki/Jean_E._Sammet + "sammet", + + // Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was fundamental in identifying HIV as the cause of AIDS. https://en.wikipedia.org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi + "sinoussi", + + // Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. https://en.wikiquote.org/wiki/Richard_Stallman + "stallman", + + // Aaron Swartz was influential in creating RSS, Markdown, Creative Commons, Reddit, and much of the internet as we know it today. He was devoted to freedom of information on the web. https://en.wikiquote.org/wiki/Aaron_Swartz + "swartz", + + // Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. https://en.wikipedia.org/wiki/Nikola_Tesla + "tesla", + + // Linus Torvalds invented Linux and Git. https://en.wikipedia.org/wiki/Linus_Torvalds + "torvalds", + + // Alan Turing was a founding father of computer science. https://en.wikipedia.org/wiki/Alan_Turing. + "turing", + + // Varahamihira - Ancient Indian mathematician who discovered trigonometric formulae during 505-587 CE - https://en.wikipedia.org/wiki/Var%C4%81hamihira#Contributions + "varahamihira", + + // Sir Mokshagundam Visvesvaraya - notable Indian engineer. He received the Indian Republic's highest honour, the Bharat Ratna, in 1955. His birthday, 15 September, is celebrated as Engineer's Day in India in his memory - https://en.wikipedia.org/wiki/Visvesvaraya + "visvesvaraya", + + // Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. https://en.wikipedia.org/wiki/Sophie_Wilson + "wilson", + + // Steve Wozniak invented the Apple I and Apple II. https://en.wikipedia.org/wiki/Steve_Wozniak + "wozniak", + + // The Wright brothers, Orville and Wilbur - credited with inventing and building the world's first successful airplane and making the first controlled, powered and sustained heavier-than-air human flight - https://en.wikipedia.org/wiki/Wright_brothers + "wright", + + // Rosalyn Sussman Yalow - American medical physicist, and a co-winner of the 1977 Nobel Prize in Physiology or Medicine for development of the radioimmunoassay technique. https://en.wikipedia.org/wiki/Rosalyn_Sussman_Yalow + "yalow", + + // Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel prize in the sciences.
https://en.wikipedia.org/wiki/Ada_Yonath + "yonath", + } + + rnd = rand.New(random.NewSource()) +) + +// GetRandomName generates a random name from the list of adjectives and surnames in this package +// formatted as "adjective_surname". For example 'focused_turing'. If retry is non-zero, a random +// integer between 0 and 10 will be added to the end of the name, e.g `focused_turing3` +func GetRandomName(retry int) string { +begin: + name := fmt.Sprintf("%s_%s", left[rnd.Intn(len(left))], right[rnd.Intn(len(right))]) + if name == "boring_wozniak" /* Steve Wozniak is not boring */ { + goto begin + } + + if retry > 0 { + name = fmt.Sprintf("%s%d", name, rnd.Intn(10)) + } + return name +} diff --git a/pkg/namesgenerator/names-generator_test.go b/pkg/namesgenerator/names-generator_test.go new file mode 100644 index 00000000..f7dae276 --- /dev/null +++ b/pkg/namesgenerator/names-generator_test.go @@ -0,0 +1,45 @@ +package namesgenerator + +import ( + "strings" + "testing" +) + +// Make sure the generated names are awesome +func TestGenerateAwesomeNames(t *testing.T) { + name := GetRandomName(0) + if !isAwesome(name) { + t.Fatalf("Generated name '%s' is not awesome.", name) + } +} + +func TestNameFormat(t *testing.T) { + name := GetRandomName(0) + if !strings.Contains(name, "_") { + t.Fatalf("Generated name does not contain an underscore") + } + if strings.ContainsAny(name, "0123456789") { + t.Fatalf("Generated name contains numbers!") + } +} + +func TestNameRetries(t *testing.T) { + name := GetRandomName(1) + if !strings.Contains(name, "_") { + t.Fatalf("Generated name does not contain an underscore") + } + if !strings.ContainsAny(name, "0123456789") { + t.Fatalf("Generated name doesn't contain a number") + } + +} + +// To be awesome, a container name must involve cool inventors, be easy to remember, +// be at least mildly funny, and always be politically correct for enterprise adoption. +func isAwesome(name string) bool { + coolInventorNames := true + easyToRemember := true + mildlyFunnyOnOccasion := true + politicallyCorrect := true + return coolInventorNames && easyToRemember && mildlyFunnyOnOccasion && politicallyCorrect +} diff --git a/pkg/nat/nat.go b/pkg/nat/nat.go new file mode 100644 index 00000000..1fbb13e6 --- /dev/null +++ b/pkg/nat/nat.go @@ -0,0 +1,197 @@ +package nat + +// nat is a convenience package for docker's manipulation of strings describing +// network ports. + +import ( + "fmt" + "net" + "strconv" + "strings" + + "github.com/docker/docker/pkg/parsers" +) + +const ( + // portSpecTemplate is the expected format for port specifications + portSpecTemplate = "ip:hostPort:containerPort" +) + +// PortBinding represents a binding between a Host IP address and a Host Port +type PortBinding struct { + // HostIP is the host IP Address + HostIP string `json:"HostIp"` + // HostPort is the host port number + HostPort string +} + +// PortMap is a collection of PortBinding indexed by Port +type PortMap map[Port][]PortBinding + +// PortSet is a collection of structs indexed by Port +type PortSet map[Port]struct{} + +// Port is a string containing port number and protocol in the format "80/tcp" +type Port string + +// NewPort creates a new instance of a Port given a protocol and port number +func NewPort(proto, port string) (Port, error) { + // Check for parsing issues on "port" now so we can avoid having + // to check it later on. 
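+ // For illustration, a sketch of the expected behavior (an added example, not a test fixture): + // NewPort("tcp", "8080") yields Port("8080/tcp") with a nil error, while + // NewPort("tcp", "abc") returns the strconv error surfaced by ParsePort.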
+ + portInt, err := ParsePort(port) + if err != nil { + return "", err + } + + return Port(fmt.Sprintf("%d/%s", portInt, proto)), nil +} + +// ParsePort parses the port number string and returns an int +func ParsePort(rawPort string) (int, error) { + if len(rawPort) == 0 { + return 0, nil + } + port, err := strconv.ParseUint(rawPort, 10, 16) + if err != nil { + return 0, err + } + return int(port), nil +} + +// Proto returns the protocol of a Port +func (p Port) Proto() string { + proto, _ := SplitProtoPort(string(p)) + return proto +} + +// Port returns the port number of a Port +func (p Port) Port() string { + _, port := SplitProtoPort(string(p)) + return port +} + +// Int returns the port number of a Port as an int +func (p Port) Int() int { + portStr := p.Port() + if len(portStr) == 0 { + return 0 + } + + // We don't need to check for an error because we're going to + // assume that any error would have been found, and reported, in NewPort() + port, _ := strconv.ParseUint(portStr, 10, 16) + return int(port) +} + +// SplitProtoPort splits a port specification in the format port/proto and returns the proto and the port number +func SplitProtoPort(rawPort string) (string, string) { + parts := strings.Split(rawPort, "/") + l := len(parts) + if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { + return "", "" + } + if l == 1 { + return "tcp", rawPort + } + if len(parts[1]) == 0 { + return "tcp", parts[0] + } + return parts[1], parts[0] +} + +func validateProto(proto string) bool { + for _, availableProto := range []string{"tcp", "udp"} { + if availableProto == proto { + return true + } + } + return false +} + +// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses +// these into the internal types +func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { + var ( + exposedPorts = make(map[Port]struct{}, len(ports)) + bindings = make(map[Port][]PortBinding) + ) + + for _, rawPort := range ports { + proto := "tcp" + + if i := strings.LastIndex(rawPort, "/"); i != -1 { + proto = rawPort[i+1:] + rawPort = rawPort[:i] + } + if !strings.Contains(rawPort, ":") { + rawPort = fmt.Sprintf("::%s", rawPort) + } else if len(strings.Split(rawPort, ":")) == 2 { + rawPort = fmt.Sprintf(":%s", rawPort) + } + + parts, err := parsers.PartParser(portSpecTemplate, rawPort) + if err != nil { + return nil, nil, err + } + + var ( + containerPort = parts["containerPort"] + rawIP = parts["ip"] + hostPort = parts["hostPort"] + ) + + if rawIP != "" && net.ParseIP(rawIP) == nil { + return nil, nil, fmt.Errorf("Invalid ip address: %s", rawIP) + } + if containerPort == "" { + return nil, nil, fmt.Errorf("No port specified: %s", rawPort) + } + + startPort, endPort, err := parsers.ParsePortRange(containerPort) + if err != nil { + return nil, nil, fmt.Errorf("Invalid containerPort: %s", containerPort) + } + + var startHostPort, endHostPort uint64 = 0, 0 + if len(hostPort) > 0 { + startHostPort, endHostPort, err = parsers.ParsePortRange(hostPort) + if err != nil { + return nil, nil, fmt.Errorf("Invalid hostPort: %s", hostPort) + } + } + + if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { + return nil, nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) + } + + if !validateProto(strings.ToLower(proto)) { + return nil, nil, fmt.Errorf("Invalid proto: %s", proto) + } + + for i := uint64(0); i <= (endPort - startPort); i++ { + containerPort = strconv.FormatUint(startPort+i, 10) + if len(hostPort) > 0 { + hostPort =
strconv.FormatUint(startHostPort+i, 10) + } + port, err := NewPort(strings.ToLower(proto), containerPort) + if err != nil { + return nil, nil, err + } + if _, exists := exposedPorts[port]; !exists { + exposedPorts[port] = struct{}{} + } + + binding := PortBinding{ + HostIP: rawIP, + HostPort: hostPort, + } + bslice, exists := bindings[port] + if !exists { + bslice = []PortBinding{} + } + bindings[port] = append(bslice, binding) + } + } + return exposedPorts, bindings, nil +} diff --git a/pkg/nat/nat_test.go b/pkg/nat/nat_test.go new file mode 100644 index 00000000..77baa457 --- /dev/null +++ b/pkg/nat/nat_test.go @@ -0,0 +1,461 @@ +package nat + +import ( + "testing" +) + +func TestParsePort(t *testing.T) { + var ( + p int + err error + ) + + p, err = ParsePort("1234") + + if err != nil || p != 1234 { + t.Fatal("Parsing '1234' did not succeed") + } + + // FIXME currently this is a valid port. I don't think it should be. + // I'm leaving this test commented out until we make a decision. + // - erikh + + /* + p, err = ParsePort("0123") + + if err != nil { + t.Fatal("Successfully parsed port '0123' to '123'") + } + */ + + p, err = ParsePort("asdf") + + if err == nil || p != 0 { + t.Fatal("Parsing port 'asdf' succeeded") + } + + p, err = ParsePort("1asdf") + + if err == nil || p != 0 { + t.Fatal("Parsing port '1asdf' succeeded") + } +} + +func TestPort(t *testing.T) { + p, err := NewPort("tcp", "1234") + + if err != nil { + t.Fatalf("tcp, 1234 had a parsing issue: %v", err) + } + + if string(p) != "1234/tcp" { + t.Fatal("tcp, 1234 did not result in the string 1234/tcp") + } + + if p.Proto() != "tcp" { + t.Fatal("protocol was not tcp") + } + + if p.Port() != "1234" { + t.Fatal("port string value was not 1234") + } + + if p.Int() != 1234 { + t.Fatal("port int value was not 1234") + } + + p, err = NewPort("tcp", "asd1234") + if err == nil { + t.Fatal("tcp, asd1234 was supposed to fail") + } +} + +func TestSplitProtoPort(t *testing.T) { + var ( + proto string + port string + ) + + proto, port = SplitProtoPort("1234/tcp") + + if proto != "tcp" || port != "1234" { + t.Fatal("Could not split 1234/tcp properly") + } + + proto, port = SplitProtoPort("") + + if proto != "" || port != "" { + t.Fatal("parsing an empty string yielded surprising results", proto, port) + } + + proto, port = SplitProtoPort("1234") + + if proto != "tcp" || port != "1234" { + t.Fatal("tcp is not the default protocol for portspec '1234'", proto, port) + } + + proto, port = SplitProtoPort("1234/") + + if proto != "tcp" || port != "1234" { + t.Fatal("parsing '1234/' yielded:" + port + "/" + proto) + } + + proto, port = SplitProtoPort("/tcp") + + if proto != "" || port != "" { + t.Fatal("parsing '/tcp' yielded:" + port + "/" + proto) + } +} + +func TestParsePortSpecs(t *testing.T) { + var ( + portMap map[Port]struct{} + bindingMap map[Port][]PortBinding + err error + ) + + portMap, bindingMap, err = ParsePortSpecs([]string{"1234/tcp", "2345/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err) + } + + if _, ok := portMap[Port("1234/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2345/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIP != "" { + t.Fatalf("HostIP should not be set for %s", portspec) + } + + if bindings[0].HostPort != "" { + t.Fatalf("HostPort should not be set for 
%s", portspec) + } + } + + portMap, bindingMap, err = ParsePortSpecs([]string{"1234:1234/tcp", "2345:2345/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err) + } + + if _, ok := portMap[Port("1234/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2345/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + _, port := SplitProtoPort(string(portspec)) + + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIP != "" { + t.Fatalf("HostIP should not be set for %s", portspec) + } + + if bindings[0].HostPort != port { + t.Fatalf("HostPort should be %s for %s", port, portspec) + } + } + + portMap, bindingMap, err = ParsePortSpecs([]string{"0.0.0.0:1234:1234/tcp", "0.0.0.0:2345:2345/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err) + } + + if _, ok := portMap[Port("1234/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2345/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + _, port := SplitProtoPort(string(portspec)) + + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIP != "0.0.0.0" { + t.Fatalf("HostIP is not 0.0.0.0 for %s", portspec) + } + + if bindings[0].HostPort != port { + t.Fatalf("HostPort should be %s for %s", port, portspec) + } + } + + _, _, err = ParsePortSpecs([]string{"localhost:1234:1234/tcp"}) + + if err == nil { + t.Fatal("Received no error while trying to parse a hostname instead of ip") + } +} + +func TestParsePortSpecsWithRange(t *testing.T) { + var ( + portMap map[Port]struct{} + bindingMap map[Port][]PortBinding + err error + ) + + portMap, bindingMap, err = ParsePortSpecs([]string{"1234-1236/tcp", "2345-2347/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err) + } + + if _, ok := portMap[Port("1235/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2346/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIP != "" { + t.Fatalf("HostIP should not be set for %s", portspec) + } + + if bindings[0].HostPort != "" { + t.Fatalf("HostPort should not be set for %s", portspec) + } + } + + portMap, bindingMap, err = ParsePortSpecs([]string{"1234-1236:1234-1236/tcp", "2345-2347:2345-2347/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err) + } + + if _, ok := portMap[Port("1235/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2346/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + _, port := SplitProtoPort(string(portspec)) + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIP != "" { + t.Fatalf("HostIP should not be set for %s", portspec) + } + + if bindings[0].HostPort != port { + t.Fatalf("HostPort should be %s for %s", port, portspec) + } + } + + portMap, bindingMap, err = ParsePortSpecs([]string{"0.0.0.0:1234-1236:1234-1236/tcp", "0.0.0.0:2345-2347:2345-2347/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: 
%s", err) + } + + if _, ok := portMap[Port("1235/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2346/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + _, port := SplitProtoPort(string(portspec)) + if len(bindings) != 1 || bindings[0].HostIP != "0.0.0.0" || bindings[0].HostPort != port { + t.Fatalf("Expect single binding to port %s but found %s", port, bindings) + } + } + + _, _, err = ParsePortSpecs([]string{"localhost:1234-1236:1234-1236/tcp"}) + + if err == nil { + t.Fatal("Received no error while trying to parse a hostname instead of ip") + } +} + +func TestParseNetworkOptsPrivateOnly(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100::80"}) + if err != nil { + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "tcp" { + t.Logf("Expected tcp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "80" { + t.Logf("Expected 80 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "" { + t.Logf("Expected \"\" got %s", s.HostPort) + t.Fail() + } + if s.HostIP != "192.168.1.100" { + t.Fail() + } + } +} + +func TestParseNetworkOptsPublic(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100:8080:80"}) + if err != nil { + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "tcp" { + t.Logf("Expected tcp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "80" { + t.Logf("Expected 80 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "8080" { + t.Logf("Expected 8080 got %s", s.HostPort) + t.Fail() + } + if s.HostIP != "192.168.1.100" { + t.Fail() + } + } +} + +func TestParseNetworkOptsPublicNoPort(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100"}) + + if err == nil { + t.Logf("Expected error Invalid containerPort") + t.Fail() + } + if ports != nil { + t.Logf("Expected nil got %s", ports) + t.Fail() + } + if bindings != nil { + t.Logf("Expected nil got %s", bindings) + t.Fail() + } +} + +func TestParseNetworkOptsNegativePorts(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100:-1:-1"}) + + if err == nil { + t.Fail() + } + if len(ports) != 0 { + t.Logf("Expected nil got %s", len(ports)) + t.Fail() + } + if len(bindings) != 0 { + t.Logf("Expected 0 got %s", len(bindings)) + t.Fail() + } +} + +func TestParseNetworkOptsUdp(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100::6000/udp"}) + if err != nil { + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "udp" { + t.Logf("Expected udp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "6000" { + 
t.Logf("Expected 6000 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "" { + t.Logf("Expected \"\" got %s", s.HostPort) + t.Fail() + } + if s.HostIP != "192.168.1.100" { + t.Fail() + } + } +} diff --git a/pkg/nat/sort.go b/pkg/nat/sort.go new file mode 100644 index 00000000..0a9dd078 --- /dev/null +++ b/pkg/nat/sort.go @@ -0,0 +1,97 @@ +package nat + +import ( + "sort" + "strconv" + "strings" +) + +type portSorter struct { + ports []Port + by func(i, j Port) bool +} + +func (s *portSorter) Len() int { + return len(s.ports) +} + +func (s *portSorter) Swap(i, j int) { + s.ports[i], s.ports[j] = s.ports[j], s.ports[i] +} + +func (s *portSorter) Less(i, j int) bool { + ip := s.ports[i] + jp := s.ports[j] + + return s.by(ip, jp) +} + +// Sort sorts a list of ports using the provided predicate +// This function should compare `i` and `j`, returning true if `i` is +// considered to be less than `j` +func Sort(ports []Port, predicate func(i, j Port) bool) { + s := &portSorter{ports, predicate} + sort.Sort(s) +} + +type portMapEntry struct { + port Port + binding PortBinding +} + +type portMapSorter []portMapEntry + +func (s portMapSorter) Len() int { return len(s) } +func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// sort the port so that the order is: +// 1. port with larger specified bindings +// 2. larger port +// 3. port with tcp protocol +func (s portMapSorter) Less(i, j int) bool { + pi, pj := s[i].port, s[j].port + hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort) + return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp") +} + +// SortPortMap sorts the list of ports and their respected mapping. The ports +// will explicit HostPort will be placed first. 
+func SortPortMap(ports []Port, bindings PortMap) { + s := portMapSorter{} + for _, p := range ports { + if binding, ok := bindings[p]; ok { + for _, b := range binding { + s = append(s, portMapEntry{port: p, binding: b}) + } + bindings[p] = []PortBinding{} + } else { + s = append(s, portMapEntry{port: p}) + } + } + + sort.Sort(s) + var ( + i int + pm = make(map[Port]struct{}) + ) + // reorder ports + for _, entry := range s { + if _, ok := pm[entry.port]; !ok { + ports[i] = entry.port + pm[entry.port] = struct{}{} + i++ + } + // reorder bindings for this port + if _, ok := bindings[entry.port]; ok { + bindings[entry.port] = append(bindings[entry.port], entry.binding) + } + } +} + +func toInt(s string) int64 { + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} diff --git a/pkg/nat/sort_test.go b/pkg/nat/sort_test.go new file mode 100644 index 00000000..88ed9111 --- /dev/null +++ b/pkg/nat/sort_test.go @@ -0,0 +1,85 @@ +package nat + +import ( + "fmt" + "reflect" + "testing" +) + +func TestSortUniquePorts(t *testing.T) { + ports := []Port{ + Port("6379/tcp"), + Port("22/tcp"), + } + + Sort(ports, func(ip, jp Port) bool { + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") + }) + + first := ports[0] + if fmt.Sprint(first) != "22/tcp" { + t.Log(fmt.Sprint(first)) + t.Fail() + } +} + +func TestSortSamePortWithDifferentProto(t *testing.T) { + ports := []Port{ + Port("8888/tcp"), + Port("8888/udp"), + Port("6379/tcp"), + Port("6379/udp"), + } + + Sort(ports, func(ip, jp Port) bool { + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") + }) + + first := ports[0] + if fmt.Sprint(first) != "6379/tcp" { + t.Fail() + } +} + +func TestSortPortMap(t *testing.T) { + ports := []Port{ + Port("22/tcp"), + Port("22/udp"), + Port("8000/tcp"), + Port("6379/tcp"), + Port("9999/tcp"), + } + + portMap := PortMap{ + Port("22/tcp"): []PortBinding{ + {}, + }, + Port("8000/tcp"): []PortBinding{ + {}, + }, + Port("6379/tcp"): []PortBinding{ + {}, + {HostIP: "0.0.0.0", HostPort: "32749"}, + }, + Port("9999/tcp"): []PortBinding{ + {HostIP: "0.0.0.0", HostPort: "40000"}, + }, + } + + SortPortMap(ports, portMap) + if !reflect.DeepEqual(ports, []Port{ + Port("9999/tcp"), + Port("6379/tcp"), + Port("8000/tcp"), + Port("22/tcp"), + Port("22/udp"), + }) { + t.Errorf("failed to prioritize port with explicit mappings, got %v", ports) + } + if pm := portMap[Port("6379/tcp")]; !reflect.DeepEqual(pm, []PortBinding{ + {HostIP: "0.0.0.0", HostPort: "32749"}, + {}, + }) { + t.Errorf("failed to prioritize bindings with explicit mappings, got %v", pm) + } +} diff --git a/pkg/parsers/filters/parse.go b/pkg/parsers/filters/parse.go new file mode 100644 index 00000000..df5486d5 --- /dev/null +++ b/pkg/parsers/filters/parse.go @@ -0,0 +1,116 @@ +package filters + +import ( + "encoding/json" + "errors" + "regexp" + "strings" +) + +type Args map[string][]string + +// Parse the argument to the filter flag. Like +// +// `docker ps -f 'created=today' -f 'image.name=ubuntu*'` +// +// If prev map is provided, then it is appended to, and returned. By default a new +// map is created. 
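+// For example, ParseFlag("label=colour=blue", nil) yields + // Args{"label": {"colour=blue"}}; only the first '=' splits the name + // from the value, so values may themselves contain '='.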
+func ParseFlag(arg string, prev Args) (Args, error) { + var filters Args = prev + if prev == nil { + filters = Args{} + } + if len(arg) == 0 { + return filters, nil + } + + if !strings.Contains(arg, "=") { + return filters, ErrorBadFormat + } + + f := strings.SplitN(arg, "=", 2) + name := strings.ToLower(strings.TrimSpace(f[0])) + value := strings.TrimSpace(f[1]) + filters[name] = append(filters[name], value) + + return filters, nil +} + +var ErrorBadFormat = errors.New("bad format of filter (expected name=value)") + +// packs the Args into a string for easy transport from client to server +func ToParam(a Args) (string, error) { + // this way we don't URL encode {}, just empty space + if len(a) == 0 { + return "", nil + } + + buf, err := json.Marshal(a) + if err != nil { + return "", err + } + return string(buf), nil +} + +// unpacks the filter Args +func FromParam(p string) (Args, error) { + args := Args{} + if len(p) == 0 { + return args, nil + } + if err := json.NewDecoder(strings.NewReader(p)).Decode(&args); err != nil { + return nil, err + } + return args, nil +} + +func (filters Args) MatchKVList(field string, sources map[string]string) bool { + fieldValues := filters[field] + + // do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + + if sources == nil || len(sources) == 0 { + return false + } + +outer: + for _, name2match := range fieldValues { + testKV := strings.SplitN(name2match, "=", 2) + + for k, v := range sources { + if len(testKV) == 1 { + if k == testKV[0] { + continue outer + } + } else if k == testKV[0] && v == testKV[1] { + continue outer + } + } + + return false + } + + return true +} + +func (filters Args) Match(field, source string) bool { + fieldValues := filters[field] + + // do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + for _, name2match := range fieldValues { + match, err := regexp.MatchString(name2match, source) + if err != nil { + continue + } + if match { + return true + } + } + return false +} diff --git a/pkg/parsers/filters/parse_test.go b/pkg/parsers/filters/parse_test.go new file mode 100644 index 00000000..a141c33c --- /dev/null +++ b/pkg/parsers/filters/parse_test.go @@ -0,0 +1,218 @@ +package filters + +import ( + "sort" + "testing" +) + +func TestParseArgs(t *testing.T) { + // equivalent of `docker ps -f 'created=today' -f 'image.name=ubuntu*' -f 'image.name=*untu'` + flagArgs := []string{ + "created=today", + "image.name=ubuntu*", + "image.name=*untu", + } + var ( + args = Args{} + err error + ) + for i := range flagArgs { + args, err = ParseFlag(flagArgs[i], args) + if err != nil { + t.Errorf("failed to parse %s: %s", flagArgs[i], err) + } + } + if len(args["created"]) != 1 { + t.Errorf("failed to set this arg") + } + if len(args["image.name"]) != 2 { + t.Errorf("the args should have collapsed") + } +} + +func TestParseArgsEdgeCase(t *testing.T) { + var filters Args + args, err := ParseFlag("", filters) + if err != nil { + t.Fatal(err) + } + if args == nil || len(args) != 0 { + t.Fatalf("Expected an empty Args (map), got %v", args) + } + if args, err = ParseFlag("anything", args); err == nil || err != ErrorBadFormat { + t.Fatalf("Expected ErrorBadFormat, got %v", err) + } +} + +func TestToParam(t *testing.T) { + a := Args{ + "created": []string{"today"}, + "image.name": []string{"ubuntu*", "*untu"}, + } + + _, err := ToParam(a) + if err != nil { + t.Errorf("failed to marshal the filters: %s", err) + } +} + +func
TestFromParam(t *testing.T) { + invalids := []string{ + "anything", + "['a','list']", + "{'key': 'value'}", + `{"key": "value"}`, + } + valids := map[string]Args{ + `{"key": ["value"]}`: { + "key": {"value"}, + }, + `{"key": ["value1", "value2"]}`: { + "key": {"value1", "value2"}, + }, + `{"key1": ["value1"], "key2": ["value2"]}`: { + "key1": {"value1"}, + "key2": {"value2"}, + }, + } + for _, invalid := range invalids { + if _, err := FromParam(invalid); err == nil { + t.Fatalf("Expected an error with %v, got nothing", invalid) + } + } + for json, expectedArgs := range valids { + args, err := FromParam(json) + if err != nil { + t.Fatal(err) + } + if len(args) != len(expectedArgs) { + t.Fatalf("Expected %v, got %v", expectedArgs, args) + } + for key, expectedValues := range expectedArgs { + values := args[key] + sort.Strings(values) + sort.Strings(expectedValues) + if len(values) != len(expectedValues) { + t.Fatalf("Expected %v, got %v", expectedArgs, args) + } + for index, expectedValue := range expectedValues { + if values[index] != expectedValue { + t.Fatalf("Expected %v, got %v", expectedArgs, args) + } + } + } + } +} + +func TestEmpty(t *testing.T) { + a := Args{} + v, err := ToParam(a) + if err != nil { + t.Errorf("failed to marshal the filters: %s", err) + } + v1, err := FromParam(v) + if err != nil { + t.Errorf("%s", err) + } + if len(a) != len(v1) { + t.Errorf("these should both be empty sets") + } +} + +func TestArgsMatchKVList(t *testing.T) { + // empty sources + args := Args{ + "created": []string{"today"}, + } + if args.MatchKVList("created", map[string]string{}) { + t.Fatalf("Expected false for (%v,created), got true", args) + } + // Not empty sources + sources := map[string]string{ + "key1": "value1", + "key2": "value2", + "key3": "value3", + } + matches := map[*Args]string{ + &Args{}: "field", + &Args{ + "created": []string{"today"}, + "labels": []string{"key1"}, + }: "labels", + &Args{ + "created": []string{"today"}, + "labels": []string{"key1=value1"}, + }: "labels", + } + differs := map[*Args]string{ + &Args{ + "created": []string{"today"}, + }: "created", + &Args{ + "created": []string{"today"}, + "labels": []string{"key4"}, + }: "labels", + &Args{ + "created": []string{"today"}, + "labels": []string{"key1=value3"}, + }: "labels", + } + for args, field := range matches { + if args.MatchKVList(field, sources) != true { + t.Fatalf("Expected true for %v on %v, got false", sources, args) + } + } + for args, field := range differs { + if args.MatchKVList(field, sources) != false { + t.Fatalf("Expected false for %v on %v, got true", sources, args) + } + } +} + +func TestArgsMatch(t *testing.T) { + source := "today" + matches := map[*Args]string{ + &Args{}: "field", + &Args{ + "created": []string{"today"}, + "labels": []string{"key1"}, + }: "today", + &Args{ + "created": []string{"to*"}, + }: "created", + &Args{ + "created": []string{"to(.*)"}, + }: "created", + &Args{ + "created": []string{"tod"}, + }: "created", + &Args{ + "created": []string{"anything", "to*"}, + }: "created", + } + differs := map[*Args]string{ + &Args{ + "created": []string{"tomorrow"}, + }: "created", + &Args{ + "created": []string{"to(day"}, + }: "created", + &Args{ + "created": []string{"tom(.*)"}, + }: "created", + &Args{ + "created": []string{"today1"}, + "labels": []string{"today"}, + }: "created", + } + for args, field := range matches { + if args.Match(field, source) != true { + t.Fatalf("Expected true for %v on %v, got false", source, args) + } + } + for args, field := range differs { + if
args.Match(field, source) != false { + t.Fatalf("Expected false for %v on %v, got true", source, args) + } + } +} diff --git a/pkg/parsers/kernel/kernel.go b/pkg/parsers/kernel/kernel.go new file mode 100644 index 00000000..5f793068 --- /dev/null +++ b/pkg/parsers/kernel/kernel.go @@ -0,0 +1,95 @@ +// +build !windows + +package kernel + +import ( + "bytes" + "errors" + "fmt" +) + +type KernelVersionInfo struct { + Kernel int + Major int + Minor int + Flavor string +} + +func (k *KernelVersionInfo) String() string { + return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor) +} + +// Compare two KernelVersionInfo structs. +// Returns -1 if a < b, 0 if a == b, 1 if a > b +func CompareKernelVersion(a, b *KernelVersionInfo) int { + if a.Kernel < b.Kernel { + return -1 + } else if a.Kernel > b.Kernel { + return 1 + } + + if a.Major < b.Major { + return -1 + } else if a.Major > b.Major { + return 1 + } + + if a.Minor < b.Minor { + return -1 + } else if a.Minor > b.Minor { + return 1 + } + + return 0 +} + +func GetKernelVersion() (*KernelVersionInfo, error) { + var ( + err error + ) + + uts, err := uname() + if err != nil { + return nil, err + } + + release := make([]byte, len(uts.Release)) + + i := 0 + for _, c := range uts.Release { + release[i] = byte(c) + i++ + } + + // Remove the \x00 from the release for Atoi to parse correctly + release = release[:bytes.IndexByte(release, 0)] + + return ParseRelease(string(release)) +} + +func ParseRelease(release string) (*KernelVersionInfo, error) { + var ( + kernel, major, minor, parsed int + flavor, partial string + ) + + // Ignore error from Sscanf to allow an empty flavor. Instead, just + // make sure we got all the version numbers. + parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) + if parsed < 2 { + return nil, errors.New("Can't parse kernel version " + release) + } + + // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 + parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) + if parsed < 1 { + flavor = partial + } + + return &KernelVersionInfo{ + Kernel: kernel, + Major: major, + Minor: minor, + Flavor: flavor, + }, nil +} diff --git a/pkg/parsers/kernel/kernel_test.go b/pkg/parsers/kernel/kernel_test.go new file mode 100644 index 00000000..7f40939c --- /dev/null +++ b/pkg/parsers/kernel/kernel_test.go @@ -0,0 +1,92 @@ +package kernel + +import ( + "fmt" + "testing" +) + +func assertParseRelease(t *testing.T, release string, b *KernelVersionInfo, result int) { + var ( + a *KernelVersionInfo + ) + a, _ = ParseRelease(release) + + if r := CompareKernelVersion(a, b); r != result { + t.Fatalf("Unexpected kernel version comparison result for (%v,%v). Found %d, expected %d", release, b, r, result) + } + if a.Flavor != b.Flavor { + t.Fatalf("Unexpected parsed kernel flavor.
Found %s, expected %s", a.Flavor, b.Flavor) + } +} + +func TestParseRelease(t *testing.T) { + assertParseRelease(t, "3.8.0", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0) + assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) + assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) + assertParseRelease(t, "3.8.0-19-generic", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "-19-generic"}, 0) + assertParseRelease(t, "3.12.8tag", &KernelVersionInfo{Kernel: 3, Major: 12, Minor: 8, Flavor: "tag"}, 0) + assertParseRelease(t, "3.12-1-amd64", &KernelVersionInfo{Kernel: 3, Major: 12, Minor: 0, Flavor: "-1-amd64"}, 0) + assertParseRelease(t, "3.8.0", &KernelVersionInfo{Kernel: 4, Major: 8, Minor: 0}, -1) + // Errors + invalids := []string{ + "3", + "a", + "a.a", + "a.a.a-a", + } + for _, invalid := range invalids { + expectedMessage := fmt.Sprintf("Can't parse kernel version %v", invalid) + if _, err := ParseRelease(invalid); err == nil || err.Error() != expectedMessage { + t.Fatalf("Expected error %q, got %v", expectedMessage, err) + } + } +} + +func assertKernelVersion(t *testing.T, a, b *KernelVersionInfo, result int) { + if r := CompareKernelVersion(a, b); r != result { + t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result) + } +} + +func TestCompareKernelVersion(t *testing.T) { + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 0) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + &KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0}, + 1) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 0) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 5}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 1) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 0, Minor: 20}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 7, Minor: 20}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 20}, + &KernelVersionInfo{Kernel: 3, Major: 7, Minor: 0}, + 1) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 20}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 1) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 20}, + -1) +} diff --git a/pkg/parsers/kernel/kernel_windows.go b/pkg/parsers/kernel/kernel_windows.go new file mode 100644 index 00000000..399d63e5 --- /dev/null +++ b/pkg/parsers/kernel/kernel_windows.go @@ -0,0 +1,65 @@ +package kernel + +import ( + "fmt" + "syscall" + "unsafe" +) + +type KernelVersionInfo struct { + kvi string + major int + minor int + build int +} + +func (k *KernelVersionInfo) String() string { + return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi) +} + +func GetKernelVersion() (*KernelVersionInfo, error) { + + var ( + h syscall.Handle + dwVersion uint32 + err error + ) + + KVI := &KernelVersionInfo{"Unknown", 0, 0, 0} + + if err = syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, +
syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), + 0, + syscall.KEY_READ, + &h); err != nil { + return KVI, err + } + defer syscall.RegCloseKey(h) + + var buf [1 << 10]uint16 + var typ uint32 + n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 + + if err = syscall.RegQueryValueEx(h, + syscall.StringToUTF16Ptr("BuildLabEx"), + nil, + &typ, + (*byte)(unsafe.Pointer(&buf[0])), + &n); err != nil { + return KVI, err + } + + KVI.kvi = syscall.UTF16ToString(buf[:]) + + // Important - docker.exe MUST be manifested for this API to return + // the correct information. + if dwVersion, err = syscall.GetVersion(); err != nil { + return KVI, err + } + + KVI.major = int(dwVersion & 0xFF) + KVI.minor = int((dwVersion & 0XFF00) >> 8) + KVI.build = int((dwVersion & 0xFFFF0000) >> 16) + + return KVI, nil +} diff --git a/pkg/parsers/kernel/uname_linux.go b/pkg/parsers/kernel/uname_linux.go new file mode 100644 index 00000000..8ca814c1 --- /dev/null +++ b/pkg/parsers/kernel/uname_linux.go @@ -0,0 +1,16 @@ +package kernel + +import ( + "syscall" +) + +type Utsname syscall.Utsname + +func uname() (*syscall.Utsname, error) { + uts := &syscall.Utsname{} + + if err := syscall.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/pkg/parsers/kernel/uname_unsupported.go b/pkg/parsers/kernel/uname_unsupported.go new file mode 100644 index 00000000..00c54225 --- /dev/null +++ b/pkg/parsers/kernel/uname_unsupported.go @@ -0,0 +1,15 @@ +// +build !linux + +package kernel + +import ( + "errors" +) + +type Utsname struct { + Release [65]byte +} + +func uname() (*Utsname, error) { + return nil, errors.New("Kernel version detection is available only on linux") +} diff --git a/pkg/parsers/operatingsystem/operatingsystem_linux.go b/pkg/parsers/operatingsystem/operatingsystem_linux.go new file mode 100644 index 00000000..af185f9f --- /dev/null +++ b/pkg/parsers/operatingsystem/operatingsystem_linux.go @@ -0,0 +1,40 @@ +package operatingsystem + +import ( + "bytes" + "errors" + "io/ioutil" +) + +var ( + // file to use to detect if the daemon is running in a container + proc1Cgroup = "/proc/1/cgroup" + + // file to check to determine Operating System + etcOsRelease = "/etc/os-release" +) + +func GetOperatingSystem() (string, error) { + b, err := ioutil.ReadFile(etcOsRelease) + if err != nil { + return "", err + } + if i := bytes.Index(b, []byte("PRETTY_NAME")); i >= 0 { + b = b[i+13:] + return string(b[:bytes.IndexByte(b, '"')]), nil + } + return "", errors.New("PRETTY_NAME not found") +} + +func IsContainerized() (bool, error) { + b, err := ioutil.ReadFile(proc1Cgroup) + if err != nil { + return false, err + } + for _, line := range bytes.Split(b, []byte{'\n'}) { + if len(line) > 0 && !bytes.HasSuffix(line, []byte{'/'}) { + return true, nil + } + } + return false, nil +} diff --git a/pkg/parsers/operatingsystem/operatingsystem_test.go b/pkg/parsers/operatingsystem/operatingsystem_test.go new file mode 100644 index 00000000..b7d54cbb --- /dev/null +++ b/pkg/parsers/operatingsystem/operatingsystem_test.go @@ -0,0 +1,124 @@ +package operatingsystem + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestGetOperatingSystem(t *testing.T) { + var ( + backup = etcOsRelease + ubuntuTrusty = []byte(`NAME="Ubuntu" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +PRETTY_NAME="Ubuntu 14.04 LTS" +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" 
+BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`) + gentoo = []byte(`NAME=Gentoo +ID=gentoo +PRETTY_NAME="Gentoo/Linux" +ANSI_COLOR="1;32" +HOME_URL="http://www.gentoo.org/" +SUPPORT_URL="http://www.gentoo.org/main/en/support.xml" +BUG_REPORT_URL="https://bugs.gentoo.org/" +`) + noPrettyName = []byte(`NAME="Ubuntu" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`) + ) + + dir := os.TempDir() + etcOsRelease = filepath.Join(dir, "etcOsRelease") + + defer func() { + os.Remove(etcOsRelease) + etcOsRelease = backup + }() + + for expect, osRelease := range map[string][]byte{ + "Ubuntu 14.04 LTS": ubuntuTrusty, + "Gentoo/Linux": gentoo, + "": noPrettyName, + } { + if err := ioutil.WriteFile(etcOsRelease, osRelease, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", etcOsRelease, err) + } + s, err := GetOperatingSystem() + if s != expect { + if expect == "" { + t.Fatalf("Expected error 'PRETTY_NAME not found', but got %v", err) + } else { + t.Fatalf("Expected '%s', but got '%s'. Err=%v", expect, s, err) + } + } + } +} + +func TestIsContainerized(t *testing.T) { + var ( + backup = proc1Cgroup + nonContainerizedProc1Cgroup = []byte(`14:name=systemd:/ +13:hugetlb:/ +12:net_prio:/ +11:perf_event:/ +10:bfqio:/ +9:blkio:/ +8:net_cls:/ +7:freezer:/ +6:devices:/ +5:memory:/ +4:cpuacct:/ +3:cpu:/ +2:cpuset:/ +`) + containerizedProc1Cgroup = []byte(`9:perf_event:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +8:blkio:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +7:net_cls:/ +6:freezer:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +5:devices:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +4:memory:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +3:cpuacct:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +2:cpu:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +1:cpuset:/`) + ) + + dir := os.TempDir() + proc1Cgroup = filepath.Join(dir, "proc1Cgroup") + + defer func() { + os.Remove(proc1Cgroup) + proc1Cgroup = backup + }() + + if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroup, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) + } + inContainer, err := IsContainerized() + if err != nil { + t.Fatal(err) + } + if inContainer { + t.Fatal("Wrongly assuming containerized") + } + + if err := ioutil.WriteFile(proc1Cgroup, containerizedProc1Cgroup, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) + } + inContainer, err = IsContainerized() + if err != nil { + t.Fatal(err) + } + if !inContainer { + t.Fatal("Wrongly assuming non-containerized") + } +} diff --git a/pkg/parsers/operatingsystem/operatingsystem_windows.go b/pkg/parsers/operatingsystem/operatingsystem_windows.go new file mode 100644 index 00000000..c843c6f8 --- /dev/null +++ b/pkg/parsers/operatingsystem/operatingsystem_windows.go @@ -0,0 +1,47 @@ +package operatingsystem + +import ( + "syscall" + "unsafe" +) + +// See https://code.google.com/p/go/source/browse/src/pkg/mime/type_windows.go?r=d14520ac25bf6940785aabb71f5be453a286f58c +// for a similar sample + +func GetOperatingSystem() (string, error) { + + var h syscall.Handle + + // Default return value + ret := "Unknown Operating System" + + if err := 
syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, + syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), + 0, + syscall.KEY_READ, + &h); err != nil { + return ret, err + } + defer syscall.RegCloseKey(h) + + var buf [1 << 10]uint16 + var typ uint32 + n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 + + if err := syscall.RegQueryValueEx(h, + syscall.StringToUTF16Ptr("ProductName"), + nil, + &typ, + (*byte)(unsafe.Pointer(&buf[0])), + &n); err != nil { + return ret, err + } + ret = syscall.UTF16ToString(buf[:]) + + return ret, nil +} + +// No-op on Windows +func IsContainerized() (bool, error) { + return false, nil +} diff --git a/pkg/parsers/parsers.go b/pkg/parsers/parsers.go new file mode 100644 index 00000000..ef7f942e --- /dev/null +++ b/pkg/parsers/parsers.go @@ -0,0 +1,170 @@ +package parsers + +import ( + "fmt" + "net/url" + "path" + "runtime" + "strconv" + "strings" +) + +// FIXME: Change this not to receive default value as parameter +func ParseHost(defaultTCPAddr, defaultUnixAddr, addr string) (string, error) { + addr = strings.TrimSpace(addr) + if addr == "" { + if runtime.GOOS != "windows" { + addr = fmt.Sprintf("unix://%s", defaultUnixAddr) + } else { + // Note - defaultTCPAddr already includes tcp:// prefix + addr = defaultTCPAddr + } + } + addrParts := strings.Split(addr, "://") + if len(addrParts) == 1 { + addrParts = []string{"tcp", addrParts[0]} + } + + switch addrParts[0] { + case "tcp": + return ParseTCPAddr(addrParts[1], defaultTCPAddr) + case "unix": + return ParseUnixAddr(addrParts[1], defaultUnixAddr) + case "fd": + return addr, nil + default: + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } +} + +func ParseUnixAddr(addr string, defaultAddr string) (string, error) { + addr = strings.TrimPrefix(addr, "unix://") + if strings.Contains(addr, "://") { + return "", fmt.Errorf("Invalid proto, expected unix: %s", addr) + } + if addr == "" { + addr = defaultAddr + } + return fmt.Sprintf("unix://%s", addr), nil +} + +func ParseTCPAddr(addr string, defaultAddr string) (string, error) { + addr = strings.TrimPrefix(addr, "tcp://") + if strings.Contains(addr, "://") || addr == "" { + return "", fmt.Errorf("Invalid proto, expected tcp: %s", addr) + } + + u, err := url.Parse("tcp://" + addr) + if err != nil { + return "", err + } + hostParts := strings.Split(u.Host, ":") + if len(hostParts) != 2 { + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } + host := hostParts[0] + if host == "" { + host = defaultAddr + } + + p, err := strconv.Atoi(hostParts[1]) + if err != nil && p == 0 { + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } + return fmt.Sprintf("tcp://%s:%d%s", host, p, u.Path), nil +} + +// ParseRepositoryTag gets a repos name and returns the right reposName + tag|digest. +// The tag can be confusing because of a port in a repository name.
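+// The disambiguation rule implemented below: the segment after the last ':' + // is treated as a tag only when it contains no '/', so "url:5000/repo" + // keeps its port and returns an empty tag.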
+// Ex: localhost.localdomain:5000/samalba/hipache:latest +// Digest ex: localhost:5000/foo/bar@sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb +func ParseRepositoryTag(repos string) (string, string) { + n := strings.Index(repos, "@") + if n >= 0 { + parts := strings.Split(repos, "@") + return parts[0], parts[1] + } + n = strings.LastIndex(repos, ":") + if n < 0 { + return repos, "" + } + if tag := repos[n+1:]; !strings.Contains(tag, "/") { + return repos[:n], tag + } + return repos, "" +} + +func PartParser(template, data string) (map[string]string, error) { + // ip:public:private + var ( + templateParts = strings.Split(template, ":") + parts = strings.Split(data, ":") + out = make(map[string]string, len(templateParts)) + ) + if len(parts) != len(templateParts) { + return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template) + } + + for i, t := range templateParts { + value := "" + if len(parts) > i { + value = parts[i] + } + out[t] = value + } + return out, nil +} + +func ParseKeyValueOpt(opt string) (string, string, error) { + parts := strings.SplitN(opt, "=", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) + } + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil +} + +func ParsePortRange(ports string) (uint64, uint64, error) { + if ports == "" { + return 0, 0, fmt.Errorf("Empty string specified for ports.") + } + if !strings.Contains(ports, "-") { + start, err := strconv.ParseUint(ports, 10, 16) + end := start + return start, end, err + } + + parts := strings.Split(ports, "-") + start, err := strconv.ParseUint(parts[0], 10, 16) + if err != nil { + return 0, 0, err + } + end, err := strconv.ParseUint(parts[1], 10, 16) + if err != nil { + return 0, 0, err + } + if end < start { + return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) + } + return start, end, nil +} + +func ParseLink(val string) (string, string, error) { + if val == "" { + return "", "", fmt.Errorf("empty string specified for links") + } + arr := strings.Split(val, ":") + if len(arr) > 2 { + return "", "", fmt.Errorf("bad format for links: %s", val) + } + if len(arr) == 1 { + return val, val, nil + } + // This is kept because we can actually get an HostConfig with links + // from an already created container and the format is not `foo:bar` + // but `/foo:/c1/bar` + if strings.HasPrefix(arr[0], "/") { + _, alias := path.Split(arr[1]) + return arr[0][1:], alias, nil + } + return arr[0], arr[1], nil +} diff --git a/pkg/parsers/parsers_test.go b/pkg/parsers/parsers_test.go new file mode 100644 index 00000000..a64e6b94 --- /dev/null +++ b/pkg/parsers/parsers_test.go @@ -0,0 +1,210 @@ +package parsers + +import ( + "strings" + "testing" +) + +func TestParseHost(t *testing.T) { + var ( + defaultHttpHost = "127.0.0.1" + defaultUnix = "/var/run/docker.sock" + ) + invalids := map[string]string{ + "0.0.0.0": "Invalid bind address format: 0.0.0.0", + "tcp://": "Invalid proto, expected tcp: ", + "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", + "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", + "udp://127.0.0.1": "Invalid bind address format: udp://127.0.0.1", + "udp://127.0.0.1:2375": "Invalid bind address format: udp://127.0.0.1:2375", + } + valids := map[string]string{ + "0.0.0.1:5555": "tcp://0.0.0.1:5555", + "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", + ":6666": "tcp://127.0.0.1:6666", + ":6666/path": "tcp://127.0.0.1:6666/path", + "tcp://:7777": 
"tcp://127.0.0.1:7777", + "tcp://:7777/path": "tcp://127.0.0.1:7777/path", + "": "unix:///var/run/docker.sock", + "unix:///run/docker.sock": "unix:///run/docker.sock", + "unix://": "unix:///var/run/docker.sock", + "fd://": "fd://", + "fd://something": "fd://something", + } + for invalidAddr, expectedError := range invalids { + if addr, err := ParseHost(defaultHttpHost, defaultUnix, invalidAddr); err == nil || err.Error() != expectedError { + t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr) + } + } + for validAddr, expectedAddr := range valids { + if addr, err := ParseHost(defaultHttpHost, defaultUnix, validAddr); err != nil || addr != expectedAddr { + t.Errorf("%v -> expected %v, got %v", validAddr, expectedAddr, addr) + } + } +} + +func TestParseInvalidUnixAddrInvalid(t *testing.T) { + if _, err := ParseUnixAddr("unix://tcp://127.0.0.1", "unix:///var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { + t.Fatalf("Expected an error, got %v", err) + } +} + +func TestParseRepositoryTag(t *testing.T) { + if repo, tag := ParseRepositoryTag("root"); repo != "root" || tag != "" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "", repo, tag) + } + if repo, tag := ParseRepositoryTag("root:tag"); repo != "root" || tag != "tag" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "tag", repo, tag) + } + if repo, digest := ParseRepositoryTag("root@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "root" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" { + t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "root", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest) + } + if repo, tag := ParseRepositoryTag("user/repo"); repo != "user/repo" || tag != "" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "", repo, tag) + } + if repo, tag := ParseRepositoryTag("user/repo:tag"); repo != "user/repo" || tag != "tag" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "tag", repo, tag) + } + if repo, digest := ParseRepositoryTag("user/repo@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "user/repo" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" { + t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "user/repo", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest) + } + if repo, tag := ParseRepositoryTag("url:5000/repo"); repo != "url:5000/repo" || tag != "" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "", repo, tag) + } + if repo, tag := ParseRepositoryTag("url:5000/repo:tag"); repo != "url:5000/repo" || tag != "tag" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "tag", repo, tag) + } + if repo, digest := ParseRepositoryTag("url:5000/repo@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "url:5000/repo" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" { + t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "url:5000/repo", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest) + } +} + +func TestParsePortMapping(t *testing.T) { + if _, err := 
PartParser("ip:public:private", "192.168.1.1:80"); err == nil { + t.Fatalf("Expected an error, got %v", err) + } + data, err := PartParser("ip:public:private", "192.168.1.1:80:8080") + if err != nil { + t.Fatal(err) + } + + if len(data) != 3 { + t.FailNow() + } + if data["ip"] != "192.168.1.1" { + t.Fail() + } + if data["public"] != "80" { + t.Fail() + } + if data["private"] != "8080" { + t.Fail() + } +} + +func TestParseKeyValueOpt(t *testing.T) { + invalids := map[string]string{ + "": "Unable to parse key/value option: ", + "key": "Unable to parse key/value option: key", + } + for invalid, expectedError := range invalids { + if _, _, err := ParseKeyValueOpt(invalid); err == nil || err.Error() != expectedError { + t.Fatalf("Expected error %v for %v, got %v", expectedError, invalid, err) + } + } + valids := map[string][]string{ + "key=value": {"key", "value"}, + " key = value ": {"key", "value"}, + "key=value1=value2": {"key", "value1=value2"}, + " key = value1 = value2 ": {"key", "value1 = value2"}, + } + for valid, expectedKeyValue := range valids { + key, value, err := ParseKeyValueOpt(valid) + if err != nil { + t.Fatal(err) + } + if key != expectedKeyValue[0] || value != expectedKeyValue[1] { + t.Fatalf("Expected {%v: %v} got {%v: %v}", expectedKeyValue[0], expectedKeyValue[1], key, value) + } + } +} + +func TestParsePortRange(t *testing.T) { + if start, end, err := ParsePortRange("8000-8080"); err != nil || start != 8000 || end != 8080 { + t.Fatalf("Error: %s or Expecting {start,end} values {8000,8080} but found {%d,%d}.", err, start, end) + } +} + +func TestParsePortRangeEmpty(t *testing.T) { + if _, _, err := ParsePortRange(""); err == nil || err.Error() != "Empty string specified for ports." { + t.Fatalf("Expected error 'Empty string specified for ports.', got %v", err) + } +} + +func TestParsePortRangeWithNoRange(t *testing.T) { + start, end, err := ParsePortRange("8080") + if err != nil { + t.Fatal(err) + } + if start != 8080 || end != 8080 { + t.Fatalf("Expected start and end to be the same and equal to 8080, but were %v and %v", start, end) + } +} + +func TestParsePortRangeIncorrectRange(t *testing.T) { + if _, _, err := ParsePortRange("9000-8080"); err == nil || !strings.Contains(err.Error(), "Invalid range specified for the Port") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } +} + +func TestParsePortRangeIncorrectEndRange(t *testing.T) { + if _, _, err := ParsePortRange("8000-a"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } + + if _, _, err := ParsePortRange("8000-30a"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } +} + +func TestParsePortRangeIncorrectStartRange(t *testing.T) { + if _, _, err := ParsePortRange("a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } + + if _, _, err := ParsePortRange("30a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } +} + +func TestParseLink(t *testing.T) { + name, alias, err := ParseLink("name:alias") + if err != nil { + t.Fatalf("Expected not to error out on a valid name:alias format but got: %v", err) + } + if name != "name" { + 
t.Fatalf("Link name should have been name, got %s instead", name) + } + if alias != "alias" { + t.Fatalf("Link alias should have been alias, got %s instead", alias) + } + // short format definition + name, alias, err = ParseLink("name") + if err != nil { + t.Fatalf("Expected not to error out on a valid name only format but got: %v", err) + } + if name != "name" { + t.Fatalf("Link name should have been name, got %s instead", name) + } + if alias != "name" { + t.Fatalf("Link alias should have been name, got %s instead", alias) + } + // empty string link definition is not allowed + if _, _, err := ParseLink(""); err == nil || !strings.Contains(err.Error(), "empty string specified for links") { + t.Fatalf("Expected error 'empty string specified for links' but got: %v", err) + } + // more than two colons are not allowed + if _, _, err := ParseLink("link:alias:wrong"); err == nil || !strings.Contains(err.Error(), "bad format for links: link:alias:wrong") { + t.Fatalf("Expected error 'bad format for links: link:alias:wrong' but got: %v", err) + } +} diff --git a/pkg/pidfile/pidfile.go b/pkg/pidfile/pidfile.go new file mode 100644 index 00000000..3e570736 --- /dev/null +++ b/pkg/pidfile/pidfile.go @@ -0,0 +1,42 @@ +package pidfile + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" +) + +type PidFile struct { + path string +} + +func checkPidFileAlreadyExists(path string) error { + if pidString, err := ioutil.ReadFile(path); err == nil { + if pid, err := strconv.Atoi(string(pidString)); err == nil { + if _, err := os.Stat(filepath.Join("/proc", string(pid))); err == nil { + return fmt.Errorf("pid file found, ensure docker is not running or delete %s", path) + } + } + } + return nil +} + +func New(path string) (*PidFile, error) { + if err := checkPidFileAlreadyExists(path); err != nil { + return nil, err + } + if err := ioutil.WriteFile(path, []byte(fmt.Sprintf("%d", os.Getpid())), 0644); err != nil { + return nil, err + } + + return &PidFile{path: path}, nil +} + +func (file PidFile) Remove() error { + if err := os.Remove(file.path); err != nil { + return err + } + return nil +} diff --git a/pkg/pidfile/pidfile_test.go b/pkg/pidfile/pidfile_test.go new file mode 100644 index 00000000..6ed9cfc3 --- /dev/null +++ b/pkg/pidfile/pidfile_test.go @@ -0,0 +1,32 @@ +package pidfile + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestNewAndRemove(t *testing.T) { + dir, err := ioutil.TempDir(os.TempDir(), "test-pidfile") + if err != nil { + t.Fatal("Could not create test directory") + } + + file, err := New(filepath.Join(dir, "testfile")) + if err != nil { + t.Fatal("Could not create test file", err) + } + + if err := file.Remove(); err != nil { + t.Fatal("Could not delete created test file") + } +} + +func TestRemoveInvalidPath(t *testing.T) { + file := PidFile{path: filepath.Join("foo", "bar")} + + if err := file.Remove(); err == nil { + t.Fatal("Non-existing file doesn't give an error on delete") + } +} diff --git a/pkg/plugins/client.go b/pkg/plugins/client.go new file mode 100644 index 00000000..f30f05e0 --- /dev/null +++ b/pkg/plugins/client.go @@ -0,0 +1,106 @@ +package plugins + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/sockets" + "github.com/docker/docker/pkg/tlsconfig" +) + +const ( + versionMimetype = "application/vnd.docker.plugins.v1+json" + defaultTimeOut = 30 +) + +func NewClient(addr string, tlsConfig tlsconfig.Options) (*Client, 
error) { + tr := &http.Transport{} + + c, err := tlsconfig.Client(tlsConfig) + if err != nil { + return nil, err + } + tr.TLSClientConfig = c + + protoAndAddr := strings.Split(addr, "://") + sockets.ConfigureTCPTransport(tr, protoAndAddr[0], protoAndAddr[1]) + return &Client{&http.Client{Transport: tr}, protoAndAddr[1]}, nil +} + +type Client struct { + http *http.Client + addr string +} + +func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) error { + return c.callWithRetry(serviceMethod, args, ret, true) +} + +func (c *Client) callWithRetry(serviceMethod string, args interface{}, ret interface{}, retry bool) error { + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(args); err != nil { + return err + } + + req, err := http.NewRequest("POST", "/"+serviceMethod, &buf) + if err != nil { + return err + } + req.Header.Add("Accept", versionMimetype) + req.URL.Scheme = "http" + req.URL.Host = c.addr + + var retries int + start := time.Now() + + for { + resp, err := c.http.Do(req) + if err != nil { + if !retry { + return err + } + + timeOff := backoff(retries) + if abort(start, timeOff) { + return err + } + retries++ + logrus.Warnf("Unable to connect to plugin: %s, retrying in %v", c.addr, timeOff) + time.Sleep(timeOff) + continue + } + + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + remoteErr, err := ioutil.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("Plugin Error: %s", err) + } + return fmt.Errorf("Plugin Error: %s", remoteErr) + } + + return json.NewDecoder(resp.Body).Decode(&ret) + } +} + +func backoff(retries int) time.Duration { + b, max := 1, defaultTimeOut + for b < max && retries > 0 { + b *= 2 + retries-- + } + if b > max { + b = max + } + return time.Duration(b) * time.Second +} + +func abort(start time.Time, timeOff time.Duration) bool { + return timeOff+time.Since(start) >= time.Duration(defaultTimeOut)*time.Second +} diff --git a/pkg/plugins/client_test.go b/pkg/plugins/client_test.go new file mode 100644 index 00000000..6a2c96f7 --- /dev/null +++ b/pkg/plugins/client_test.go @@ -0,0 +1,107 @@ +package plugins + +import ( + "io" + "net/http" + "net/http/httptest" + "reflect" + "testing" + "time" + + "github.com/docker/docker/pkg/tlsconfig" +) + +var ( + mux *http.ServeMux + server *httptest.Server +) + +func setupRemotePluginServer() string { + mux = http.NewServeMux() + server = httptest.NewServer(mux) + return server.URL +} + +func teardownRemotePluginServer() { + if server != nil { + server.Close() + } +} + +func TestFailedConnection(t *testing.T) { + c, _ := NewClient("tcp://127.0.0.1:1", tlsconfig.Options{InsecureSkipVerify: true}) + err := c.callWithRetry("Service.Method", nil, nil, false) + if err == nil { + t.Fatal("Unexpected successful connection") + } +} + +func TestEchoInputOutput(t *testing.T) { + addr := setupRemotePluginServer() + defer teardownRemotePluginServer() + + m := Manifest{[]string{"VolumeDriver", "NetworkDriver"}} + + mux.HandleFunc("/Test.Echo", func(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + t.Fatalf("Expected POST, got %s\n", r.Method) + } + + header := w.Header() + header.Set("Content-Type", versionMimetype) + + io.Copy(w, r.Body) + }) + + c, _ := NewClient(addr, tlsconfig.Options{InsecureSkipVerify: true}) + var output Manifest + err := c.Call("Test.Echo", m, &output) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(output, m) { + t.Fatalf("Expected %v, was %v\n", m, output) + } +} + +func TestBackoff(t *testing.T) { + cases := []struct { + 
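+		// expTimeOff is expressed in seconds: backoff doubles the delay per
+		// retry and caps it at defaultTimeOut (30s)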
retries int + expTimeOff time.Duration + }{ + {0, time.Duration(1)}, + {1, time.Duration(2)}, + {2, time.Duration(4)}, + {4, time.Duration(16)}, + {6, time.Duration(30)}, + {10, time.Duration(30)}, + } + + for _, c := range cases { + s := c.expTimeOff * time.Second + if d := backoff(c.retries); d != s { + t.Fatalf("Retry %v, expected %v, was %v\n", c.retries, s, d) + } + } +} + +func TestAbortRetry(t *testing.T) { + cases := []struct { + timeOff time.Duration + expAbort bool + }{ + {time.Duration(1), false}, + {time.Duration(2), false}, + {time.Duration(10), false}, + {time.Duration(30), true}, + {time.Duration(40), true}, + } + + for _, c := range cases { + s := c.timeOff * time.Second + if a := abort(time.Now(), s); a != c.expAbort { + t.Fatalf("Duration %v, expected %v, was %v\n", c.timeOff, s, a) + } + } +} diff --git a/pkg/plugins/discovery.go b/pkg/plugins/discovery.go new file mode 100644 index 00000000..2128a920 --- /dev/null +++ b/pkg/plugins/discovery.go @@ -0,0 +1,100 @@ +package plugins + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/url" + "os" + "path/filepath" + "strings" +) + +var ( + ErrNotFound = errors.New("Plugin not found") + socketsPath = "/run/docker/plugins" + specsPaths = []string{"/etc/docker/plugins", "/usr/lib/docker/plugins"} +) + +type Registry interface { + Plugins() ([]*Plugin, error) + Plugin(name string) (*Plugin, error) +} + +type LocalRegistry struct{} + +func newLocalRegistry() LocalRegistry { + return LocalRegistry{} +} + +func (l *LocalRegistry) Plugin(name string) (*Plugin, error) { + socketpaths := pluginPaths(socketsPath, name, ".sock") + + for _, p := range socketpaths { + if fi, err := os.Stat(p); err == nil && fi.Mode()&os.ModeSocket != 0 { + return newLocalPlugin(name, "unix://"+p), nil + } + } + + var txtspecpaths []string + for _, p := range specsPaths { + txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".spec")...) + txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".json")...) 
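+		// each specs directory may describe a plugin either as a plain-address
+		// .spec file or as a JSON manifest (.json); both candidates are collected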
+ } + + for _, p := range txtspecpaths { + if _, err := os.Stat(p); err == nil { + if strings.HasSuffix(p, ".json") { + return readPluginJSONInfo(name, p) + } + return readPluginInfo(name, p) + } + } + return nil, ErrNotFound +} + +func readPluginInfo(name, path string) (*Plugin, error) { + content, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + addr := strings.TrimSpace(string(content)) + + u, err := url.Parse(addr) + if err != nil { + return nil, err + } + + if len(u.Scheme) == 0 { + return nil, fmt.Errorf("Unknown protocol") + } + + return newLocalPlugin(name, addr), nil +} + +func readPluginJSONInfo(name, path string) (*Plugin, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + var p Plugin + if err := json.NewDecoder(f).Decode(&p); err != nil { + return nil, err + } + p.Name = name + if len(p.TLSConfig.CAFile) == 0 { + p.TLSConfig.InsecureSkipVerify = true + } + + return &p, nil +} + +func pluginPaths(base, name, ext string) []string { + return []string{ + filepath.Join(base, name+ext), + filepath.Join(base, name, name+ext), + } +} diff --git a/pkg/plugins/discovery_test.go b/pkg/plugins/discovery_test.go new file mode 100644 index 00000000..5610fe1e --- /dev/null +++ b/pkg/plugins/discovery_test.go @@ -0,0 +1,169 @@ +package plugins + +import ( + "fmt" + "io/ioutil" + "net" + "os" + "path/filepath" + "reflect" + "testing" +) + +func setup(t *testing.T) (string, func()) { + tmpdir, err := ioutil.TempDir("", "docker-test") + if err != nil { + t.Fatal(err) + } + backup := socketsPath + socketsPath = tmpdir + specsPaths = []string{tmpdir} + + return tmpdir, func() { + socketsPath = backup + os.RemoveAll(tmpdir) + } +} + +func TestLocalSocket(t *testing.T) { + tmpdir, unregister := setup(t) + defer unregister() + + cases := []string{ + filepath.Join(tmpdir, "echo.sock"), + filepath.Join(tmpdir, "echo", "echo.sock"), + } + + for _, c := range cases { + if err := os.MkdirAll(filepath.Dir(c), 0755); err != nil { + t.Fatal(err) + } + + l, err := net.Listen("unix", c) + if err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + p, err := r.Plugin("echo") + if err != nil { + t.Fatal(err) + } + + pp, err := r.Plugin("echo") + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(p, pp) { + t.Fatalf("Expected %v, was %v\n", p, pp) + } + + if p.Name != "echo" { + t.Fatalf("Expected plugin `echo`, got %s\n", p.Name) + } + + addr := fmt.Sprintf("unix://%s", c) + if p.Addr != addr { + t.Fatalf("Expected plugin addr `%s`, got %s\n", addr, p.Addr) + } + if p.TLSConfig.InsecureSkipVerify != true { + t.Fatalf("Expected TLS verification to be skipped") + } + l.Close() + } +} + +func TestFileSpecPlugin(t *testing.T) { + tmpdir, unregister := setup(t) + defer unregister() + + cases := []struct { + path string + name string + addr string + fail bool + }{ + {filepath.Join(tmpdir, "echo.spec"), "echo", "unix://var/lib/docker/plugins/echo.sock", false}, + {filepath.Join(tmpdir, "echo", "echo.spec"), "echo", "unix://var/lib/docker/plugins/echo.sock", false}, + {filepath.Join(tmpdir, "foo.spec"), "foo", "tcp://localhost:8080", false}, + {filepath.Join(tmpdir, "foo", "foo.spec"), "foo", "tcp://localhost:8080", false}, + {filepath.Join(tmpdir, "bar.spec"), "bar", "localhost:8080", true}, // unknown transport + } + + for _, c := range cases { + if err := os.MkdirAll(filepath.Dir(c.path), 0755); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(c.path, []byte(c.addr), 0644); err != nil { + t.Fatal(err) + } + + r := 
newLocalRegistry() + p, err := r.Plugin(c.name) + if c.fail && err == nil { + continue + } + + if err != nil { + t.Fatal(err) + } + + if p.Name != c.name { + t.Fatalf("Expected plugin `%s`, got %s\n", c.name, p.Name) + } + + if p.Addr != c.addr { + t.Fatalf("Expected plugin addr `%s`, got %s\n", c.addr, p.Addr) + } + + if p.TLSConfig.InsecureSkipVerify != true { + t.Fatalf("Expected TLS verification to be skipped") + } + } +} + +func TestFileJSONSpecPlugin(t *testing.T) { + tmpdir, unregister := setup(t) + defer unregister() + + p := filepath.Join(tmpdir, "example.json") + spec := `{ + "Name": "plugin-example", + "Addr": "https://example.com/docker/plugin", + "TLSConfig": { + "CAFile": "/usr/shared/docker/certs/example-ca.pem", + "CertFile": "/usr/shared/docker/certs/example-cert.pem", + "KeyFile": "/usr/shared/docker/certs/example-key.pem" + } +}` + + if err := ioutil.WriteFile(p, []byte(spec), 0644); err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + plugin, err := r.Plugin("example") + if err != nil { + t.Fatal(err) + } + + if plugin.Name != "example" { + t.Fatalf("Expected plugin `plugin-example`, got %s\n", plugin.Name) + } + + if plugin.Addr != "https://example.com/docker/plugin" { + t.Fatalf("Expected plugin addr `https://example.com/docker/plugin`, got %s\n", plugin.Addr) + } + + if plugin.TLSConfig.CAFile != "/usr/shared/docker/certs/example-ca.pem" { + t.Fatalf("Expected plugin CA `/usr/shared/docker/certs/example-ca.pem`, got %s\n", plugin.TLSConfig.CAFile) + } + + if plugin.TLSConfig.CertFile != "/usr/shared/docker/certs/example-cert.pem" { + t.Fatalf("Expected plugin Certificate `/usr/shared/docker/certs/example-cert.pem`, got %s\n", plugin.TLSConfig.CertFile) + } + + if plugin.TLSConfig.KeyFile != "/usr/shared/docker/certs/example-key.pem" { + t.Fatalf("Expected plugin Key `/usr/shared/docker/certs/example-key.pem`, got %s\n", plugin.TLSConfig.KeyFile) + } +} diff --git a/pkg/plugins/pluginrpc-gen/fixtures/foo.go b/pkg/plugins/pluginrpc-gen/fixtures/foo.go new file mode 100644 index 00000000..4e73fc10 --- /dev/null +++ b/pkg/plugins/pluginrpc-gen/fixtures/foo.go @@ -0,0 +1,35 @@ +package foo + +type wobble struct { + Some string + Val string + Inception *wobble +} + +type Fooer interface{} + +type Fooer2 interface { + Foo() +} + +type Fooer3 interface { + Foo() + Bar(a string) + Baz(a string) (err error) + Qux(a, b string) (val string, err error) + Wobble() (w *wobble) + Wiggle() (w wobble) +} + +type Fooer4 interface { + Foo() error +} + +type Bar interface { + Boo(a string, b string) (s string, err error) +} + +type Fooer5 interface { + Foo() + Bar +} diff --git a/pkg/plugins/pluginrpc-gen/main.go b/pkg/plugins/pluginrpc-gen/main.go new file mode 100644 index 00000000..2130af12 --- /dev/null +++ b/pkg/plugins/pluginrpc-gen/main.go @@ -0,0 +1,91 @@ +package main + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io/ioutil" + "os" + "unicode" + "unicode/utf8" +) + +type stringSet struct { + values map[string]struct{} +} + +func (s stringSet) String() string { + return "" +} + +func (s stringSet) Set(value string) error { + s.values[value] = struct{}{} + return nil +} +func (s stringSet) GetValues() map[string]struct{} { + return s.values +} + +var ( + typeName = flag.String("type", "", "interface type to generate plugin rpc proxy for") + rpcName = flag.String("name", *typeName, "RPC name, set if different from type") + inputFile = flag.String("i", "", "input file path") + outputFile = flag.String("o", *inputFile+"_proxy.go", "output file path") + + skipFuncs 
map[string]struct{}
+	flSkipFuncs = stringSet{make(map[string]struct{})}
+
+	flBuildTags = stringSet{make(map[string]struct{})}
+)
+
+func errorOut(msg string, err error) {
+	if err == nil {
+		return
+	}
+	fmt.Fprintf(os.Stderr, "%s: %v\n", msg, err)
+	os.Exit(1)
+}
+
+func checkFlags() error {
+	if *outputFile == "" {
+		return fmt.Errorf("missing required flag `-o`")
+	}
+	if *inputFile == "" {
+		return fmt.Errorf("missing required flag `-i`")
+	}
+	return nil
+}
+
+func main() {
+	flag.Var(flSkipFuncs, "skip", "skip parsing for function")
+	flag.Var(flBuildTags, "tag", "build tags to add to generated files")
+	flag.Parse()
+	skipFuncs = flSkipFuncs.GetValues()
+
+	errorOut("error", checkFlags())
+
+	pkg, err := Parse(*inputFile, *typeName)
+	errorOut(fmt.Sprintf("error parsing requested type %s", *typeName), err)
+
+	var analysis = struct {
+		InterfaceType string
+		RPCName       string
+		BuildTags     map[string]struct{}
+		*parsedPkg
+	}{toLower(*typeName), *rpcName, flBuildTags.GetValues(), pkg}
+	var buf bytes.Buffer
+
+	errorOut("parser error", generatedTempl.Execute(&buf, analysis))
+	src, err := format.Source(buf.Bytes())
+	errorOut("error formatting generated source", err)
+	errorOut("error writing file", ioutil.WriteFile(*outputFile, src, 0644))
+}
+
+func toLower(s string) string {
+	if s == "" {
+		return ""
+	}
+	r, n := utf8.DecodeRuneInString(s)
+	return string(unicode.ToLower(r)) + s[n:]
+}
diff --git a/pkg/plugins/pluginrpc-gen/parser.go b/pkg/plugins/pluginrpc-gen/parser.go
new file mode 100644
index 00000000..b9746f87
--- /dev/null
+++ b/pkg/plugins/pluginrpc-gen/parser.go
@@ -0,0 +1,162 @@
+package main
+
+import (
+	"errors"
+	"fmt"
+	"go/ast"
+	"go/parser"
+	"go/token"
+	"reflect"
+	"strings"
+)
+
+var ErrBadReturn = errors.New("found return arg with no name: all args must be named")
+
+type ErrUnexpectedType struct {
+	expected string
+	actual   interface{}
+}
+
+func (e ErrUnexpectedType) Error() string {
+	return fmt.Sprintf("wrong type: expected %s, got: %v", e.expected, reflect.TypeOf(e.actual))
+}
+
+type parsedPkg struct {
+	Name      string
+	Functions []function
+}
+
+type function struct {
+	Name    string
+	Args    []arg
+	Returns []arg
+	Doc     string
+}
+
+type arg struct {
+	Name    string
+	ArgType string
+}
+
+func (a *arg) String() string {
+	return strings.ToLower(a.Name) + " " + strings.ToLower(a.ArgType)
+}
+
+// Parses the given file for an interface definition with the given name
+func Parse(filePath string, objName string) (*parsedPkg, error) {
+	fs := token.NewFileSet()
+	pkg, err := parser.ParseFile(fs, filePath, nil, parser.AllErrors)
+	if err != nil {
+		return nil, err
+	}
+	p := &parsedPkg{}
+	p.Name = pkg.Name.Name
+	obj, exists := pkg.Scope.Objects[objName]
+	if !exists {
+		return nil, fmt.Errorf("could not find object %s in %s", objName, filePath)
+	}
+	if obj.Kind != ast.Typ {
+		return nil, fmt.Errorf("expected type, got %s", obj.Kind)
+	}
+	spec, ok := obj.Decl.(*ast.TypeSpec)
+	if !ok {
+		return nil, ErrUnexpectedType{"*ast.TypeSpec", obj.Decl}
+	}
+	iface, ok := spec.Type.(*ast.InterfaceType)
+	if !ok {
+		return nil, ErrUnexpectedType{"*ast.InterfaceType", spec.Type}
+	}
+
+	p.Functions, err = parseInterface(iface)
+	if err != nil {
+		return nil, err
+	}
+
+	return p, nil
+}
+
+func parseInterface(iface *ast.InterfaceType) ([]function, error) {
+	var functions []function
+	for _, field := range iface.Methods.List {
+		switch f := field.Type.(type) {
+		case *ast.FuncType:
+			method, err := parseFunc(field)
+			if err != nil {
+				return nil, err
+			}
+			if method == nil
{ + continue + } + functions = append(functions, *method) + case *ast.Ident: + spec, ok := f.Obj.Decl.(*ast.TypeSpec) + if !ok { + return nil, ErrUnexpectedType{"*ast.TypeSpec", f.Obj.Decl} + } + iface, ok := spec.Type.(*ast.InterfaceType) + if !ok { + return nil, ErrUnexpectedType{"*ast.TypeSpec", spec.Type} + } + funcs, err := parseInterface(iface) + if err != nil { + fmt.Println(err) + continue + } + functions = append(functions, funcs...) + default: + return nil, ErrUnexpectedType{"*astFuncType or *ast.Ident", f} + } + } + return functions, nil +} + +func parseFunc(field *ast.Field) (*function, error) { + f := field.Type.(*ast.FuncType) + method := &function{Name: field.Names[0].Name} + if _, exists := skipFuncs[method.Name]; exists { + fmt.Println("skipping:", method.Name) + return nil, nil + } + if f.Params != nil { + args, err := parseArgs(f.Params.List) + if err != nil { + return nil, err + } + method.Args = args + } + if f.Results != nil { + returns, err := parseArgs(f.Results.List) + if err != nil { + return nil, fmt.Errorf("error parsing function returns for %q: %v", method.Name, err) + } + method.Returns = returns + } + return method, nil +} + +func parseArgs(fields []*ast.Field) ([]arg, error) { + var args []arg + for _, f := range fields { + if len(f.Names) == 0 { + return nil, ErrBadReturn + } + for _, name := range f.Names { + var typeName string + switch argType := f.Type.(type) { + case *ast.Ident: + typeName = argType.Name + case *ast.StarExpr: + i, ok := argType.X.(*ast.Ident) + if !ok { + return nil, ErrUnexpectedType{"*ast.Ident", f.Type} + } + typeName = "*" + i.Name + default: + return nil, ErrUnexpectedType{"*ast.Ident or *ast.StarExpr", f.Type} + } + + args = append(args, arg{name.Name, typeName}) + } + } + return args, nil +} diff --git a/pkg/plugins/pluginrpc-gen/parser_test.go b/pkg/plugins/pluginrpc-gen/parser_test.go new file mode 100644 index 00000000..6c5665fe --- /dev/null +++ b/pkg/plugins/pluginrpc-gen/parser_test.go @@ -0,0 +1,168 @@ +package main + +import ( + "fmt" + "path/filepath" + "runtime" + "strings" + "testing" +) + +const testFixture = "fixtures/foo.go" + +func TestParseEmptyInterface(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer") + if err != nil { + t.Fatal(err) + } + + assertName(t, "foo", pkg.Name) + assertNum(t, 0, len(pkg.Functions)) +} + +func TestParseNonInterfaceType(t *testing.T) { + _, err := Parse(testFixture, "wobble") + if _, ok := err.(ErrUnexpectedType); !ok { + t.Fatal("expected type error when parsing non-interface type") + } +} + +func TestParseWithOneFunction(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer2") + if err != nil { + t.Fatal(err) + } + + assertName(t, "foo", pkg.Name) + assertNum(t, 1, len(pkg.Functions)) + assertName(t, "Foo", pkg.Functions[0].Name) + assertNum(t, 0, len(pkg.Functions[0].Args)) + assertNum(t, 0, len(pkg.Functions[0].Returns)) +} + +func TestParseWithMultipleFuncs(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer3") + if err != nil { + t.Fatal(err) + } + + assertName(t, "foo", pkg.Name) + assertNum(t, 6, len(pkg.Functions)) + + f := pkg.Functions[0] + assertName(t, "Foo", f.Name) + assertNum(t, 0, len(f.Args)) + assertNum(t, 0, len(f.Returns)) + + f = pkg.Functions[1] + assertName(t, "Bar", f.Name) + assertNum(t, 1, len(f.Args)) + assertNum(t, 0, len(f.Returns)) + arg := f.Args[0] + assertName(t, "a", arg.Name) + assertName(t, "string", arg.ArgType) + + f = pkg.Functions[2] + assertName(t, "Baz", f.Name) + assertNum(t, 1, len(f.Args)) + assertNum(t, 1, len(f.Returns)) + 
arg = f.Args[0] + assertName(t, "a", arg.Name) + assertName(t, "string", arg.ArgType) + arg = f.Returns[0] + assertName(t, "err", arg.Name) + assertName(t, "error", arg.ArgType) + + f = pkg.Functions[3] + assertName(t, "Qux", f.Name) + assertNum(t, 2, len(f.Args)) + assertNum(t, 2, len(f.Returns)) + arg = f.Args[0] + assertName(t, "a", f.Args[0].Name) + assertName(t, "string", f.Args[0].ArgType) + arg = f.Args[1] + assertName(t, "b", arg.Name) + assertName(t, "string", arg.ArgType) + arg = f.Returns[0] + assertName(t, "val", arg.Name) + assertName(t, "string", arg.ArgType) + arg = f.Returns[1] + assertName(t, "err", arg.Name) + assertName(t, "error", arg.ArgType) + + f = pkg.Functions[4] + assertName(t, "Wobble", f.Name) + assertNum(t, 0, len(f.Args)) + assertNum(t, 1, len(f.Returns)) + arg = f.Returns[0] + assertName(t, "w", arg.Name) + assertName(t, "*wobble", arg.ArgType) + + f = pkg.Functions[5] + assertName(t, "Wiggle", f.Name) + assertNum(t, 0, len(f.Args)) + assertNum(t, 1, len(f.Returns)) + arg = f.Returns[0] + assertName(t, "w", arg.Name) + assertName(t, "wobble", arg.ArgType) +} + +func TestParseWithUnamedReturn(t *testing.T) { + _, err := Parse(testFixture, "Fooer4") + if !strings.HasSuffix(err.Error(), ErrBadReturn.Error()) { + t.Fatalf("expected ErrBadReturn, got %v", err) + } +} + +func TestEmbeddedInterface(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer5") + if err != nil { + t.Fatal(err) + } + + assertName(t, "foo", pkg.Name) + assertNum(t, 2, len(pkg.Functions)) + + f := pkg.Functions[0] + assertName(t, "Foo", f.Name) + assertNum(t, 0, len(f.Args)) + assertNum(t, 0, len(f.Returns)) + + f = pkg.Functions[1] + assertName(t, "Boo", f.Name) + assertNum(t, 2, len(f.Args)) + assertNum(t, 2, len(f.Returns)) + + arg := f.Args[0] + assertName(t, "a", arg.Name) + assertName(t, "string", arg.ArgType) + + arg = f.Args[1] + assertName(t, "b", arg.Name) + assertName(t, "string", arg.ArgType) + + arg = f.Returns[0] + assertName(t, "s", arg.Name) + assertName(t, "string", arg.ArgType) + + arg = f.Returns[1] + assertName(t, "err", arg.Name) + assertName(t, "error", arg.ArgType) +} + +func assertName(t *testing.T, expected, actual string) { + if expected != actual { + fatalOut(t, fmt.Sprintf("expected name to be `%s`, got: %s", expected, actual)) + } +} + +func assertNum(t *testing.T, expected, actual int) { + if expected != actual { + fatalOut(t, fmt.Sprintf("expected number to be %d, got: %d", expected, actual)) + } +} + +func fatalOut(t *testing.T, msg string) { + _, file, ln, _ := runtime.Caller(2) + t.Fatalf("%s:%d: %s", filepath.Base(file), ln, msg) +} diff --git a/pkg/plugins/pluginrpc-gen/template.go b/pkg/plugins/pluginrpc-gen/template.go new file mode 100644 index 00000000..704030cf --- /dev/null +++ b/pkg/plugins/pluginrpc-gen/template.go @@ -0,0 +1,97 @@ +package main + +import ( + "strings" + "text/template" +) + +func printArgs(args []arg) string { + var argStr []string + for _, arg := range args { + argStr = append(argStr, arg.String()) + } + return strings.Join(argStr, ", ") +} + +func marshalType(t string) string { + switch t { + case "error": + // convert error types to plain strings to ensure the values are encoded/decoded properly + return "string" + default: + return t + } +} + +func isErr(t string) bool { + switch t { + case "error": + return true + default: + return false + } +} + +// Need to use this helper due to issues with go-vet +func buildTag(s string) string { + return "+build " + s +} + +var templFuncs = template.FuncMap{ + "printArgs": printArgs, + 
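+	// errors are rewritten as strings (see marshalType) so their values
+	// survive the JSON round-trip between daemon and plugin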
"marshalType": marshalType, + "isErr": isErr, + "lower": strings.ToLower, + "title": strings.Title, + "tag": buildTag, +} + +var generatedTempl = template.Must(template.New("rpc_cient").Funcs(templFuncs).Parse(` +// generated code - DO NOT EDIT +{{ range $k, $v := .BuildTags }} + // {{ tag $k }} {{ end }} + +package {{ .Name }} + +import "errors" + +type client interface{ + Call(string, interface{}, interface{}) error +} + +type {{ .InterfaceType }}Proxy struct { + client +} + +{{ range .Functions }} + type {{ $.InterfaceType }}Proxy{{ .Name }}Request struct{ + {{ range .Args }} + {{ title .Name }} {{ .ArgType }} {{ end }} + } + + type {{ $.InterfaceType }}Proxy{{ .Name }}Response struct{ + {{ range .Returns }} + {{ title .Name }} {{ marshalType .ArgType }} {{ end }} + } + + func (pp *{{ $.InterfaceType }}Proxy) {{ .Name }}({{ printArgs .Args }}) ({{ printArgs .Returns }}) { + var( + req {{ $.InterfaceType }}Proxy{{ .Name }}Request + ret {{ $.InterfaceType }}Proxy{{ .Name }}Response + ) + {{ range .Args }} + req.{{ title .Name }} = {{ lower .Name }} {{ end }} + if err = pp.Call("{{ $.RPCName }}.{{ .Name }}", req, &ret); err != nil { + return + } + {{ range $r := .Returns }} + {{ if isErr .ArgType }} + if ret.{{ title .Name }} != "" { + {{ lower .Name }} = errors.New(ret.{{ title .Name }}) + } {{ end }} + {{ if isErr .ArgType | not }} {{ lower .Name }} = ret.{{ title .Name }} {{ end }} {{ end }} + + return + } +{{ end }} +`)) diff --git a/pkg/plugins/plugins.go b/pkg/plugins/plugins.go new file mode 100644 index 00000000..a624e799 --- /dev/null +++ b/pkg/plugins/plugins.go @@ -0,0 +1,115 @@ +package plugins + +import ( + "errors" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/tlsconfig" +) + +var ( + ErrNotImplements = errors.New("Plugin does not implement the requested driver") +) + +type plugins struct { + sync.Mutex + plugins map[string]*Plugin +} + +var ( + storage = plugins{plugins: make(map[string]*Plugin)} + extpointHandlers = make(map[string]func(string, *Client)) +) + +type Manifest struct { + Implements []string +} + +type Plugin struct { + Name string `json:"-"` + Addr string + TLSConfig tlsconfig.Options + Client *Client `json:"-"` + Manifest *Manifest `json:"-"` +} + +func newLocalPlugin(name, addr string) *Plugin { + return &Plugin{ + Name: name, + Addr: addr, + TLSConfig: tlsconfig.Options{InsecureSkipVerify: true}, + } +} + +func (p *Plugin) activate() error { + c, err := NewClient(p.Addr, p.TLSConfig) + if err != nil { + return err + } + p.Client = c + + m := new(Manifest) + if err = p.Client.Call("Plugin.Activate", nil, m); err != nil { + return err + } + + logrus.Debugf("%s's manifest: %v", p.Name, m) + p.Manifest = m + + for _, iface := range m.Implements { + handler, handled := extpointHandlers[iface] + if !handled { + continue + } + handler(p.Name, p.Client) + } + return nil +} + +func load(name string) (*Plugin, error) { + registry := newLocalRegistry() + pl, err := registry.Plugin(name) + if err != nil { + return nil, err + } + if err := pl.activate(); err != nil { + return nil, err + } + return pl, nil +} + +func get(name string) (*Plugin, error) { + storage.Lock() + defer storage.Unlock() + pl, ok := storage.plugins[name] + if ok { + return pl, nil + } + pl, err := load(name) + if err != nil { + return nil, err + } + + logrus.Debugf("Plugin: %v", pl) + storage.plugins[name] = pl + return pl, nil +} + +func Get(name, imp string) (*Plugin, error) { + pl, err := get(name) + if err != nil { + return nil, err + } + for _, driver := range 
pl.Manifest.Implements { + logrus.Debugf("%s implements: %s", name, driver) + if driver == imp { + return pl, nil + } + } + return nil, ErrNotImplements +} + +func Handle(iface string, fn func(string, *Client)) { + extpointHandlers[iface] = fn +} diff --git a/pkg/pools/pools.go b/pkg/pools/pools.go new file mode 100644 index 00000000..c34ea92e --- /dev/null +++ b/pkg/pools/pools.go @@ -0,0 +1,117 @@ +// Package pools provides a collection of pools which provide various +// data types with buffers. These can be used to lower the number of +// memory allocations and reuse buffers. +// +// New pools should be added to this package to allow them to be +// shared across packages. +// +// Utility functions which operate on pools should be added to this +// package to allow them to be reused. +package pools + +import ( + "bufio" + "io" + "sync" + + "github.com/docker/docker/pkg/ioutils" +) + +var ( + // Pool which returns bufio.Reader with a 32K buffer + BufioReader32KPool *BufioReaderPool + // Pool which returns bufio.Writer with a 32K buffer + BufioWriter32KPool *BufioWriterPool +) + +const buffer32K = 32 * 1024 + +type BufioReaderPool struct { + pool sync.Pool +} + +func init() { + BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) + BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) +} + +// newBufioReaderPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioReaderPoolWithSize(size int) *BufioReaderPool { + pool := sync.Pool{ + New: func() interface{} { return bufio.NewReaderSize(nil, size) }, + } + return &BufioReaderPool{pool: pool} +} + +// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. +func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { + buf := bufPool.pool.Get().(*bufio.Reader) + buf.Reset(r) + return buf +} + +// Put puts the bufio.Reader back into the pool. +func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy +func Copy(dst io.Writer, src io.Reader) (written int64, err error) { + buf := BufioReader32KPool.Get(src) + written, err = io.Copy(dst, buf) + BufioReader32KPool.Put(buf) + return +} + +// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back +// into the pool and closes the reader if it's an io.ReadCloser. +func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { + return ioutils.NewReadCloserWrapper(r, func() error { + if readCloser, ok := r.(io.ReadCloser); ok { + readCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} + +type BufioWriterPool struct { + pool sync.Pool +} + +// newBufioWriterPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioWriterPoolWithSize(size int) *BufioWriterPool { + pool := sync.Pool{ + New: func() interface{} { return bufio.NewWriterSize(nil, size) }, + } + return &BufioWriterPool{pool: pool} +} + +// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. +func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { + buf := bufPool.pool.Get().(*bufio.Writer) + buf.Reset(w) + return buf +} + +// Put puts the bufio.Writer back into the pool. 
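+// Callers are expected to pair every Get with a Put once they are done with
+// the writer, e.g. (sketch):
+//
+//	buf := pools.BufioWriter32KPool.Get(w)
+//	defer pools.BufioWriter32KPool.Put(buf)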
+func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
+	b.Reset(nil)
+	bufPool.pool.Put(b)
+}
+
+// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
+// into the pool and closes the writer if it's an io.WriteCloser.
+func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
+	return ioutils.NewWriteCloserWrapper(w, func() error {
+		buf.Flush()
+		if writeCloser, ok := w.(io.WriteCloser); ok {
+			writeCloser.Close()
+		}
+		bufPool.Put(buf)
+		return nil
+	})
+}
diff --git a/pkg/pools/pools_test.go b/pkg/pools/pools_test.go
new file mode 100644
index 00000000..78689800
--- /dev/null
+++ b/pkg/pools/pools_test.go
@@ -0,0 +1,162 @@
+package pools
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+	"strings"
+	"testing"
+)
+
+func TestBufioReaderPoolGetWithNoReaderShouldCreateOne(t *testing.T) {
+	reader := BufioReader32KPool.Get(nil)
+	if reader == nil {
+		t.Fatalf("BufioReaderPool should have created a bufio.Reader but did not.")
+	}
+}
+
+func TestBufioReaderPoolPutAndGet(t *testing.T) {
+	sr := bufio.NewReader(strings.NewReader("foobar"))
+	reader := BufioReader32KPool.Get(sr)
+	if reader == nil {
+		t.Fatalf("BufioReaderPool should not return a nil reader.")
+	}
+	// verify the first 3 bytes
+	buf1 := make([]byte, 3)
+	_, err := reader.Read(buf1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if actual := string(buf1); actual != "foo" {
+		t.Fatalf("The first 3 bytes should have been 'foo' but were %v", actual)
+	}
+	BufioReader32KPool.Put(reader)
+	// Try to read the next 3 bytes
+	_, err = sr.Read(make([]byte, 3))
+	if err == nil || err != io.EOF {
+		t.Fatalf("The buffer should have been empty and issued an EOF error.")
+	}
+}
+
+type simpleReaderCloser struct {
+	io.Reader
+	closed bool
+}
+
+func (r *simpleReaderCloser) Close() error {
+	r.closed = true
+	return nil
+}
+
+func TestNewReadCloserWrapperWithAReadCloser(t *testing.T) {
+	br := bufio.NewReader(strings.NewReader(""))
+	sr := &simpleReaderCloser{
+		Reader: strings.NewReader("foobar"),
+		closed: false,
+	}
+	reader := BufioReader32KPool.NewReadCloserWrapper(br, sr)
+	if reader == nil {
+		t.Fatalf("NewReadCloserWrapper should not return a nil reader.")
+	}
+	// Verify the content of reader
+	buf := make([]byte, 3)
+	_, err := reader.Read(buf)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if actual := string(buf); actual != "foo" {
+		t.Fatalf("The first 3 bytes should have been 'foo' but were %v", actual)
+	}
+	reader.Close()
+	// Read 3 more bytes "bar"
+	_, err = reader.Read(buf)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if actual := string(buf); actual != "bar" {
+		t.Fatalf("The next 3 bytes should have been 'bar' but were %v", actual)
+	}
+	if !sr.closed {
+		t.Fatalf("The ReadCloser should have been closed, but it was not.")
+	}
+}
+
+func TestBufioWriterPoolGetWithNoWriterShouldCreateOne(t *testing.T) {
+	writer := BufioWriter32KPool.Get(nil)
+	if writer == nil {
+		t.Fatalf("BufioWriterPool should have created a bufio.Writer but did not.")
+	}
+}
+
+func TestBufioWriterPoolPutAndGet(t *testing.T) {
+	buf := new(bytes.Buffer)
+	bw := bufio.NewWriter(buf)
+	writer := BufioWriter32KPool.Get(bw)
+	if writer == nil {
+		t.Fatalf("BufioWriterPool should not return a nil writer.")
+	}
+	written, err := writer.Write([]byte("foobar"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if written != 6 {
+		t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written)
+	}
+	// Make sure we flush all the way:
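+	// (writer wraps bw, which wraps buf, so both bufio layers have to be
+	// flushed before the bytes actually reach buf)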
+	writer.Flush()
+	bw.Flush()
+	if len(buf.Bytes()) != 6 {
+		t.Fatalf("The buffer should contain 6 bytes ('foobar') but contains %v ('%v')", buf.Bytes(), string(buf.Bytes()))
+	}
+	// Reset the buffer
+	buf.Reset()
+	BufioWriter32KPool.Put(writer)
+	// Try to write something
+	written, err = writer.Write([]byte("barfoo"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	// If we now try to flush it, it should panic (the writer was reset to nil),
+	// so recover from it
+	defer func() {
+		if r := recover(); r == nil {
+			t.Fatal("Trying to flush the writer should have panicked, but did not.")
+		}
+	}()
+	writer.Flush()
+}
+
+type simpleWriterCloser struct {
+	io.Writer
+	closed bool
+}
+
+func (r *simpleWriterCloser) Close() error {
+	r.closed = true
+	return nil
+}
+
+func TestNewWriteCloserWrapperWithAWriteCloser(t *testing.T) {
+	buf := new(bytes.Buffer)
+	bw := bufio.NewWriter(buf)
+	sw := &simpleWriterCloser{
+		Writer: new(bytes.Buffer),
+		closed: false,
+	}
+	bw.Flush()
+	writer := BufioWriter32KPool.NewWriteCloserWrapper(bw, sw)
+	if writer == nil {
+		t.Fatalf("NewWriteCloserWrapper should not return a nil writer.")
+	}
+	written, err := writer.Write([]byte("foobar"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if written != 6 {
+		t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written)
+	}
+	writer.Close()
+	if !sw.closed {
+		t.Fatalf("The WriteCloser should have been closed, but it was not.")
+	}
+}
diff --git a/pkg/progressreader/progressreader.go b/pkg/progressreader/progressreader.go
new file mode 100644
index 00000000..908e8eee
--- /dev/null
+++ b/pkg/progressreader/progressreader.go
@@ -0,0 +1,64 @@
+package progressreader
+
+import (
+	"io"
+
+	"github.com/docker/docker/pkg/jsonmessage"
+	"github.com/docker/docker/pkg/streamformatter"
+)
+
+// Reader with progress bar
+type Config struct {
+	In         io.ReadCloser // Stream to read from
+	Out        io.Writer     // Where to send progress bar to
+	Formatter  *streamformatter.StreamFormatter
+	Size       int
+	Current    int
+	LastUpdate int
+	NewLines   bool
+	ID         string
+	Action     string
+}
+
+func New(newReader Config) *Config {
+	return &newReader
+}
+
+func (config *Config) Read(p []byte) (n int, err error) {
+	read, err := config.In.Read(p)
+	config.Current += read
+	updateEvery := 1024 * 512 // 512kB
+	if config.Size > 0 {
+		// Update progress for every 1% read if 1% < 512kB
+		if increment := int(0.01 * float64(config.Size)); increment < updateEvery {
+			updateEvery = increment
+		}
+	}
+	if config.Current-config.LastUpdate > updateEvery || err != nil {
+		updateProgress(config)
+		config.LastUpdate = config.Current
+	}
+
+	if err != nil && read == 0 {
+		updateProgress(config)
+		if config.NewLines {
+			config.Out.Write(config.Formatter.FormatStatus("", ""))
+		}
+	}
+	return read, err
+}
+
+func (config *Config) Close() error {
+	if config.Current < config.Size {
+		// print a full progress bar when closing prematurely
+		config.Current = config.Size
+		updateProgress(config)
+	}
+	return config.In.Close()
+}
+
+func updateProgress(config *Config) {
+	progress := jsonmessage.JSONProgress{Current: config.Current, Total: config.Size}
+	fmtMessage := config.Formatter.FormatProgress(config.ID, config.Action, &progress)
+	config.Out.Write(fmtMessage)
+}
diff --git a/pkg/progressreader/progressreader_test.go b/pkg/progressreader/progressreader_test.go
new file mode 100644
index 00000000..fdf40cbb
--- /dev/null
+++ b/pkg/progressreader/progressreader_test.go
@@ -0,0 +1,94 @@
+package progressreader
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+	"io/ioutil"
+	"testing"
+
+	
"github.com/docker/docker/pkg/streamformatter" +) + +func TestOutputOnPrematureClose(t *testing.T) { + var outBuf bytes.Buffer + content := []byte("TESTING") + reader := ioutil.NopCloser(bytes.NewReader(content)) + writer := bufio.NewWriter(&outBuf) + + prCfg := Config{ + In: reader, + Out: writer, + Formatter: streamformatter.NewStreamFormatter(), + Size: len(content), + NewLines: true, + ID: "Test", + Action: "Read", + } + pr := New(prCfg) + + part := make([]byte, 4, 4) + _, err := io.ReadFull(pr, part) + if err != nil { + pr.Close() + t.Fatal(err) + } + + if err := writer.Flush(); err != nil { + pr.Close() + t.Fatal(err) + } + + tlen := outBuf.Len() + pr.Close() + if err := writer.Flush(); err != nil { + t.Fatal(err) + } + + if outBuf.Len() == tlen { + t.Fatalf("Expected some output when closing prematurely") + } +} + +func TestCompleteSilently(t *testing.T) { + var outBuf bytes.Buffer + content := []byte("TESTING") + reader := ioutil.NopCloser(bytes.NewReader(content)) + writer := bufio.NewWriter(&outBuf) + + prCfg := Config{ + In: reader, + Out: writer, + Formatter: streamformatter.NewStreamFormatter(), + Size: len(content), + NewLines: true, + ID: "Test", + Action: "Read", + } + pr := New(prCfg) + + out, err := ioutil.ReadAll(pr) + if err != nil { + pr.Close() + t.Fatal(err) + } + if string(out) != "TESTING" { + pr.Close() + t.Fatalf("Unexpected output %q from reader", string(out)) + } + + if err := writer.Flush(); err != nil { + pr.Close() + t.Fatal(err) + } + + tlen := outBuf.Len() + pr.Close() + if err := writer.Flush(); err != nil { + t.Fatal(err) + } + + if outBuf.Len() > tlen { + t.Fatalf("Should have closed silently when read is complete") + } +} diff --git a/pkg/promise/promise.go b/pkg/promise/promise.go new file mode 100644 index 00000000..dd52b908 --- /dev/null +++ b/pkg/promise/promise.go @@ -0,0 +1,11 @@ +package promise + +// Go is a basic promise implementation: it wraps calls a function in a goroutine, +// and returns a channel which will later return the function's return value. 
+func Go(f func() error) chan error {
+	ch := make(chan error, 1)
+	go func() {
+		ch <- f()
+	}()
+	return ch
+}
diff --git a/pkg/proxy/network_proxy_test.go b/pkg/proxy/network_proxy_test.go
new file mode 100644
index 00000000..9e382567
--- /dev/null
+++ b/pkg/proxy/network_proxy_test.go
@@ -0,0 +1,216 @@
+package proxy
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net"
+	"strings"
+	"testing"
+	"time"
+)
+
+var testBuf = []byte("Buffalo buffalo Buffalo buffalo buffalo buffalo Buffalo buffalo")
+var testBufSize = len(testBuf)
+
+type EchoServer interface {
+	Run()
+	Close()
+	LocalAddr() net.Addr
+}
+
+type TCPEchoServer struct {
+	listener net.Listener
+	testCtx  *testing.T
+}
+
+type UDPEchoServer struct {
+	conn    net.PacketConn
+	testCtx *testing.T
+}
+
+func NewEchoServer(t *testing.T, proto, address string) EchoServer {
+	var server EchoServer
+	if strings.HasPrefix(proto, "tcp") {
+		listener, err := net.Listen(proto, address)
+		if err != nil {
+			t.Fatal(err)
+		}
+		server = &TCPEchoServer{listener: listener, testCtx: t}
+	} else {
+		socket, err := net.ListenPacket(proto, address)
+		if err != nil {
+			t.Fatal(err)
+		}
+		server = &UDPEchoServer{conn: socket, testCtx: t}
+	}
+	return server
+}
+
+func (server *TCPEchoServer) Run() {
+	go func() {
+		for {
+			client, err := server.listener.Accept()
+			if err != nil {
+				return
+			}
+			go func(client net.Conn) {
+				if _, err := io.Copy(client, client); err != nil {
+					server.testCtx.Logf("can't echo to the client: %v\n", err.Error())
+				}
+				client.Close()
+			}(client)
+		}
+	}()
+}
+
+func (server *TCPEchoServer) LocalAddr() net.Addr { return server.listener.Addr() }
+func (server *TCPEchoServer) Close()              { server.listener.Close() }
+
+func (server *UDPEchoServer) Run() {
+	go func() {
+		readBuf := make([]byte, 1024)
+		for {
+			read, from, err := server.conn.ReadFrom(readBuf)
+			if err != nil {
+				return
+			}
+			for i := 0; i != read; {
+				written, err := server.conn.WriteTo(readBuf[i:read], from)
+				if err != nil {
+					break
+				}
+				i += written
+			}
+		}
+	}()
+}
+
+func (server *UDPEchoServer) LocalAddr() net.Addr { return server.conn.LocalAddr() }
+func (server *UDPEchoServer) Close()              { server.conn.Close() }
+
+func testProxyAt(t *testing.T, proto string, proxy Proxy, addr string) {
+	defer proxy.Close()
+	go proxy.Run()
+	client, err := net.Dial(proto, addr)
+	if err != nil {
+		t.Fatalf("Can't connect to the proxy: %v", err)
+	}
+	defer client.Close()
+	client.SetDeadline(time.Now().Add(10 * time.Second))
+	if _, err = client.Write(testBuf); err != nil {
+		t.Fatal(err)
+	}
+	recvBuf := make([]byte, testBufSize)
+	if _, err = client.Read(recvBuf); err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(testBuf, recvBuf) {
+		t.Fatal(fmt.Errorf("Expected [%v] but got [%v]", testBuf, recvBuf))
+	}
+}
+
+func testProxy(t *testing.T, proto string, proxy Proxy) {
+	testProxyAt(t, proto, proxy, proxy.FrontendAddr().String())
+}
+
+func TestTCP4Proxy(t *testing.T) {
+	backend := NewEchoServer(t, "tcp", "127.0.0.1:0")
+	defer backend.Close()
+	backend.Run()
+	frontendAddr := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
+	proxy, err := NewProxy(frontendAddr, backend.LocalAddr())
+	if err != nil {
+		t.Fatal(err)
+	}
+	testProxy(t, "tcp", proxy)
+}
+
+func TestTCP6Proxy(t *testing.T) {
+	backend := NewEchoServer(t, "tcp", "[::1]:0")
+	defer backend.Close()
+	backend.Run()
+	frontendAddr := &net.TCPAddr{IP: net.IPv6loopback, Port: 0}
+	proxy, err := NewProxy(frontendAddr, backend.LocalAddr())
+	if err != nil {
+		t.Fatal(err)
+	}
+	testProxy(t, "tcp", proxy)
+}
+
+func 
TestTCPDualStackProxy(t *testing.T) { + // If I understand `godoc -src net favoriteAddrFamily` (used by the + // net.Listen* functions) correctly this should work, but it doesn't. + t.Skip("No support for dual stack yet") + backend := NewEchoServer(t, "tcp", "[::1]:0") + defer backend.Close() + backend.Run() + frontendAddr := &net.TCPAddr{IP: net.IPv6loopback, Port: 0} + proxy, err := NewProxy(frontendAddr, backend.LocalAddr()) + if err != nil { + t.Fatal(err) + } + ipv4ProxyAddr := &net.TCPAddr{ + IP: net.IPv4(127, 0, 0, 1), + Port: proxy.FrontendAddr().(*net.TCPAddr).Port, + } + testProxyAt(t, "tcp", proxy, ipv4ProxyAddr.String()) +} + +func TestUDP4Proxy(t *testing.T) { + backend := NewEchoServer(t, "udp", "127.0.0.1:0") + defer backend.Close() + backend.Run() + frontendAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0} + proxy, err := NewProxy(frontendAddr, backend.LocalAddr()) + if err != nil { + t.Fatal(err) + } + testProxy(t, "udp", proxy) +} + +func TestUDP6Proxy(t *testing.T) { + backend := NewEchoServer(t, "udp", "[::1]:0") + defer backend.Close() + backend.Run() + frontendAddr := &net.UDPAddr{IP: net.IPv6loopback, Port: 0} + proxy, err := NewProxy(frontendAddr, backend.LocalAddr()) + if err != nil { + t.Fatal(err) + } + testProxy(t, "udp", proxy) +} + +func TestUDPWriteError(t *testing.T) { + frontendAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0} + // Hopefully, this port will be free: */ + backendAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 25587} + proxy, err := NewProxy(frontendAddr, backendAddr) + if err != nil { + t.Fatal(err) + } + defer proxy.Close() + go proxy.Run() + client, err := net.Dial("udp", "127.0.0.1:25587") + if err != nil { + t.Fatalf("Can't connect to the proxy: %v", err) + } + defer client.Close() + // Make sure the proxy doesn't stop when there is no actual backend: + client.Write(testBuf) + client.Write(testBuf) + backend := NewEchoServer(t, "udp", "127.0.0.1:25587") + defer backend.Close() + backend.Run() + client.SetDeadline(time.Now().Add(10 * time.Second)) + if _, err = client.Write(testBuf); err != nil { + t.Fatal(err) + } + recvBuf := make([]byte, testBufSize) + if _, err = client.Read(recvBuf); err != nil { + t.Fatal(err) + } + if !bytes.Equal(testBuf, recvBuf) { + t.Fatal(fmt.Errorf("Expected [%v] but got [%v]", testBuf, recvBuf)) + } +} diff --git a/pkg/proxy/proxy.go b/pkg/proxy/proxy.go new file mode 100644 index 00000000..7a711f65 --- /dev/null +++ b/pkg/proxy/proxy.go @@ -0,0 +1,29 @@ +package proxy + +import ( + "fmt" + "net" +) + +type Proxy interface { + // Start forwarding traffic back and forth the front and back-end + // addresses. + Run() + // Stop forwarding traffic and close both ends of the Proxy. + Close() + // Return the address on which the proxy is listening. + FrontendAddr() net.Addr + // Return the proxied address. 
+	BackendAddr() net.Addr
+}
+
+func NewProxy(frontendAddr, backendAddr net.Addr) (Proxy, error) {
+	switch frontendAddr.(type) {
+	case *net.UDPAddr:
+		return NewUDPProxy(frontendAddr.(*net.UDPAddr), backendAddr.(*net.UDPAddr))
+	case *net.TCPAddr:
+		return NewTCPProxy(frontendAddr.(*net.TCPAddr), backendAddr.(*net.TCPAddr))
+	default:
+		panic(fmt.Errorf("Unsupported protocol"))
+	}
+}
diff --git a/pkg/proxy/stub_proxy.go b/pkg/proxy/stub_proxy.go
new file mode 100644
index 00000000..76844270
--- /dev/null
+++ b/pkg/proxy/stub_proxy.go
@@ -0,0 +1,22 @@
+package proxy
+
+import (
+	"net"
+)
+
+type StubProxy struct {
+	frontendAddr net.Addr
+	backendAddr  net.Addr
+}
+
+func (p *StubProxy) Run()                   {}
+func (p *StubProxy) Close()                 {}
+func (p *StubProxy) FrontendAddr() net.Addr { return p.frontendAddr }
+func (p *StubProxy) BackendAddr() net.Addr  { return p.backendAddr }
+
+func NewStubProxy(frontendAddr, backendAddr net.Addr) (Proxy, error) {
+	return &StubProxy{
+		frontendAddr: frontendAddr,
+		backendAddr:  backendAddr,
+	}, nil
+}
diff --git a/pkg/proxy/tcp_proxy.go b/pkg/proxy/tcp_proxy.go
new file mode 100644
index 00000000..9942e6d9
--- /dev/null
+++ b/pkg/proxy/tcp_proxy.go
@@ -0,0 +1,90 @@
+package proxy
+
+import (
+	"io"
+	"net"
+	"syscall"
+
+	"github.com/Sirupsen/logrus"
+)
+
+type TCPProxy struct {
+	listener     *net.TCPListener
+	frontendAddr *net.TCPAddr
+	backendAddr  *net.TCPAddr
+}
+
+func NewTCPProxy(frontendAddr, backendAddr *net.TCPAddr) (*TCPProxy, error) {
+	listener, err := net.ListenTCP("tcp", frontendAddr)
+	if err != nil {
+		return nil, err
+	}
+	// If the port in frontendAddr was 0 then ListenTCP will have picked
+	// a port to listen on, hence the call to Addr to get the actual port:
+	return &TCPProxy{
+		listener:     listener,
+		frontendAddr: listener.Addr().(*net.TCPAddr),
+		backendAddr:  backendAddr,
+	}, nil
+}
+
+func (proxy *TCPProxy) clientLoop(client *net.TCPConn, quit chan bool) {
+	backend, err := net.DialTCP("tcp", nil, proxy.backendAddr)
+	if err != nil {
+		logrus.Printf("Can't forward traffic to backend tcp/%v: %s\n", proxy.backendAddr, err)
+		client.Close()
+		return
+	}
+
+	event := make(chan int64)
+	var broker = func(to, from *net.TCPConn) {
+		written, err := io.Copy(to, from)
+		if err != nil {
+			// If the socket we are writing to is shutdown with
+			// SHUT_WR, forward it to the other end of the pipe:
+			if err, ok := err.(*net.OpError); ok && err.Err == syscall.EPIPE {
+				from.CloseWrite()
+			}
+		}
+		to.CloseRead()
+		event <- written
+	}
+
+	go broker(client, backend)
+	go broker(backend, client)
+
+	var transferred int64
+	for i := 0; i < 2; i++ {
+		select {
+		case written := <-event:
+			transferred += written
+		case <-quit:
+			// Interrupt the two brokers and "join" them.
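+			// Closing both connections unblocks the pending
+			// io.Copy calls; draining the event channel below
+			// then lets both broker goroutines finish.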
+			client.Close()
+			backend.Close()
+			for ; i < 2; i++ {
+				transferred += <-event
+			}
+			return
+		}
+	}
+	client.Close()
+	backend.Close()
+}
+
+func (proxy *TCPProxy) Run() {
+	quit := make(chan bool)
+	defer close(quit)
+	for {
+		client, err := proxy.listener.Accept()
+		if err != nil {
+			logrus.Printf("Stopping proxy on tcp/%v for tcp/%v (%s)", proxy.frontendAddr, proxy.backendAddr, err)
+			return
+		}
+		go proxy.clientLoop(client.(*net.TCPConn), quit)
+	}
+}
+
+func (proxy *TCPProxy) Close()                 { proxy.listener.Close() }
+func (proxy *TCPProxy) FrontendAddr() net.Addr { return proxy.frontendAddr }
+func (proxy *TCPProxy) BackendAddr() net.Addr  { return proxy.backendAddr }
diff --git a/pkg/proxy/udp_proxy.go b/pkg/proxy/udp_proxy.go
new file mode 100644
index 00000000..2a073dfe
--- /dev/null
+++ b/pkg/proxy/udp_proxy.go
@@ -0,0 +1,158 @@
+package proxy
+
+import (
+	"encoding/binary"
+	"net"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+)
+
+const (
+	UDPConnTrackTimeout = 90 * time.Second
+	UDPBufSize          = 65507
+)
+
+// A net.Addr where the IP is split into two fields so you can use it as a key
+// in a map:
+type connTrackKey struct {
+	IPHigh uint64
+	IPLow  uint64
+	Port   int
+}
+
+func newConnTrackKey(addr *net.UDPAddr) *connTrackKey {
+	if len(addr.IP) == net.IPv4len {
+		return &connTrackKey{
+			IPHigh: 0,
+			IPLow:  uint64(binary.BigEndian.Uint32(addr.IP)),
+			Port:   addr.Port,
+		}
+	}
+	return &connTrackKey{
+		IPHigh: binary.BigEndian.Uint64(addr.IP[:8]),
+		IPLow:  binary.BigEndian.Uint64(addr.IP[8:]),
+		Port:   addr.Port,
+	}
+}
+
+type connTrackMap map[connTrackKey]*net.UDPConn
+
+type UDPProxy struct {
+	listener       *net.UDPConn
+	frontendAddr   *net.UDPAddr
+	backendAddr    *net.UDPAddr
+	connTrackTable connTrackMap
+	connTrackLock  sync.Mutex
+}
+
+func NewUDPProxy(frontendAddr, backendAddr *net.UDPAddr) (*UDPProxy, error) {
+	listener, err := net.ListenUDP("udp", frontendAddr)
+	if err != nil {
+		return nil, err
+	}
+	return &UDPProxy{
+		listener:       listener,
+		frontendAddr:   listener.LocalAddr().(*net.UDPAddr),
+		backendAddr:    backendAddr,
+		connTrackTable: make(connTrackMap),
+	}, nil
+}
+
+func (proxy *UDPProxy) replyLoop(proxyConn *net.UDPConn, clientAddr *net.UDPAddr, clientKey *connTrackKey) {
+	defer func() {
+		proxy.connTrackLock.Lock()
+		delete(proxy.connTrackTable, *clientKey)
+		proxy.connTrackLock.Unlock()
+		proxyConn.Close()
+	}()
+
+	readBuf := make([]byte, UDPBufSize)
+	for {
+		proxyConn.SetReadDeadline(time.Now().Add(UDPConnTrackTimeout))
+	again:
+		read, err := proxyConn.Read(readBuf)
+		if err != nil {
+			if err, ok := err.(*net.OpError); ok && err.Err == syscall.ECONNREFUSED {
+				// This will happen if the last write failed
+				// (e.g. nothing is actually listening on the
+				// proxied port on the container); ignore it
+				// and continue until UDPConnTrackTimeout
+				// expires:
+				goto again
+			}
+			return
+		}
+		for i := 0; i != read; {
+			written, err := proxy.listener.WriteToUDP(readBuf[i:read], clientAddr)
+			if err != nil {
+				return
+			}
+			i += written
+		}
+	}
+}
+
+func (proxy *UDPProxy) Run() {
+	readBuf := make([]byte, UDPBufSize)
+	for {
+		read, from, err := proxy.listener.ReadFromUDP(readBuf)
+		if err != nil {
+			// NOTE: Apparently ReadFrom doesn't return
+			// ECONNREFUSED like Read does (see comment in
+			// UDPProxy.replyLoop)
+			if !isClosedError(err) {
+				logrus.Printf("Stopping proxy on udp/%v for udp/%v (%s)", proxy.frontendAddr, proxy.backendAddr, err)
+			}
+			break
+		}
+
+		fromKey := newConnTrackKey(from)
+		proxy.connTrackLock.Lock()
+		proxyConn, hit := proxy.connTrackTable[*fromKey]
+		if !hit {
+			proxyConn, err = net.DialUDP("udp", nil, proxy.backendAddr)
+			if err != nil {
+				logrus.Printf("Can't proxy a datagram to udp/%s: %s\n", proxy.backendAddr, err)
+				proxy.connTrackLock.Unlock()
+				continue
+			}
+			proxy.connTrackTable[*fromKey] = proxyConn
+			go proxy.replyLoop(proxyConn, from, fromKey)
+		}
+		proxy.connTrackLock.Unlock()
+		for i := 0; i != read; {
+			written, err := proxyConn.Write(readBuf[i:read])
+			if err != nil {
+				logrus.Printf("Can't proxy a datagram to udp/%s: %s\n", proxy.backendAddr, err)
+				break
+			}
+			i += written
+		}
+	}
+}
+
+func (proxy *UDPProxy) Close() {
+	proxy.listener.Close()
+	proxy.connTrackLock.Lock()
+	defer proxy.connTrackLock.Unlock()
+	for _, conn := range proxy.connTrackTable {
+		conn.Close()
+	}
+}
+
+func (proxy *UDPProxy) FrontendAddr() net.Addr { return proxy.frontendAddr }
+func (proxy *UDPProxy) BackendAddr() net.Addr  { return proxy.backendAddr }
+
+func isClosedError(err error) bool {
+	/* This comparison is ugly, but unfortunately, net.go doesn't export errClosing.
+	 * See:
+	 * http://golang.org/src/pkg/net/net.go
+	 * https://code.google.com/p/go/issues/detail?id=4337
+	 * https://groups.google.com/forum/#!msg/golang-nuts/0_aaCvBmOcM/SptmDyX1XJMJ
+	 */
+	return strings.HasSuffix(err.Error(), "use of closed network connection")
+}
diff --git a/pkg/pubsub/publisher.go b/pkg/pubsub/publisher.go
new file mode 100644
index 00000000..ab457cfb
--- /dev/null
+++ b/pkg/pubsub/publisher.go
@@ -0,0 +1,84 @@
+package pubsub
+
+import (
+	"sync"
+	"time"
+)
+
+// NewPublisher creates a new pub/sub publisher to broadcast messages.
+// The duration is used as the send timeout so as not to block the publisher
+// from publishing messages to other clients if one client is slow or unresponsive.
+// The buffer is used when creating new channels for subscribers.
+func NewPublisher(publishTimeout time.Duration, buffer int) *Publisher {
+	return &Publisher{
+		buffer:      buffer,
+		timeout:     publishTimeout,
+		subscribers: make(map[subscriber]struct{}),
+	}
+}
+
+type subscriber chan interface{}
+
+// Publisher is a basic pub/sub structure. It allows sending events and
+// subscribing to them, and can be safely used from multiple goroutines.
+type Publisher struct {
+	m           sync.RWMutex
+	buffer      int
+	timeout     time.Duration
+	subscribers map[subscriber]struct{}
+}
+
+// Len returns the number of subscribers for the publisher.
+func (p *Publisher) Len() int {
+	p.m.RLock()
+	i := len(p.subscribers)
+	p.m.RUnlock()
+	return i
+}
+
+// Subscribe adds a new subscriber to the publisher, returning the channel.
+func (p *Publisher) Subscribe() chan interface{} {
+	ch := make(chan interface{}, p.buffer)
+	p.m.Lock()
+	p.subscribers[ch] = struct{}{}
+	p.m.Unlock()
+	return ch
+}
+
+// Evict removes the specified subscriber from receiving any more messages.
+func (p *Publisher) Evict(sub chan interface{}) {
+	p.m.Lock()
+	delete(p.subscribers, sub)
+	close(sub)
+	p.m.Unlock()
+}
+
+// Publish sends the data in v to all subscribers currently registered with the publisher.
+func (p *Publisher) Publish(v interface{}) {
+	p.m.RLock()
+	for sub := range p.subscribers {
+		// send under a select so as not to block if the receiver is unavailable
+		if p.timeout > 0 {
+			select {
+			case sub <- v:
+			case <-time.After(p.timeout):
+			}
+			continue
+		}
+		select {
+		case sub <- v:
+		default:
+		}
+	}
+	p.m.RUnlock()
+}
+
+// Close closes the channels to all subscribers registered with the publisher.
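+//
+// A small illustrative lifecycle (hypothetical caller code):
+//
+//	p := NewPublisher(100*time.Millisecond, 10)
+//	sub := p.Subscribe()
+//	go func() {
+//		for msg := range sub {
+//			fmt.Println(msg)
+//		}
+//	}()
+//	p.Publish("hello")
+//	p.Close() // closes sub along with any other subscriber channels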
+func (p *Publisher) Close() { + p.m.Lock() + for sub := range p.subscribers { + delete(p.subscribers, sub) + close(sub) + } + p.m.Unlock() +} diff --git a/pkg/pubsub/publisher_test.go b/pkg/pubsub/publisher_test.go new file mode 100644 index 00000000..d6b0a1d5 --- /dev/null +++ b/pkg/pubsub/publisher_test.go @@ -0,0 +1,142 @@ +package pubsub + +import ( + "fmt" + "testing" + "time" +) + +func TestSendToOneSub(t *testing.T) { + p := NewPublisher(100*time.Millisecond, 10) + c := p.Subscribe() + + p.Publish("hi") + + msg := <-c + if msg.(string) != "hi" { + t.Fatalf("expected message hi but received %v", msg) + } +} + +func TestSendToMultipleSubs(t *testing.T) { + p := NewPublisher(100*time.Millisecond, 10) + subs := []chan interface{}{} + subs = append(subs, p.Subscribe(), p.Subscribe(), p.Subscribe()) + + p.Publish("hi") + + for _, c := range subs { + msg := <-c + if msg.(string) != "hi" { + t.Fatalf("expected message hi but received %v", msg) + } + } +} + +func TestEvictOneSub(t *testing.T) { + p := NewPublisher(100*time.Millisecond, 10) + s1 := p.Subscribe() + s2 := p.Subscribe() + + p.Evict(s1) + p.Publish("hi") + if _, ok := <-s1; ok { + t.Fatal("expected s1 to not receive the published message") + } + + msg := <-s2 + if msg.(string) != "hi" { + t.Fatalf("expected message hi but received %v", msg) + } +} + +func TestClosePublisher(t *testing.T) { + p := NewPublisher(100*time.Millisecond, 10) + subs := []chan interface{}{} + subs = append(subs, p.Subscribe(), p.Subscribe(), p.Subscribe()) + p.Close() + + for _, c := range subs { + if _, ok := <-c; ok { + t.Fatal("expected all subscriber channels to be closed") + } + } +} + +const sampleText = "test" + +type testSubscriber struct { + dataCh chan interface{} + ch chan error +} + +func (s *testSubscriber) Wait() error { + return <-s.ch +} + +func newTestSubscriber(p *Publisher) *testSubscriber { + ts := &testSubscriber{ + dataCh: p.Subscribe(), + ch: make(chan error), + } + go func() { + for data := range ts.dataCh { + s, ok := data.(string) + if !ok { + ts.ch <- fmt.Errorf("Unexpected type %T", data) + break + } + if s != sampleText { + ts.ch <- fmt.Errorf("Unexpected text %s", s) + break + } + } + close(ts.ch) + }() + return ts +} + +// for testing with -race +func TestPubSubRace(t *testing.T) { + p := NewPublisher(0, 1024) + var subs [](*testSubscriber) + for j := 0; j < 50; j++ { + subs = append(subs, newTestSubscriber(p)) + } + for j := 0; j < 1000; j++ { + p.Publish(sampleText) + } + time.AfterFunc(1*time.Second, func() { + for _, s := range subs { + p.Evict(s.dataCh) + } + }) + for _, s := range subs { + s.Wait() + } +} + +func BenchmarkPubSub(b *testing.B) { + for i := 0; i < b.N; i++ { + b.StopTimer() + p := NewPublisher(0, 1024) + var subs [](*testSubscriber) + for j := 0; j < 50; j++ { + subs = append(subs, newTestSubscriber(p)) + } + b.StartTimer() + for j := 0; j < 1000; j++ { + p.Publish(sampleText) + } + time.AfterFunc(1*time.Second, func() { + for _, s := range subs { + p.Evict(s.dataCh) + } + }) + for _, s := range subs { + if err := s.Wait(); err != nil { + b.Fatal(err) + } + } + } +} diff --git a/pkg/random/random.go b/pkg/random/random.go new file mode 100644 index 00000000..05b7f7fb --- /dev/null +++ b/pkg/random/random.go @@ -0,0 +1,34 @@ +package random + +import ( + "math/rand" + "sync" + "time" +) + +// copypaste from standard math/rand +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func 
(r *lockedSource) Seed(seed int64) {
+	r.lk.Lock()
+	r.src.Seed(seed)
+	r.lk.Unlock()
+}
+
+// NewSource returns a math/rand.Source that is safe for concurrent use and
+// initialized with the current unix-nano timestamp.
+func NewSource() rand.Source {
+	return &lockedSource{
+		src: rand.NewSource(time.Now().UnixNano()),
+	}
+}
diff --git a/pkg/random/random_test.go b/pkg/random/random_test.go
new file mode 100644
index 00000000..cf405f78
--- /dev/null
+++ b/pkg/random/random_test.go
@@ -0,0 +1,22 @@
+package random
+
+import (
+	"math/rand"
+	"sync"
+	"testing"
+)
+
+// for go test -v -race
+func TestConcurrency(t *testing.T) {
+	rnd := rand.New(NewSource())
+	var wg sync.WaitGroup
+
+	for i := 0; i < 10; i++ {
+		wg.Add(1)
+		go func() {
+			rnd.Int63()
+			wg.Done()
+		}()
+	}
+	wg.Wait()
+}
diff --git a/pkg/reexec/README.md b/pkg/reexec/README.md
new file mode 100644
index 00000000..45592ce8
--- /dev/null
+++ b/pkg/reexec/README.md
@@ -0,0 +1,5 @@
+## reexec
+
+The `reexec` package facilitates the busybox-style re-exec of the docker binary that we require
+because of the forking limitations of using Go. Handlers can be registered under a name, and the
+argv[0] of the re-executed binary is used to find and run the matching initialization function.
diff --git a/pkg/reexec/command_linux.go b/pkg/reexec/command_linux.go
new file mode 100644
index 00000000..3c3a73a9
--- /dev/null
+++ b/pkg/reexec/command_linux.go
@@ -0,0 +1,28 @@
+// +build linux
+
+package reexec
+
+import (
+	"os/exec"
+	"syscall"
+)
+
+// Self returns the path to the current process's binary.
+// Returns "/proc/self/exe".
+func Self() string {
+	return "/proc/self/exe"
+}
+
+// Command returns a *exec.Cmd whose Path is the current binary, with
+// SysProcAttr.Pdeathsig set to SIGTERM.
+// This will use the in-memory version (/proc/self/exe) of the current binary,
+// so it is safe to delete or replace the on-disk binary (os.Args[0]).
+func Command(args ...string) *exec.Cmd {
+	return &exec.Cmd{
+		Path: Self(),
+		Args: args,
+		SysProcAttr: &syscall.SysProcAttr{
+			Pdeathsig: syscall.SIGTERM,
+		},
+	}
+}
diff --git a/pkg/reexec/command_unsupported.go b/pkg/reexec/command_unsupported.go
new file mode 100644
index 00000000..630eecbd
--- /dev/null
+++ b/pkg/reexec/command_unsupported.go
@@ -0,0 +1,12 @@
+// +build !linux,!windows
+
+package reexec
+
+import (
+	"os/exec"
+)
+
+// Command is unsupported on operating systems apart from Linux and Windows.
+func Command(args ...string) *exec.Cmd {
+	return nil
+}
diff --git a/pkg/reexec/command_windows.go b/pkg/reexec/command_windows.go
new file mode 100644
index 00000000..8d65e0ae
--- /dev/null
+++ b/pkg/reexec/command_windows.go
@@ -0,0 +1,23 @@
+// +build windows
+
+package reexec
+
+import (
+	"os/exec"
+)
+
+// Self returns the path to the current process's binary.
+// Uses os.Args[0].
+func Self() string {
+	return naiveSelf()
+}
+
+// Command returns a *exec.Cmd whose Path is the current binary.
+// For example if the current binary is "docker.exe" at "C:\", then cmd.Path will
+// be set to "C:\docker.exe".
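+//
+// Illustrative use (assuming a handler was registered under the
+// hypothetical name "my-init" via reexec.Register):
+//
+//	cmd := reexec.Command("my-init", "arg1")
+//	if err := cmd.Run(); err != nil {
+//		panic(err)
+//	}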
+func Command(args ...string) *exec.Cmd {
+	return &exec.Cmd{
+		Path: Self(),
+		Args: args,
+	}
+}
diff --git a/pkg/reexec/reexec.go b/pkg/reexec/reexec.go
new file mode 100644
index 00000000..20491e05
--- /dev/null
+++ b/pkg/reexec/reexec.go
@@ -0,0 +1,47 @@
+package reexec
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+)
+
+var registeredInitializers = make(map[string]func())
+
+// Register adds an initialization func under the specified name.
+func Register(name string, initializer func()) {
+	if _, exists := registeredInitializers[name]; exists {
+		panic(fmt.Sprintf("reexec func already registered under name %q", name))
+	}
+
+	registeredInitializers[name] = initializer
+}
+
+// Init is called as the first part of the exec process and returns true if an
+// initialization function was called.
+func Init() bool {
+	initializer, exists := registeredInitializers[os.Args[0]]
+	if exists {
+		initializer()
+
+		return true
+	}
+	return false
+}
+
+func naiveSelf() string {
+	name := os.Args[0]
+	if filepath.Base(name) == name {
+		if lp, err := exec.LookPath(name); err == nil {
+			return lp
+		}
+	}
+	// handle conversion of relative paths to absolute
+	if absName, err := filepath.Abs(name); err == nil {
+		return absName
+	}
+	// if we couldn't get the absolute name, return the original
+	// (NOTE: Go only errors on Abs() if os.Getwd fails)
+	return name
+}
diff --git a/pkg/signal/README.md b/pkg/signal/README.md
new file mode 100644
index 00000000..2b237a59
--- /dev/null
+++ b/pkg/signal/README.md
@@ -0,0 +1 @@
+This package provides helper functions for dealing with signals across various operating systems.
\ No newline at end of file
diff --git a/pkg/signal/signal.go b/pkg/signal/signal.go
new file mode 100644
index 00000000..63337542
--- /dev/null
+++ b/pkg/signal/signal.go
@@ -0,0 +1,19 @@
+package signal
+
+import (
+	"os"
+	"os/signal"
+)
+
+// CatchAll catches all signals listed in SignalMap and relays them to sigc.
+func CatchAll(sigc chan os.Signal) {
+	handledSigs := []os.Signal{}
+	for _, s := range SignalMap {
+		handledSigs = append(handledSigs, s)
+	}
+	signal.Notify(sigc, handledSigs...)
+}
+
+// StopCatch stops catching signals on sigc and closes the channel.
+func StopCatch(sigc chan os.Signal) {
+	signal.Stop(sigc)
+	close(sigc)
+}
diff --git a/pkg/signal/signal_darwin.go b/pkg/signal/signal_darwin.go
new file mode 100644
index 00000000..fcd3a8f2
--- /dev/null
+++ b/pkg/signal/signal_darwin.go
@@ -0,0 +1,40 @@
+package signal
+
+import (
+	"syscall"
+)
+
+// SignalMap maps signal names to Darwin signals.
+var SignalMap = map[string]syscall.Signal{
+	"ABRT":   syscall.SIGABRT,
+	"ALRM":   syscall.SIGALRM,
+	"BUS":    syscall.SIGBUS,
+	"CHLD":   syscall.SIGCHLD,
+	"CONT":   syscall.SIGCONT,
+	"EMT":    syscall.SIGEMT,
+	"FPE":    syscall.SIGFPE,
+	"HUP":    syscall.SIGHUP,
+	"ILL":    syscall.SIGILL,
+	"INFO":   syscall.SIGINFO,
+	"INT":    syscall.SIGINT,
+	"IO":     syscall.SIGIO,
+	"IOT":    syscall.SIGIOT,
+	"KILL":   syscall.SIGKILL,
+	"PIPE":   syscall.SIGPIPE,
+	"PROF":   syscall.SIGPROF,
+	"QUIT":   syscall.SIGQUIT,
+	"SEGV":   syscall.SIGSEGV,
+	"STOP":   syscall.SIGSTOP,
+	"SYS":    syscall.SIGSYS,
+	"TERM":   syscall.SIGTERM,
+	"TRAP":   syscall.SIGTRAP,
+	"TSTP":   syscall.SIGTSTP,
+	"TTIN":   syscall.SIGTTIN,
+	"TTOU":   syscall.SIGTTOU,
+	"URG":    syscall.SIGURG,
+	"USR1":   syscall.SIGUSR1,
+	"USR2":   syscall.SIGUSR2,
+	"VTALRM": syscall.SIGVTALRM,
+	"WINCH":  syscall.SIGWINCH,
+	"XCPU":   syscall.SIGXCPU,
+	"XFSZ":   syscall.SIGXFSZ,
+}
diff --git a/pkg/signal/signal_freebsd.go b/pkg/signal/signal_freebsd.go
new file mode 100644
index 00000000..102e9184
--- /dev/null
+++ b/pkg/signal/signal_freebsd.go
@@ -0,0 +1,42 @@
+package signal
+
+import (
+	"syscall"
+)
+
+// SignalMap maps signal names to FreeBSD signals.
+var SignalMap = map[string]syscall.Signal{
+	"ABRT":   syscall.SIGABRT,
+	"ALRM":   syscall.SIGALRM,
+	"BUS":    syscall.SIGBUS,
+	"CHLD":   syscall.SIGCHLD,
+	"CONT":   syscall.SIGCONT,
+	"EMT":    syscall.SIGEMT,
+	"FPE":    syscall.SIGFPE,
+	"HUP":    syscall.SIGHUP,
+	"ILL":    syscall.SIGILL,
+	"INFO":   syscall.SIGINFO,
+	"INT":    syscall.SIGINT,
+	"IO":     syscall.SIGIO,
+	"IOT":    syscall.SIGIOT,
+	"KILL":   syscall.SIGKILL,
+	"LWP":    syscall.SIGLWP,
+	"PIPE":   syscall.SIGPIPE,
+	"PROF":   syscall.SIGPROF,
+	"QUIT":   syscall.SIGQUIT,
+	"SEGV":   syscall.SIGSEGV,
+	"STOP":   syscall.SIGSTOP,
+	"SYS":    syscall.SIGSYS,
+	"TERM":   syscall.SIGTERM,
+	"THR":    syscall.SIGTHR,
+	"TRAP":   syscall.SIGTRAP,
+	"TSTP":   syscall.SIGTSTP,
+	"TTIN":   syscall.SIGTTIN,
+	"TTOU":   syscall.SIGTTOU,
+	"URG":    syscall.SIGURG,
+	"USR1":   syscall.SIGUSR1,
+	"USR2":   syscall.SIGUSR2,
+	"VTALRM": syscall.SIGVTALRM,
+	"WINCH":  syscall.SIGWINCH,
+	"XCPU":   syscall.SIGXCPU,
+	"XFSZ":   syscall.SIGXFSZ,
+}
diff --git a/pkg/signal/signal_linux.go b/pkg/signal/signal_linux.go
new file mode 100644
index 00000000..a62f79d4
--- /dev/null
+++ b/pkg/signal/signal_linux.go
@@ -0,0 +1,43 @@
+package signal
+
+import (
+	"syscall"
+)
+
+// SignalMap maps signal names to Linux signals.
+var SignalMap = map[string]syscall.Signal{
+	"ABRT":   syscall.SIGABRT,
+	"ALRM":   syscall.SIGALRM,
+	"BUS":    syscall.SIGBUS,
+	"CHLD":   syscall.SIGCHLD,
+	"CLD":    syscall.SIGCLD,
+	"CONT":   syscall.SIGCONT,
+	"FPE":    syscall.SIGFPE,
+	"HUP":    syscall.SIGHUP,
+	"ILL":    syscall.SIGILL,
+	"INT":    syscall.SIGINT,
+	"IO":     syscall.SIGIO,
+	"IOT":    syscall.SIGIOT,
+	"KILL":   syscall.SIGKILL,
+	"PIPE":   syscall.SIGPIPE,
+	"POLL":   syscall.SIGPOLL,
+	"PROF":   syscall.SIGPROF,
+	"PWR":    syscall.SIGPWR,
+	"QUIT":   syscall.SIGQUIT,
+	"SEGV":   syscall.SIGSEGV,
+	"STKFLT": syscall.SIGSTKFLT,
+	"STOP":   syscall.SIGSTOP,
+	"SYS":    syscall.SIGSYS,
+	"TERM":   syscall.SIGTERM,
+	"TRAP":   syscall.SIGTRAP,
+	"TSTP":   syscall.SIGTSTP,
+	"TTIN":   syscall.SIGTTIN,
+	"TTOU":   syscall.SIGTTOU,
+	"UNUSED": syscall.SIGUNUSED,
+	"URG":    syscall.SIGURG,
+	"USR1":   syscall.SIGUSR1,
+	"USR2":   syscall.SIGUSR2,
+	"VTALRM": syscall.SIGVTALRM,
+	"WINCH":  syscall.SIGWINCH,
+	"XCPU":   syscall.SIGXCPU,
+ "XFSZ": syscall.SIGXFSZ, +} diff --git a/pkg/signal/signal_unix.go b/pkg/signal/signal_unix.go new file mode 100644 index 00000000..613e30e5 --- /dev/null +++ b/pkg/signal/signal_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package signal + +import ( + "syscall" +) + +// Signals used in api/client (no windows equivalent, use +// invalid signals so they don't get handled) +const SIGCHLD = syscall.SIGCHLD +const SIGWINCH = syscall.SIGWINCH diff --git a/pkg/signal/signal_unsupported.go b/pkg/signal/signal_unsupported.go new file mode 100644 index 00000000..99f94659 --- /dev/null +++ b/pkg/signal/signal_unsupported.go @@ -0,0 +1,9 @@ +// +build !linux,!darwin,!freebsd + +package signal + +import ( + "syscall" +) + +var SignalMap = map[string]syscall.Signal{} diff --git a/pkg/signal/signal_windows.go b/pkg/signal/signal_windows.go new file mode 100644 index 00000000..9f00b999 --- /dev/null +++ b/pkg/signal/signal_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package signal + +import ( + "syscall" +) + +// Signals used in api/client (no windows equivalent, use +// invalid signals so they don't get handled) +const SIGCHLD = syscall.Signal(0xff) +const SIGWINCH = syscall.Signal(0xff) diff --git a/pkg/signal/trap.go b/pkg/signal/trap.go new file mode 100644 index 00000000..3772db5e --- /dev/null +++ b/pkg/signal/trap.go @@ -0,0 +1,64 @@ +package signal + +import ( + "os" + gosignal "os/signal" + "runtime" + "sync/atomic" + "syscall" + + "github.com/Sirupsen/logrus" +) + +// Trap sets up a simplified signal "trap", appropriate for common +// behavior expected from a vanilla unix command-line tool in general +// (and the Docker engine in particular). +// +// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated. +// * If SIGINT or SIGTERM are received 3 times before cleanup is complete, then cleanup is +// skipped and the process is terminated immediately (allows force quit of stuck daemon) +// * A SIGQUIT always causes an exit without cleanup, with a goroutine dump preceding exit. +// +func Trap(cleanup func()) { + c := make(chan os.Signal, 1) + // we will handle INT, TERM, QUIT here + signals := []os.Signal{os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT} + gosignal.Notify(c, signals...) + go func() { + interruptCount := uint32(0) + for sig := range c { + go func(sig os.Signal) { + logrus.Infof("Processing signal '%v'", sig) + switch sig { + case os.Interrupt, syscall.SIGTERM: + if atomic.LoadUint32(&interruptCount) < 3 { + // Initiate the cleanup only once + if atomic.AddUint32(&interruptCount, 1) == 1 { + // Call the provided cleanup handler + cleanup() + os.Exit(0) + } else { + return + } + } else { + // 3 SIGTERM/INT signals received; force exit without cleanup + logrus.Infof("Forcing docker daemon shutdown without cleanup; 3 interrupts received") + } + case syscall.SIGQUIT: + DumpStacks() + logrus.Infof("Forcing docker daemon shutdown without cleanup on SIGQUIT") + } + //for the SIGINT/TERM, and SIGQUIT non-clean shutdown case, exit with 128 + signal # + os.Exit(128 + int(sig.(syscall.Signal))) + }(sig) + } + }() +} + +func DumpStacks() { + buf := make([]byte, 16384) + buf = buf[:runtime.Stack(buf, true)] + // Note that if the daemon is started with a less-verbose log-level than "info" (the default), the goroutine + // traces won't show up in the log. 
+ logrus.Infof("=== BEGIN goroutine stack dump ===\n%s\n=== END goroutine stack dump ===", buf) +} diff --git a/pkg/sockets/README.md b/pkg/sockets/README.md new file mode 100644 index 00000000..e69de29b diff --git a/pkg/sockets/tcp_socket.go b/pkg/sockets/tcp_socket.go new file mode 100644 index 00000000..746270c3 --- /dev/null +++ b/pkg/sockets/tcp_socket.go @@ -0,0 +1,37 @@ +package sockets + +import ( + "crypto/tls" + "net" + "net/http" + "time" + + "github.com/docker/docker/pkg/listenbuffer" +) + +func NewTcpSocket(addr string, tlsConfig *tls.Config, activate <-chan struct{}) (net.Listener, error) { + l, err := listenbuffer.NewListenBuffer("tcp", addr, activate) + if err != nil { + return nil, err + } + if tlsConfig != nil { + tlsConfig.NextProtos = []string{"http/1.1"} + l = tls.NewListener(l, tlsConfig) + } + return l, nil +} + +func ConfigureTCPTransport(tr *http.Transport, proto, addr string) { + // Why 32? See https://github.com/docker/docker/pull/8035. + timeout := 32 * time.Second + if proto == "unix" { + // No need for compression in local communications. + tr.DisableCompression = true + tr.Dial = func(_, _ string) (net.Conn, error) { + return net.DialTimeout(proto, addr, timeout) + } + } else { + tr.Proxy = http.ProxyFromEnvironment + tr.Dial = (&net.Dialer{Timeout: timeout}).Dial + } +} diff --git a/pkg/sockets/unix_socket.go b/pkg/sockets/unix_socket.go new file mode 100644 index 00000000..fde11f5f --- /dev/null +++ b/pkg/sockets/unix_socket.go @@ -0,0 +1,80 @@ +// +build linux + +package sockets + +import ( + "fmt" + "net" + "os" + "strconv" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/listenbuffer" + "github.com/opencontainers/runc/libcontainer/user" +) + +func NewUnixSocket(path, group string, activate <-chan struct{}) (net.Listener, error) { + if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) { + return nil, err + } + mask := syscall.Umask(0777) + defer syscall.Umask(mask) + l, err := listenbuffer.NewListenBuffer("unix", path, activate) + if err != nil { + return nil, err + } + if err := setSocketGroup(path, group); err != nil { + l.Close() + return nil, err + } + if err := os.Chmod(path, 0660); err != nil { + l.Close() + return nil, err + } + return l, nil +} + +func setSocketGroup(path, group string) error { + if group == "" { + return nil + } + if err := changeGroup(path, group); err != nil { + if group != "docker" { + return err + } + logrus.Debugf("Warning: could not change group %s to docker: %v", path, err) + } + return nil +} + +func changeGroup(path string, nameOrGid string) error { + gid, err := lookupGidByName(nameOrGid) + if err != nil { + return err + } + logrus.Debugf("%s group found. 
gid: %d", nameOrGid, gid) + return os.Chown(path, 0, gid) +} + +func lookupGidByName(nameOrGid string) (int, error) { + groupFile, err := user.GetGroupPath() + if err != nil { + return -1, err + } + groups, err := user.ParseGroupFileFilter(groupFile, func(g user.Group) bool { + return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid + }) + if err != nil { + return -1, err + } + if groups != nil && len(groups) > 0 { + return groups[0].Gid, nil + } + gid, err := strconv.Atoi(nameOrGid) + if err == nil { + logrus.Warnf("Could not find GID %d", gid) + return gid, nil + } + return -1, fmt.Errorf("Group %s not found", nameOrGid) +} diff --git a/pkg/stdcopy/stdcopy.go b/pkg/stdcopy/stdcopy.go new file mode 100644 index 00000000..684b4d4c --- /dev/null +++ b/pkg/stdcopy/stdcopy.go @@ -0,0 +1,168 @@ +package stdcopy + +import ( + "encoding/binary" + "errors" + "io" + + "github.com/Sirupsen/logrus" +) + +const ( + StdWriterPrefixLen = 8 + StdWriterFdIndex = 0 + StdWriterSizeIndex = 4 +) + +type StdType [StdWriterPrefixLen]byte + +var ( + Stdin StdType = StdType{0: 0} + Stdout StdType = StdType{0: 1} + Stderr StdType = StdType{0: 2} +) + +type StdWriter struct { + io.Writer + prefix StdType + sizeBuf []byte +} + +func (w *StdWriter) Write(buf []byte) (n int, err error) { + var n1, n2 int + if w == nil || w.Writer == nil { + return 0, errors.New("Writer not instantiated") + } + binary.BigEndian.PutUint32(w.prefix[4:], uint32(len(buf))) + n1, err = w.Writer.Write(w.prefix[:]) + if err != nil { + n = n1 - StdWriterPrefixLen + } else { + n2, err = w.Writer.Write(buf) + n = n1 + n2 - StdWriterPrefixLen + } + if n < 0 { + n = 0 + } + return +} + +// NewStdWriter instantiates a new Writer. +// Everything written to it will be encapsulated using a custom format, +// and written to the underlying `w` stream. +// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection. +// `t` indicates the id of the stream to encapsulate. +// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr. +func NewStdWriter(w io.Writer, t StdType) *StdWriter { + return &StdWriter{ + Writer: w, + prefix: t, + sizeBuf: make([]byte, 4), + } +} + +var ErrInvalidStdHeader = errors.New("Unrecognized input header") + +// StdCopy is a modified version of io.Copy. +// +// StdCopy will demultiplex `src`, assuming that it contains two streams, +// previously multiplexed together using a StdWriter instance. +// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`. +// +// StdCopy will read until it hits EOF on `src`. It will then return a nil error. +// In other words: if `err` is non nil, it indicates a real underlying error. +// +// `written` will hold the total number of bytes written to `dstout` and `dsterr`. 
+func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) {
+	var (
+		buf       = make([]byte, 32*1024+StdWriterPrefixLen+1)
+		bufLen    = len(buf)
+		nr, nw    int
+		er, ew    error
+		out       io.Writer
+		frameSize int
+	)
+
+	for {
+		// Make sure we have at least a full header
+		for nr < StdWriterPrefixLen {
+			var nr2 int
+			nr2, er = src.Read(buf[nr:])
+			nr += nr2
+			if er == io.EOF {
+				if nr < StdWriterPrefixLen {
+					logrus.Debugf("Corrupted prefix: %v", buf[:nr])
+					return written, nil
+				}
+				break
+			}
+			if er != nil {
+				logrus.Debugf("Error reading header: %s", er)
+				return 0, er
+			}
+		}
+
+		// Check the first byte to know where to write
+		switch buf[StdWriterFdIndex] {
+		case 0:
+			fallthrough
+		case 1:
+			// Write on stdout
+			out = dstout
+		case 2:
+			// Write on stderr
+			out = dsterr
+		default:
+			logrus.Debugf("Error selecting output fd: (%d)", buf[StdWriterFdIndex])
+			return 0, ErrInvalidStdHeader
+		}
+
+		// Retrieve the size of the frame
+		frameSize = int(binary.BigEndian.Uint32(buf[StdWriterSizeIndex : StdWriterSizeIndex+4]))
+		logrus.Debugf("framesize: %d", frameSize)
+
+		// Check if the buffer is big enough to read the frame.
+		// Extend it if necessary.
+		if frameSize+StdWriterPrefixLen > bufLen {
+			logrus.Debugf("Extending buffer cap by %d (was %d)", frameSize+StdWriterPrefixLen-bufLen+1, len(buf))
+			buf = append(buf, make([]byte, frameSize+StdWriterPrefixLen-bufLen+1)...)
+			bufLen = len(buf)
+		}
+
+		// While the amount of bytes read is less than the size of the frame + header, we keep reading
+		for nr < frameSize+StdWriterPrefixLen {
+			var nr2 int
+			nr2, er = src.Read(buf[nr:])
+			nr += nr2
+			if er == io.EOF {
+				if nr < frameSize+StdWriterPrefixLen {
+					logrus.Debugf("Corrupted frame: %v", buf[StdWriterPrefixLen:nr])
+					return written, nil
+				}
+				break
+			}
+			if er != nil {
+				logrus.Debugf("Error reading frame: %s", er)
+				return 0, er
+			}
+		}
+
+		// Write the retrieved frame (without header)
+		nw, ew = out.Write(buf[StdWriterPrefixLen : frameSize+StdWriterPrefixLen])
+		if ew != nil {
+			logrus.Debugf("Error writing frame: %s", ew)
+			return 0, ew
+		}
+		// If the frame has not been fully written: error
+		if nw != frameSize {
+			logrus.Debugf("Short write: (%d on %d)", nw, frameSize)
+			return 0, io.ErrShortWrite
+		}
+		written += int64(nw)
+
+		// Move the rest of the buffer to the beginning
+		copy(buf, buf[frameSize+StdWriterPrefixLen:])
+		// Move the index
+		nr -= frameSize + StdWriterPrefixLen
+	}
+}
diff --git a/pkg/stdcopy/stdcopy_test.go b/pkg/stdcopy/stdcopy_test.go
new file mode 100644
index 00000000..a9fd73a4
--- /dev/null
+++ b/pkg/stdcopy/stdcopy_test.go
@@ -0,0 +1,85 @@
+package stdcopy
+
+import (
+	"bytes"
+	"io/ioutil"
+	"strings"
+	"testing"
+)
+
+func TestNewStdWriter(t *testing.T) {
+	writer := NewStdWriter(ioutil.Discard, Stdout)
+	if writer == nil {
+		t.Fatalf("NewStdWriter should not return nil.")
+	}
+}
+
+func TestWriteWithUninitializedStdWriter(t *testing.T) {
+	writer := StdWriter{
+		Writer:  nil,
+		prefix:  Stdout,
+		sizeBuf: make([]byte, 4),
+	}
+	n, err := writer.Write([]byte("Something here"))
+	if n != 0 || err == nil {
+		t.Fatalf("Should fail when given an incomplete or uninitialized StdWriter")
+	}
+}
+
+func TestWriteWithNilBytes(t *testing.T) {
+	writer := NewStdWriter(ioutil.Discard, Stdout)
+	n, err := writer.Write(nil)
+	if err != nil {
+		t.Fatalf("Shouldn't have failed when given no data")
+	}
+	if n > 0 {
+		t.Fatalf("Write should have written 0 bytes, but has written %d", n)
+	}
+}
+
+func TestWrite(t *testing.T) {
+	writer := NewStdWriter(ioutil.Discard, Stdout)
+	data := []byte("Test StdWriter.Write")
+	n, err := writer.Write(data)
+	if err != nil {
+		t.Fatalf("Error while writing with StdWriter")
+	}
+	if n != len(data) {
+		t.Fatalf("Write should have written %d bytes but wrote %d.", len(data), n)
+	}
+}
+
+func TestStdCopyWithInvalidInputHeader(t *testing.T) {
+	dstOut := NewStdWriter(ioutil.Discard, Stdout)
+	dstErr := NewStdWriter(ioutil.Discard, Stderr)
+	src := strings.NewReader("Invalid input")
+	_, err := StdCopy(dstOut, dstErr, src)
+	if err == nil {
+		t.Fatal("StdCopy with invalid input header should fail.")
+	}
+}
+
+func TestStdCopyWithCorruptedPrefix(t *testing.T) {
+	data := []byte{0x01, 0x02, 0x03}
+	src := bytes.NewReader(data)
+	written, err := StdCopy(nil, nil, src)
+	if err != nil {
+		t.Fatalf("StdCopy should not return an error with a corrupted prefix.")
+	}
+	if written != 0 {
+		t.Fatalf("StdCopy should have written 0, but has written %d", written)
+	}
+}
+
+func BenchmarkWrite(b *testing.B) {
+	w := NewStdWriter(ioutil.Discard, Stdout)
+	data := []byte("Test line for testing stdwriter performance\n")
+	data = bytes.Repeat(data, 100)
+	b.SetBytes(int64(len(data)))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		if _, err := w.Write(data); err != nil {
+			b.Fatal(err)
+		}
+	}
+}
diff --git a/pkg/streamformatter/streamformatter.go b/pkg/streamformatter/streamformatter.go
new file mode 100644
index 00000000..792ce00f
--- /dev/null
+++ b/pkg/streamformatter/streamformatter.go
@@ -0,0 +1,115 @@
+package streamformatter
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+
+	"github.com/docker/docker/pkg/jsonmessage"
+)
+
+type StreamFormatter struct {
+	json bool
+}
+
+// NewStreamFormatter returns a simple StreamFormatter
+func NewStreamFormatter() *StreamFormatter {
+	return &StreamFormatter{}
+}
+
+// NewJSONStreamFormatter returns a StreamFormatter configured to stream json
+func NewJSONStreamFormatter() *StreamFormatter {
+	return &StreamFormatter{true}
+}
+
+const streamNewline = "\r\n"
+
+var streamNewlineBytes = []byte(streamNewline)
+
+func (sf *StreamFormatter) FormatStream(str string) []byte {
+	if sf.json {
+		b, err := json.Marshal(&jsonmessage.JSONMessage{Stream: str})
+		if err != nil {
+			return sf.FormatError(err)
+		}
+		return append(b, streamNewlineBytes...)
+	}
+	return []byte(str + "\r")
+}
+
+func (sf *StreamFormatter) FormatStatus(id, format string, a ...interface{}) []byte {
+	str := fmt.Sprintf(format, a...)
+	if sf.json {
+		b, err := json.Marshal(&jsonmessage.JSONMessage{ID: id, Status: str})
+		if err != nil {
+			return sf.FormatError(err)
+		}
+		return append(b, streamNewlineBytes...)
+	}
+	return []byte(str + streamNewline)
+}
+
+func (sf *StreamFormatter) FormatError(err error) []byte {
+	if sf.json {
+		jsonError, ok := err.(*jsonmessage.JSONError)
+		if !ok {
+			jsonError = &jsonmessage.JSONError{Message: err.Error()}
+		}
+		if b, err := json.Marshal(&jsonmessage.JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil {
+			return append(b, streamNewlineBytes...)
+ } + return []byte("{\"error\":\"format error\"}" + streamNewline) + } + return []byte("Error: " + err.Error() + streamNewline) +} + +func (sf *StreamFormatter) FormatProgress(id, action string, progress *jsonmessage.JSONProgress) []byte { + if progress == nil { + progress = &jsonmessage.JSONProgress{} + } + if sf.json { + b, err := json.Marshal(&jsonmessage.JSONMessage{ + Status: action, + ProgressMessage: progress.String(), + Progress: progress, + ID: id, + }) + if err != nil { + return nil + } + return b + } + endl := "\r" + if progress.String() == "" { + endl += "\n" + } + return []byte(action + " " + progress.String() + endl) +} + +type StdoutFormater struct { + io.Writer + *StreamFormatter +} + +func (sf *StdoutFormater) Write(buf []byte) (int, error) { + formattedBuf := sf.StreamFormatter.FormatStream(string(buf)) + n, err := sf.Writer.Write(formattedBuf) + if n != len(formattedBuf) { + return n, io.ErrShortWrite + } + return len(buf), err +} + +type StderrFormater struct { + io.Writer + *StreamFormatter +} + +func (sf *StderrFormater) Write(buf []byte) (int, error) { + formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m") + n, err := sf.Writer.Write(formattedBuf) + if n != len(formattedBuf) { + return n, io.ErrShortWrite + } + return len(buf), err +} diff --git a/pkg/streamformatter/streamformatter_test.go b/pkg/streamformatter/streamformatter_test.go new file mode 100644 index 00000000..acf81bef --- /dev/null +++ b/pkg/streamformatter/streamformatter_test.go @@ -0,0 +1,93 @@ +package streamformatter + +import ( + "encoding/json" + "errors" + "reflect" + "testing" + + "github.com/docker/docker/pkg/jsonmessage" +) + +func TestFormatStream(t *testing.T) { + sf := NewStreamFormatter() + res := sf.FormatStream("stream") + if string(res) != "stream"+"\r" { + t.Fatalf("%q", res) + } +} + +func TestFormatJSONStatus(t *testing.T) { + sf := NewStreamFormatter() + res := sf.FormatStatus("ID", "%s%d", "a", 1) + if string(res) != "a1\r\n" { + t.Fatalf("%q", res) + } +} + +func TestFormatSimpleError(t *testing.T) { + sf := NewStreamFormatter() + res := sf.FormatError(errors.New("Error for formatter")) + if string(res) != "Error: Error for formatter\r\n" { + t.Fatalf("%q", res) + } +} + +func TestJSONFormatStream(t *testing.T) { + sf := NewJSONStreamFormatter() + res := sf.FormatStream("stream") + if string(res) != `{"stream":"stream"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestJSONFormatStatus(t *testing.T) { + sf := NewJSONStreamFormatter() + res := sf.FormatStatus("ID", "%s%d", "a", 1) + if string(res) != `{"status":"a1","id":"ID"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestJSONFormatSimpleError(t *testing.T) { + sf := NewJSONStreamFormatter() + res := sf.FormatError(errors.New("Error for formatter")) + if string(res) != `{"errorDetail":{"message":"Error for formatter"},"error":"Error for formatter"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestJSONFormatJSONError(t *testing.T) { + sf := NewJSONStreamFormatter() + err := &jsonmessage.JSONError{Code: 50, Message: "Json error"} + res := sf.FormatError(err) + if string(res) != `{"errorDetail":{"code":50,"message":"Json error"},"error":"Json error"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestJSONFormatProgress(t *testing.T) { + sf := NewJSONStreamFormatter() + progress := &jsonmessage.JSONProgress{ + Current: 15, + Total: 30, + Start: 1, + } + res := sf.FormatProgress("id", "action", progress) + msg := &jsonmessage.JSONMessage{} + if err := json.Unmarshal(res, msg); err != nil { + 
t.Fatal(err)
+	}
+	if msg.ID != "id" {
+		t.Fatalf("ID must be 'id', got: %s", msg.ID)
+	}
+	if msg.Status != "action" {
+		t.Fatalf("Status must be 'action', got: %s", msg.Status)
+	}
+	if msg.ProgressMessage != progress.String() {
+		t.Fatalf("ProgressMessage must be %s, got: %s", progress.String(), msg.ProgressMessage)
+	}
+	if !reflect.DeepEqual(msg.Progress, progress) {
+		t.Fatal("Original progress does not equal progress from FormatProgress")
+	}
+}
diff --git a/pkg/stringid/README.md b/pkg/stringid/README.md
new file mode 100644
index 00000000..37a5098f
--- /dev/null
+++ b/pkg/stringid/README.md
@@ -0,0 +1 @@
+This package provides helper functions for dealing with string identifiers
diff --git a/pkg/stringid/stringid.go b/pkg/stringid/stringid.go
new file mode 100644
index 00000000..6a683b68
--- /dev/null
+++ b/pkg/stringid/stringid.go
@@ -0,0 +1,48 @@
+package stringid
+
+import (
+	"crypto/rand"
+	"encoding/hex"
+	"io"
+	"regexp"
+	"strconv"
+)
+
+const shortLen = 12
+
+var validShortID = regexp.MustCompile("^[a-z0-9]{12}$")
+
+// IsShortID determines if an arbitrary string *looks like* a short ID.
+func IsShortID(id string) bool {
+	return validShortID.MatchString(id)
+}
+
+// TruncateID returns a shorthand version of a string identifier for convenience.
+// A collision with other shorthands is very unlikely, but possible.
+// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
+// will need to use a longer prefix, or the full-length ID.
+func TruncateID(id string) string {
+	trimTo := shortLen
+	if len(id) < shortLen {
+		trimTo = len(id)
+	}
+	return id[:trimTo]
+}
+
+// GenerateRandomID returns a unique ID.
+func GenerateRandomID() string {
+	for {
+		id := make([]byte, 32)
+		if _, err := io.ReadFull(rand.Reader, id); err != nil {
+			panic(err) // This shouldn't happen
+		}
+		value := hex.EncodeToString(id)
+		// if we try to parse the truncated form as an int and we don't have
+		// an error then the value is all numeric and causes issues when
+		// used as a hostname. ref #3869
+		if _, err := strconv.ParseInt(TruncateID(value), 10, 64); err == nil {
+			continue
+		}
+		return value
+	}
+}
diff --git a/pkg/stringid/stringid_test.go b/pkg/stringid/stringid_test.go
new file mode 100644
index 00000000..bcb13654
--- /dev/null
+++ b/pkg/stringid/stringid_test.go
@@ -0,0 +1,56 @@
+package stringid
+
+import (
+	"strings"
+	"testing"
+)
+
+func TestGenerateRandomID(t *testing.T) {
+	id := GenerateRandomID()
+
+	if len(id) != 64 {
+		t.Fatalf("Id returned is incorrect: %s", id)
+	}
+}
+
+func TestShortenId(t *testing.T) {
+	id := GenerateRandomID()
+	truncID := TruncateID(id)
+	if len(truncID) != 12 {
+		t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID)
+	}
+}
+
+func TestShortenIdEmpty(t *testing.T) {
+	id := ""
+	truncID := TruncateID(id)
+	if len(truncID) > len(id) {
+		t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID)
+	}
+}
+
+func TestShortenIdInvalid(t *testing.T) {
+	id := "1234"
+	truncID := TruncateID(id)
+	if len(truncID) != len(id) {
+		t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID)
+	}
+}
+
+func TestIsShortIDNonHex(t *testing.T) {
+	id := "some non-hex value"
+	if IsShortID(id) {
+		t.Fatalf("%s is not a short ID", id)
+	}
+}
+
+func TestIsShortIDNotCorrectSize(t *testing.T) {
+	id := strings.Repeat("a", shortLen+1)
+	if IsShortID(id) {
+		t.Fatalf("%s is not a short ID", id)
+	}
+	id = strings.Repeat("a", shortLen-1)
+	if IsShortID(id) {
+		t.Fatalf("%s is not a short ID", id)
+	}
+}
diff --git a/pkg/stringutils/README.md b/pkg/stringutils/README.md
new file mode 100644
index 00000000..b3e45457
--- /dev/null
+++ b/pkg/stringutils/README.md
@@ -0,0 +1 @@
+This package provides helper functions for dealing with strings
diff --git a/pkg/stringutils/stringutils.go b/pkg/stringutils/stringutils.go
new file mode 100644
index 00000000..aee2648b
--- /dev/null
+++ b/pkg/stringutils/stringutils.go
@@ -0,0 +1,87 @@
+package stringutils
+
+import (
+	"bytes"
+	"math/rand"
+	"strings"
+
+	"github.com/docker/docker/pkg/random"
+)
+
+// Generate an alpha-only random string with length n
+func GenerateRandomAlphaOnlyString(n int) string {
+	// make a really long string
+	letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+	b := make([]byte, n)
+	r := rand.New(random.NewSource())
+	for i := range b {
+		b[i] = letters[r.Intn(len(letters))]
+	}
+	return string(b)
+}
+
+// Generate an ASCII random string with length n
+func GenerateRandomAsciiString(n int) string {
+	chars := "abcdefghijklmnopqrstuvwxyz" +
+		"ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
+		"~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` "
+	res := make([]byte, n)
+	for i := 0; i < n; i++ {
+		res[i] = chars[rand.Intn(len(chars))]
+	}
+	return string(res)
+}
+
+// Truncate a string to maxlen
+func Truncate(s string, maxlen int) string {
+	if len(s) <= maxlen {
+		return s
+	}
+	return s[:maxlen]
+}
+
+// Test whether a string is contained in a slice of strings or not.
+// Comparison is case insensitive
+func InSlice(slice []string, s string) bool {
+	for _, ss := range slice {
+		if strings.ToLower(s) == strings.ToLower(ss) {
+			return true
+		}
+	}
+	return false
+}
+
+func quote(word string, buf *bytes.Buffer) {
+	// Bail out early for "simple" strings
+	if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! \t\n") {
+		buf.WriteString(word)
+		return
+	}
+
+	buf.WriteString("'")
+
+	for i := 0; i < len(word); i++ {
+		b := word[i]
+		if b == '\'' {
+			// Replace literal ' with a close ', a \', and an open '
+			buf.WriteString("'\\''")
+		} else {
+			buf.WriteByte(b)
+		}
+	}
+
+	buf.WriteString("'")
+}
+
+// Take a list of strings and escape them so they will be handled right
+// when passed as arguments to a program via a shell
+func ShellQuoteArguments(args []string) string {
+	var buf bytes.Buffer
+	for i, arg := range args {
+		if i != 0 {
+			buf.WriteByte(' ')
+		}
+		quote(arg, &buf)
+	}
+	return buf.String()
+}
diff --git a/pkg/stringutils/stringutils_test.go b/pkg/stringutils/stringutils_test.go
new file mode 100644
index 00000000..124b2551
--- /dev/null
+++ b/pkg/stringutils/stringutils_test.go
@@ -0,0 +1,105 @@
+package stringutils
+
+import "testing"
+
+func testLengthHelper(generator func(int) string, t *testing.T) {
+	expectedLength := 20
+	s := generator(expectedLength)
+	if len(s) != expectedLength {
+		t.Fatalf("Length of %s was %d but expected length %d", s, len(s), expectedLength)
+	}
+}
+
+func testUniquenessHelper(generator func(int) string, t *testing.T) {
+	repeats := 25
+	set := make(map[string]struct{}, repeats)
+	for i := 0; i < repeats; i++ {
+		str := generator(64)
+		if len(str) != 64 {
+			t.Fatalf("Id returned is incorrect: %s", str)
+		}
+		if _, ok := set[str]; ok {
+			t.Fatalf("Random number is repeated")
+		}
+		set[str] = struct{}{}
+	}
+}
+
+func isASCII(s string) bool {
+	for _, c := range s {
+		if c > 127 {
+			return false
+		}
+	}
+	return true
+}
+
+func TestGenerateRandomAlphaOnlyStringLength(t *testing.T) {
+	testLengthHelper(GenerateRandomAlphaOnlyString, t)
+}
+
+func TestGenerateRandomAlphaOnlyStringUniqueness(t *testing.T) {
+	testUniquenessHelper(GenerateRandomAlphaOnlyString, t)
+}
+
+func TestGenerateRandomAsciiStringLength(t *testing.T) {
+	testLengthHelper(GenerateRandomAsciiString, t)
+}
+
+func TestGenerateRandomAsciiStringUniqueness(t *testing.T) {
+	testUniquenessHelper(GenerateRandomAsciiString, t)
+}
+
+func TestGenerateRandomAsciiStringIsAscii(t *testing.T) {
+	str := GenerateRandomAsciiString(64)
+	if !isASCII(str) {
+		t.Fatalf("%s contained non-ascii characters", str)
+	}
+}
+
+func TestTruncate(t *testing.T) {
+	str := "teststring"
+	newstr := Truncate(str, 4)
+	if newstr != "test" {
+		t.Fatalf("Expected test, got %s", newstr)
+	}
+	newstr = Truncate(str, 20)
+	if newstr != "teststring" {
+		t.Fatalf("Expected teststring, got %s", newstr)
+	}
+}
+
+func TestInSlice(t *testing.T) {
+	slice := []string{"test", "in", "slice"}
+
+	test := InSlice(slice, "test")
+	if !test {
+		t.Fatalf("Expected string test to be in slice")
+	}
+	test = InSlice(slice, "SLICE")
+	if !test {
+		t.Fatalf("Expected string SLICE to be in slice")
+	}
+	test = InSlice(slice, "notinslice")
+	if test {
+		t.Fatalf("Expected string notinslice not to be in slice")
+	}
+}
+
+func TestShellQuoteArgumentsEmpty(t *testing.T) {
+	actual := ShellQuoteArguments([]string{})
+	expected := ""
+	if actual != expected {
+		t.Fatalf("Expected an empty string")
+	}
+}
+
+func TestShellQuoteArguments(t *testing.T) {
+	simpleString := "simpleString"
+	complexString := "This is a 'more' complex $tring with some special char *"
+	actual := ShellQuoteArguments([]string{simpleString, complexString})
+	expected := "simpleString 'This is a '\\''more'\\'' complex $tring with some special char *'"
+	if actual != expected {
+		t.Fatalf("Expected \"%v\", got \"%v\"", expected, actual)
+	}
+}
diff --git 
a/pkg/symlink/LICENSE.APACHE b/pkg/symlink/LICENSE.APACHE new file mode 100644 index 00000000..9e4bd4db --- /dev/null +++ b/pkg/symlink/LICENSE.APACHE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014-2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/pkg/symlink/LICENSE.BSD b/pkg/symlink/LICENSE.BSD new file mode 100644 index 00000000..ac74d8f0 --- /dev/null +++ b/pkg/symlink/LICENSE.BSD @@ -0,0 +1,27 @@ +Copyright (c) 2014-2015 The Docker & Go Authors. 
All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pkg/symlink/README.md b/pkg/symlink/README.md new file mode 100644 index 00000000..0d1dbb70 --- /dev/null +++ b/pkg/symlink/README.md @@ -0,0 +1,5 @@ +Package symlink implements EvalSymlinksInScope which is an extension of filepath.EvalSymlinks +from the [Go standard library](https://golang.org/pkg/path/filepath). + +The code from filepath.EvalSymlinks has been adapted in fs.go. +Please read the LICENSE.BSD file that governs fs.go and LICENSE.APACHE for fs_test.go. diff --git a/pkg/symlink/fs.go b/pkg/symlink/fs.go new file mode 100644 index 00000000..b4bdff24 --- /dev/null +++ b/pkg/symlink/fs.go @@ -0,0 +1,131 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.BSD file. + +// This code is a modified version of path/filepath/symlink.go from the Go standard library. + +package symlink + +import ( + "bytes" + "errors" + "os" + "path/filepath" + "strings" +) + +// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an absolute path +func FollowSymlinkInScope(path, root string) (string, error) { + path, err := filepath.Abs(path) + if err != nil { + return "", err + } + root, err = filepath.Abs(root) + if err != nil { + return "", err + } + return evalSymlinksInScope(path, root) +} + +// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return +// a result guaranteed to be contained within the scope `root`, at the time of the call. +// Symlinks in `root` are not evaluated and left as-is. +// Errors encountered while attempting to evaluate symlinks in path will be returned. +// Non-existing paths are valid and do not constitute an error. +// `path` has to contain `root` as a prefix, or else an error will be returned. +// Trying to break out from `root` does not constitute an error. 
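+//
+// A minimal caller sketch (rootfs is a hypothetical container root path,
+// purely for illustration):
+//
+//	resolved, err := symlink.FollowSymlinkInScope(
+//		filepath.Join(rootfs, "etc/passwd"), rootfs)
+//	// on success, resolved is guaranteed to live under rootfs, even if
+//	// "etc" or "passwd" is a symlink pointing outside of it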
+//
+// Example:
+// If /foo/bar -> /outside,
+// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/outside"
+//
+// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks
+// are created and not to subsequently create additional symlinks that could potentially make a
+// previously-safe path unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo")
+// would return "/foo/bar". If one makes /foo/bar a symlink to /baz subsequently, then "/foo/bar" should
+// no longer be considered safely contained in "/foo".
+func evalSymlinksInScope(path, root string) (string, error) {
+	root = filepath.Clean(root)
+	if path == root {
+		return path, nil
+	}
+	if !strings.HasPrefix(path, root) {
+		return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
+	}
+	const maxIter = 255
+	originalPath := path
+	// given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c"
+	path = path[len(root):]
+	if root == string(filepath.Separator) {
+		path = string(filepath.Separator) + path
+	}
+	if !strings.HasPrefix(path, string(filepath.Separator)) {
+		return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
+	}
+	path = filepath.Clean(path)
+	// consume path by taking each frontmost path element,
+	// expanding it if it's a symlink, and appending it to b
+	var b bytes.Buffer
+	// b here will always be considered to be the "current absolute path inside
+	// root" when we append paths to it; we also append a slash and use
+	// filepath.Clean after the loop to trim the trailing slash
+	for n := 0; path != ""; n++ {
+		if n > maxIter {
+			return "", errors.New("evalSymlinksInScope: too many links in " + originalPath)
+		}
+
+		// find next path component, p
+		i := strings.IndexRune(path, filepath.Separator)
+		var p string
+		if i == -1 {
+			p, path = path, ""
+		} else {
+			p, path = path[:i], path[i+1:]
+		}
+
+		if p == "" {
+			continue
+		}
+
+		// this takes a b.String() like "b/../" and a p like "c" and turns it
+		// into "/b/../c" which then gets filepath.Cleaned into "/c" and then
+		// root gets prepended and we Clean again (to remove any trailing slash
+		// if the first Clean gave us just "/")
+		cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p)
+		if cleanP == string(filepath.Separator) {
+			// never Lstat "/" itself
+			b.Reset()
+			continue
+		}
+		fullP := filepath.Clean(root + cleanP)
+
+		fi, err := os.Lstat(fullP)
+		if os.IsNotExist(err) {
+			// if p does not exist, accept it
+			b.WriteString(p)
+			b.WriteRune(filepath.Separator)
+			continue
+		}
+		if err != nil {
+			return "", err
+		}
+		if fi.Mode()&os.ModeSymlink == 0 {
+			b.WriteString(p + string(filepath.Separator))
+			continue
+		}
+
+		// it's a symlink, put it at the front of path
+		dest, err := os.Readlink(fullP)
+		if err != nil {
+			return "", err
+		}
+		if filepath.IsAbs(dest) {
+			b.Reset()
+		}
+		path = dest + string(filepath.Separator) + path
+	}
+
+	// see note above on "fullP := ..."
for why this is double-cleaned and + // what's happening here + return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil +} diff --git a/pkg/symlink/fs_test.go b/pkg/symlink/fs_test.go new file mode 100644 index 00000000..89209484 --- /dev/null +++ b/pkg/symlink/fs_test.go @@ -0,0 +1,402 @@ +// Licensed under the Apache License, Version 2.0; See LICENSE.APACHE + +package symlink + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +type dirOrLink struct { + path string + target string +} + +func makeFs(tmpdir string, fs []dirOrLink) error { + for _, s := range fs { + s.path = filepath.Join(tmpdir, s.path) + if s.target == "" { + os.MkdirAll(s.path, 0755) + continue + } + if err := os.MkdirAll(filepath.Dir(s.path), 0755); err != nil { + return err + } + if err := os.Symlink(s.target, s.path); err != nil && !os.IsExist(err) { + return err + } + } + return nil +} + +func testSymlink(tmpdir, path, expected, scope string) error { + rewrite, err := FollowSymlinkInScope(filepath.Join(tmpdir, path), filepath.Join(tmpdir, scope)) + if err != nil { + return err + } + expected, err = filepath.Abs(filepath.Join(tmpdir, expected)) + if err != nil { + return err + } + if expected != rewrite { + return fmt.Errorf("Expected %q got %q", expected, rewrite) + } + return nil +} + +func TestFollowSymlinkAbsolute(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkAbsolute") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/d/c/data", "testdata/b/c/data", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativePath(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/i", target: "a"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/i", "testdata/fs/a", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkSkipSymlinksOutsideScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSkipSymlinksOutsideScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{ + {path: "linkdir", target: "realdir"}, + {path: "linkdir/foo/bar"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "linkdir/foo/bar", "linkdir/foo/bar", "linkdir/foo"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkInvalidScopePathPair(t *testing.T) { + if _, err := FollowSymlinkInScope("toto", "testdata"); err == nil { + t.Fatal("expected an error") + } +} + +func TestFollowSymlinkLastLink(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkLastLink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/d", "testdata/b", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativeLinkChangeScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChangeScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/e", target: "../b"}}); err != nil { + t.Fatal(err) + 
} + if err := testSymlink(tmpdir, "testdata/fs/a/e/c/data", "testdata/fs/b/c/data", "testdata"); err != nil { + t.Fatal(err) + } + // avoid letting allowing symlink e lead us to ../b + // normalize to the "testdata/fs/a" + if err := testSymlink(tmpdir, "testdata/fs/a/e", "testdata/fs/a/b", "testdata/fs/a"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkDeepRelativeLinkChangeScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDeepRelativeLinkChangeScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/f", target: "../../../../test"}}); err != nil { + t.Fatal(err) + } + // avoid letting symlink f lead us out of the "testdata" scope + // we don't normalize because symlink f is in scope and there is no + // information leak + if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/test", "testdata"); err != nil { + t.Fatal(err) + } + // avoid letting symlink f lead us out of the "testdata/fs" scope + // we don't normalize because symlink f is in scope and there is no + // information leak + if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/fs/test", "testdata/fs"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativeLinkChain(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChain") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + // avoid letting symlink g (pointed at by symlink h) take out of scope + // TODO: we should probably normalize to scope here because ../[....]/root + // is out of scope and we leak information + if err := makeFs(tmpdir, []dirOrLink{ + {path: "testdata/fs/b/h", target: "../g"}, + {path: "testdata/fs/g", target: "../../../../../../../../../../../../root"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/b/h", "testdata/root", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkBreakoutPath(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutPath") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + // avoid letting symlink -> ../directory/file escape from scope + // normalize to "testdata/fs/j" + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/j/k", target: "../i/a"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/j/k", "testdata/fs/j/i/a", "testdata/fs/j"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkToRoot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkToRoot") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + // make sure we don't allow escaping to / + // normalize to dir + if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "foo", "", ""); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkSlashDotdot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSlashDotdot") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + tmpdir = filepath.Join(tmpdir, "dir", "subdir") + + // make sure we don't allow escaping to / + // normalize to dir + if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/../../"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "foo", "", ""); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkDotdot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDotdot") + if err != nil { + 
t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + tmpdir = filepath.Join(tmpdir, "dir", "subdir") + + // make sure we stay in scope without leaking information + // this also checks for escaping to / + // normalize to dir + if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "../../"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "foo", "", ""); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativePath2(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath2") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{{path: "bar/foo", target: "baz/target"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "bar/foo", "bar/baz/target", ""); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkScopeLink(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkScopeLink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root2"}, + {path: "root", target: "root2"}, + {path: "root2/foo", target: "../bar"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "root/bar", "root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRootScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRootScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + expected, err := filepath.EvalSymlinks(tmpdir) + if err != nil { + t.Fatal(err) + } + rewrite, err := FollowSymlinkInScope(tmpdir, "/") + if err != nil { + t.Fatal(err) + } + if rewrite != expected { + t.Fatalf("expected %q got %q", expected, rewrite) + } +} + +func TestFollowSymlinkEmpty(t *testing.T) { + res, err := FollowSymlinkInScope("", "") + if err != nil { + t.Fatal(err) + } + wd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + if res != wd { + t.Fatalf("expected %q got %q", wd, res) + } +} + +func TestFollowSymlinkCircular(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkCircular") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{{path: "root/foo", target: "foo"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil { + t.Fatal("expected an error for foo -> foo") + } + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/bar", target: "baz"}, + {path: "root/baz", target: "../bak"}, + {path: "root/bak", target: "/bar"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil { + t.Fatal("expected an error for bar -> baz -> bak -> bar") + } +} + +func TestFollowSymlinkComplexChainWithTargetPathsContainingLinks(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkComplexChainWithTargetPathsContainingLinks") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root2"}, + {path: "root", target: "root2"}, + {path: "root/a", target: "r/s"}, + {path: "root/r", target: "../root/t"}, + {path: "root/root/t/s/b", target: "/../u"}, + {path: "root/u/c", target: "."}, + {path: "root/u/x/y", target: "../v"}, + {path: "root/u/v", target: "/../w"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/a/b/c/x/y/z", "root/w/z", "root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkBreakoutNonExistent(t *testing.T) { + tmpdir, err := ioutil.TempDir("", 
"TestFollowSymlinkBreakoutNonExistent") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/slash", target: "/"}, + {path: "root/sym", target: "/idontexist/../slash"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/sym/file", "root/file", "root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkNoLexicalCleaning(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkNoLexicalCleaning") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/sym", target: "/foo/bar"}, + {path: "root/hello", target: "/sym/../baz"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/hello", "root/foo/baz", "root"); err != nil { + t.Fatal(err) + } +} diff --git a/pkg/sysinfo/README.md b/pkg/sysinfo/README.md new file mode 100644 index 00000000..c1530cef --- /dev/null +++ b/pkg/sysinfo/README.md @@ -0,0 +1 @@ +SysInfo stores information about which features a kernel supports. diff --git a/pkg/sysinfo/sysinfo.go b/pkg/sysinfo/sysinfo.go new file mode 100644 index 00000000..8f905aa0 --- /dev/null +++ b/pkg/sysinfo/sysinfo.go @@ -0,0 +1,25 @@ +package sysinfo + +// SysInfo stores information about which features a kernel supports. +// TODO Windows: Factor out platform specific capabilities. +type SysInfo struct { + AppArmor bool + *cgroupMemInfo + *cgroupCpuInfo + IPv4ForwardingDisabled bool + BridgeNfCallIptablesDisabled bool + BridgeNfCallIp6tablesDisabled bool + CgroupDevicesEnabled bool +} + +type cgroupMemInfo struct { + MemoryLimit bool + SwapLimit bool + OomKillDisable bool + MemorySwappiness bool +} + +type cgroupCpuInfo struct { + CpuCfsPeriod bool + CpuCfsQuota bool +} diff --git a/pkg/sysinfo/sysinfo_linux.go b/pkg/sysinfo/sysinfo_linux.go new file mode 100644 index 00000000..f864dcdd --- /dev/null +++ b/pkg/sysinfo/sysinfo_linux.go @@ -0,0 +1,94 @@ +package sysinfo + +import ( + "io/ioutil" + "os" + "path" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/opencontainers/runc/libcontainer/cgroups" +) + +// New returns a new SysInfo, using the filesystem to detect which features the kernel supports. +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + sysInfo.cgroupMemInfo = checkCgroupMem(quiet) + sysInfo.cgroupCpuInfo = checkCgroupCpu(quiet) + + _, err := cgroups.FindCgroupMountpoint("devices") + sysInfo.CgroupDevicesEnabled = err == nil + + sysInfo.IPv4ForwardingDisabled = !readProcBool("/proc/sys/net/ipv4/ip_forward") + sysInfo.BridgeNfCallIptablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-iptables") + sysInfo.BridgeNfCallIp6tablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-ip6tables") + + // Check if AppArmor is supported. 
+	if _, err := os.Stat("/sys/kernel/security/apparmor"); !os.IsNotExist(err) {
+		sysInfo.AppArmor = true
+	}
+
+	return sysInfo
+}
+
+func checkCgroupMem(quiet bool) *cgroupMemInfo {
+	info := &cgroupMemInfo{}
+	mountPoint, err := cgroups.FindCgroupMountpoint("memory")
+	if err != nil {
+		if !quiet {
+			logrus.Warnf("Your kernel does not support cgroup memory limit: %v", err)
+		}
+		return info
+	}
+	info.MemoryLimit = true
+
+	info.SwapLimit = cgroupEnabled(mountPoint, "memory.memsw.limit_in_bytes")
+	if !quiet && !info.SwapLimit {
+		logrus.Warn("Your kernel does not support swap memory limit.")
+	}
+	info.OomKillDisable = cgroupEnabled(mountPoint, "memory.oom_control")
+	if !quiet && !info.OomKillDisable {
+		logrus.Warn("Your kernel does not support oom control.")
+	}
+	info.MemorySwappiness = cgroupEnabled(mountPoint, "memory.swappiness")
+	if !quiet && !info.MemorySwappiness {
+		logrus.Warn("Your kernel does not support memory swappiness.")
+	}
+
+	return info
+}
+
+func checkCgroupCpu(quiet bool) *cgroupCpuInfo {
+	info := &cgroupCpuInfo{}
+	mountPoint, err := cgroups.FindCgroupMountpoint("cpu")
+	if err != nil {
+		if !quiet {
+			logrus.Warn(err)
+		}
+		return info
+	}
+
+	info.CpuCfsPeriod = cgroupEnabled(mountPoint, "cpu.cfs_period_us")
+	if !quiet && !info.CpuCfsPeriod {
+		logrus.Warn("Your kernel does not support cgroup cfs period")
+	}
+
+	info.CpuCfsQuota = cgroupEnabled(mountPoint, "cpu.cfs_quota_us")
+	if !quiet && !info.CpuCfsQuota {
+		logrus.Warn("Your kernel does not support cgroup cfs quotas")
+	}
+	return info
+}
+
+func cgroupEnabled(mountPoint, name string) bool {
+	_, err := os.Stat(path.Join(mountPoint, name))
+	return err == nil
+}
+
+func readProcBool(path string) bool {
+	val, err := ioutil.ReadFile(path)
+	if err != nil {
+		return false
+	}
+	return strings.TrimSpace(string(val)) == "1"
+}
diff --git a/pkg/sysinfo/sysinfo_linux_test.go b/pkg/sysinfo/sysinfo_linux_test.go
new file mode 100644
index 00000000..fae0fdff
--- /dev/null
+++ b/pkg/sysinfo/sysinfo_linux_test.go
@@ -0,0 +1,58 @@
+package sysinfo
+
+import (
+	"io/ioutil"
+	"os"
+	"path"
+	"path/filepath"
+	"testing"
+)
+
+func TestReadProcBool(t *testing.T) {
+	tmpDir, err := ioutil.TempDir("", "test-sysinfo-proc")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+
+	procFile := filepath.Join(tmpDir, "read-proc-bool")
+	if err := ioutil.WriteFile(procFile, []byte("1"), 0644); err != nil {
+		t.Fatal(err)
+	}
+
+	if !readProcBool(procFile) {
+		t.Fatal("expected proc bool to be true, got false")
+	}
+
+	if err := ioutil.WriteFile(procFile, []byte("0"), 0644); err != nil {
+		t.Fatal(err)
+	}
+	if readProcBool(procFile) {
+		t.Fatal("expected proc bool to be false, got true")
+	}
+
+	if readProcBool(path.Join(tmpDir, "no-exist")) {
+		t.Fatal("should be false for non-existent entry")
+	}
+
+}
+
+func TestCgroupEnabled(t *testing.T) {
+	cgroupDir, err := ioutil.TempDir("", "cgroup-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(cgroupDir)
+
+	if cgroupEnabled(cgroupDir, "test") {
+		t.Fatal("cgroupEnabled should be false")
+	}
+
+	if err := ioutil.WriteFile(path.Join(cgroupDir, "test"), []byte{}, 0644); err != nil {
+		t.Fatal(err)
+	}
+
+	if !cgroupEnabled(cgroupDir, "test") {
+		t.Fatal("cgroupEnabled should be true")
+	}
+}
diff --git a/pkg/sysinfo/sysinfo_windows.go b/pkg/sysinfo/sysinfo_windows.go
new file mode 100644
index 00000000..b4d31519
--- /dev/null
+++ b/pkg/sysinfo/sysinfo_windows.go
@@ -0,0 +1,7 @@
+package sysinfo
+
+// TODO Windows
+func New(quiet bool) *SysInfo {
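+	// Capability detection is not implemented on Windows yet, so every
+	// feature flag in the returned struct stays false. A caller sketch,
+	// identical on all platforms (quiet only suppresses warning logs on
+	// Linux):
+	//
+	//	info := sysinfo.New(true)
+	//	if !info.CgroupDevicesEnabled {
+	//		// skip features that require the devices cgroup
+	//	}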
sysInfo := &SysInfo{} + return sysInfo +} diff --git a/pkg/system/errors.go b/pkg/system/errors.go new file mode 100644 index 00000000..63045186 --- /dev/null +++ b/pkg/system/errors.go @@ -0,0 +1,9 @@ +package system + +import ( + "errors" +) + +var ( + ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") +) diff --git a/pkg/system/events_windows.go b/pkg/system/events_windows.go new file mode 100644 index 00000000..23f7c618 --- /dev/null +++ b/pkg/system/events_windows.go @@ -0,0 +1,83 @@ +package system + +// This file implements syscalls for Win32 events which are not implemented +// in golang. + +import ( + "syscall" + "unsafe" +) + +const ( + EVENT_ALL_ACCESS = 0x1F0003 + EVENT_MODIFY_STATUS = 0x0002 +) + +var ( + procCreateEvent = modkernel32.NewProc("CreateEventW") + procOpenEvent = modkernel32.NewProc("OpenEventW") + procSetEvent = modkernel32.NewProc("SetEvent") + procResetEvent = modkernel32.NewProc("ResetEvent") + procPulseEvent = modkernel32.NewProc("PulseEvent") +) + +func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) { + namep, _ := syscall.UTF16PtrFromString(name) + var _p1 uint32 = 0 + if manualReset { + _p1 = 1 + } + var _p2 uint32 = 0 + if initialState { + _p2 = 1 + } + r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep))) + use(unsafe.Pointer(namep)) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = e1 + } + return +} + +func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) { + namep, _ := syscall.UTF16PtrFromString(name) + var _p1 uint32 = 0 + if inheritHandle { + _p1 = 1 + } + r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep))) + use(unsafe.Pointer(namep)) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = e1 + } + return +} + +func SetEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procSetEvent) +} + +func ResetEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procResetEvent) +} + +func PulseEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procPulseEvent) +} + +func setResetPulse(handle syscall.Handle, proc *syscall.LazyProc) (err error) { + r0, _, _ := proc.Call(uintptr(handle)) + if r0 != 0 { + err = syscall.Errno(r0) + } + return +} + +var temp unsafe.Pointer + +// use ensures a variable is kept alive without the GC freeing while still needed +func use(p unsafe.Pointer) { + temp = p +} diff --git a/pkg/system/filesys.go b/pkg/system/filesys.go new file mode 100644 index 00000000..e1f70e8d --- /dev/null +++ b/pkg/system/filesys.go @@ -0,0 +1,11 @@ +// +build !windows + +package system + +import ( + "os" +) + +func MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} diff --git a/pkg/system/filesys_windows.go b/pkg/system/filesys_windows.go new file mode 100644 index 00000000..90b50060 --- /dev/null +++ b/pkg/system/filesys_windows.go @@ -0,0 +1,64 @@ +// +build windows + +package system + +import ( + "os" + "regexp" + "syscall" +) + +// MkdirAll implementation that is volume path aware for Windows. 
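+// A raw volume path such as the hypothetical GUID form
+//
+//	\\?\Volume{dae8d3ac-b9a1-11e9-88eb-e8554b2ba1db}
+//
+// is treated as already existing: it matches the pattern below and nil is
+// returned immediately, since os.MkdirAll would otherwise try to create
+// its "parent" directories.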
+func MkdirAll(path string, perm os.FileMode) error { + if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { + return nil + } + + // The rest of this method is copied from os.MkdirAll and should be kept + // as-is to ensure compatibility. + + // Fast path: if we can tell whether path is a directory or file, stop with success or error. + dir, err := os.Stat(path) + if err == nil { + if dir.IsDir() { + return nil + } + return &os.PathError{ + Op: "mkdir", + Path: path, + Err: syscall.ENOTDIR, + } + } + + // Slow path: make sure parent exists and then call Mkdir for path. + i := len(path) + for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. + i-- + } + + j := i + for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. + j-- + } + + if j > 1 { + // Create parent + err = MkdirAll(path[0:j-1], perm) + if err != nil { + return err + } + } + + // Parent now exists; invoke Mkdir and use its result. + err = os.Mkdir(path, perm) + if err != nil { + // Handle arguments like "foo/." by + // double-checking that directory doesn't exist. + dir, err1 := os.Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + return err + } + return nil +} diff --git a/pkg/system/lstat.go b/pkg/system/lstat.go new file mode 100644 index 00000000..d0e43b37 --- /dev/null +++ b/pkg/system/lstat.go @@ -0,0 +1,19 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Lstat takes a path to a file and returns +// a system.Stat_t type pertaining to that file. +// +// Throws an error if the file does not exist +func Lstat(path string) (*Stat_t, error) { + s := &syscall.Stat_t{} + if err := syscall.Lstat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/pkg/system/lstat_test.go b/pkg/system/lstat_test.go new file mode 100644 index 00000000..6bac492e --- /dev/null +++ b/pkg/system/lstat_test.go @@ -0,0 +1,28 @@ +package system + +import ( + "os" + "testing" +) + +// TestLstat tests Lstat for existing and non existing files +func TestLstat(t *testing.T) { + file, invalid, _, dir := prepareFiles(t) + defer os.RemoveAll(dir) + + statFile, err := Lstat(file) + if err != nil { + t.Fatal(err) + } + if statFile == nil { + t.Fatal("returned empty stat for existing file") + } + + statInvalid, err := Lstat(invalid) + if err == nil { + t.Fatal("did not return error for non-existing file") + } + if statInvalid != nil { + t.Fatal("returned non-nil stat for non-existing file") + } +} diff --git a/pkg/system/lstat_windows.go b/pkg/system/lstat_windows.go new file mode 100644 index 00000000..eee1be26 --- /dev/null +++ b/pkg/system/lstat_windows.go @@ -0,0 +1,29 @@ +// +build windows + +package system + +import ( + "os" +) + +// Some explanation for my own sanity, and hopefully maintainers in the +// future. +// +// Lstat calls os.Lstat to get a fileinfo interface back. +// This is then copied into our own locally defined structure. +// Note the Linux version uses fromStatT to do the copy back, +// but that not strictly necessary when already in an OS specific module. 
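+//
+// A caller sketch that works unchanged on every platform (the path is
+// hypothetical):
+//
+//	if st, err := system.Lstat(`C:\some\file`); err == nil {
+//		_ = st.Size() // shared metadata accessor, as on the Unix variant
+//	}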
+ +func Lstat(path string) (*Stat_t, error) { + fi, err := os.Lstat(path) + if err != nil { + return nil, err + } + + return &Stat_t{ + name: fi.Name(), + size: fi.Size(), + mode: fi.Mode(), + modTime: fi.ModTime(), + isDir: fi.IsDir()}, nil +} diff --git a/pkg/system/meminfo.go b/pkg/system/meminfo.go new file mode 100644 index 00000000..3b6e947e --- /dev/null +++ b/pkg/system/meminfo.go @@ -0,0 +1,17 @@ +package system + +// MemInfo contains memory statistics of the host system. +type MemInfo struct { + // Total usable RAM (i.e. physical RAM minus a few reserved bits and the + // kernel binary code). + MemTotal int64 + + // Amount of free memory. + MemFree int64 + + // Total amount of swap space available. + SwapTotal int64 + + // Amount of swap space that is currently unused. + SwapFree int64 +} diff --git a/pkg/system/meminfo_linux.go b/pkg/system/meminfo_linux.go new file mode 100644 index 00000000..e2ca1400 --- /dev/null +++ b/pkg/system/meminfo_linux.go @@ -0,0 +1,71 @@ +package system + +import ( + "bufio" + "errors" + "io" + "os" + "strconv" + "strings" + + "github.com/docker/docker/pkg/units" +) + +var ( + ErrMalformed = errors.New("malformed file") +) + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + file, err := os.Open("/proc/meminfo") + if err != nil { + return nil, err + } + defer file.Close() + return parseMemInfo(file) +} + +// parseMemInfo parses the /proc/meminfo file into +// a MemInfo object given a io.Reader to the file. +// +// Throws error if there are problems reading from the file +func parseMemInfo(reader io.Reader) (*MemInfo, error) { + meminfo := &MemInfo{} + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + // Expected format: ["MemTotal:", "1234", "kB"] + parts := strings.Fields(scanner.Text()) + + // Sanity checks: Skip malformed entries. + if len(parts) < 3 || parts[2] != "kB" { + continue + } + + // Convert to bytes. + size, err := strconv.Atoi(parts[1]) + if err != nil { + continue + } + bytes := int64(size) * units.KiB + + switch parts[0] { + case "MemTotal:": + meminfo.MemTotal = bytes + case "MemFree:": + meminfo.MemFree = bytes + case "SwapTotal:": + meminfo.SwapTotal = bytes + case "SwapFree:": + meminfo.SwapFree = bytes + } + + } + + // Handle errors that may have occurred during the reading of the file. 
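+	// (bufio.Scanner defers read errors: Scan just returns false, and the
+	// error, if any, surfaces here via Err; a nil Err simply means EOF.)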
+ if err := scanner.Err(); err != nil { + return nil, err + } + + return meminfo, nil +} diff --git a/pkg/system/meminfo_linux_test.go b/pkg/system/meminfo_linux_test.go new file mode 100644 index 00000000..10ddf796 --- /dev/null +++ b/pkg/system/meminfo_linux_test.go @@ -0,0 +1,38 @@ +package system + +import ( + "strings" + "testing" + + "github.com/docker/docker/pkg/units" +) + +// TestMemInfo tests parseMemInfo with a static meminfo string +func TestMemInfo(t *testing.T) { + const input = ` + MemTotal: 1 kB + MemFree: 2 kB + SwapTotal: 3 kB + SwapFree: 4 kB + Malformed1: + Malformed2: 1 + Malformed3: 2 MB + Malformed4: X kB + ` + meminfo, err := parseMemInfo(strings.NewReader(input)) + if err != nil { + t.Fatal(err) + } + if meminfo.MemTotal != 1*units.KiB { + t.Fatalf("Unexpected MemTotal: %d", meminfo.MemTotal) + } + if meminfo.MemFree != 2*units.KiB { + t.Fatalf("Unexpected MemFree: %d", meminfo.MemFree) + } + if meminfo.SwapTotal != 3*units.KiB { + t.Fatalf("Unexpected SwapTotal: %d", meminfo.SwapTotal) + } + if meminfo.SwapFree != 4*units.KiB { + t.Fatalf("Unexpected SwapFree: %d", meminfo.SwapFree) + } +} diff --git a/pkg/system/meminfo_unsupported.go b/pkg/system/meminfo_unsupported.go new file mode 100644 index 00000000..604d3387 --- /dev/null +++ b/pkg/system/meminfo_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux,!windows + +package system + +func ReadMemInfo() (*MemInfo, error) { + return nil, ErrNotSupportedPlatform +} diff --git a/pkg/system/meminfo_windows.go b/pkg/system/meminfo_windows.go new file mode 100644 index 00000000..d4664259 --- /dev/null +++ b/pkg/system/meminfo_windows.go @@ -0,0 +1,44 @@ +package system + +import ( + "syscall" + "unsafe" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") +) + +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx +type memorystatusex struct { + dwLength uint32 + dwMemoryLoad uint32 + ullTotalPhys uint64 + ullAvailPhys uint64 + ullTotalPageFile uint64 + ullAvailPageFile uint64 + ullTotalVirtual uint64 + ullAvailVirtual uint64 + ullAvailExtendedVirtual uint64 +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + msi := &memorystatusex{ + dwLength: 64, + } + r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) + if r1 == 0 { + return &MemInfo{}, nil + } + return &MemInfo{ + MemTotal: int64(msi.ullTotalPhys), + MemFree: int64(msi.ullAvailPhys), + SwapTotal: int64(msi.ullTotalPageFile), + SwapFree: int64(msi.ullAvailPageFile), + }, nil +} diff --git a/pkg/system/mknod.go b/pkg/system/mknod.go new file mode 100644 index 00000000..26617eb0 --- /dev/null +++ b/pkg/system/mknod.go @@ -0,0 +1,20 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Mknod creates a filesystem node (file, device special file or named pipe) named path +// with attributes specified by mode and dev +func Mknod(path string, mode uint32, dev int) error { + return syscall.Mknod(path, mode, dev) +} + +// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. 
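+// (For instance, a hypothetical major 8, minor 1, the traditional sda1
+// numbering, encodes as Mkdev(8, 1) == 0x801, the same value the kernel's
+// makedev(8, 1) produces.)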
+// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, +// then the top 12 bits of the minor +func Mkdev(major int64, minor int64) uint32 { + return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) +} diff --git a/pkg/system/mknod_windows.go b/pkg/system/mknod_windows.go new file mode 100644 index 00000000..1811542a --- /dev/null +++ b/pkg/system/mknod_windows.go @@ -0,0 +1,11 @@ +// +build windows + +package system + +func Mknod(path string, mode uint32, dev int) error { + return ErrNotSupportedPlatform +} + +func Mkdev(major int64, minor int64) uint32 { + panic("Mkdev not implemented on Windows.") +} diff --git a/pkg/system/stat.go b/pkg/system/stat.go new file mode 100644 index 00000000..e2ecfe52 --- /dev/null +++ b/pkg/system/stat.go @@ -0,0 +1,46 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Stat_t type contains status of a file. It contains metadata +// like permission, owner, group, size, etc about a file +type Stat_t struct { + mode uint32 + uid uint32 + gid uint32 + rdev uint64 + size int64 + mtim syscall.Timespec +} + +func (s Stat_t) Mode() uint32 { + return s.mode +} + +func (s Stat_t) Uid() uint32 { + return s.uid +} + +func (s Stat_t) Gid() uint32 { + return s.gid +} + +func (s Stat_t) Rdev() uint64 { + return s.rdev +} + +func (s Stat_t) Size() int64 { + return s.size +} + +func (s Stat_t) Mtim() syscall.Timespec { + return s.mtim +} + +func (s Stat_t) GetLastModification() syscall.Timespec { + return s.Mtim() +} diff --git a/pkg/system/stat_linux.go b/pkg/system/stat_linux.go new file mode 100644 index 00000000..80262d95 --- /dev/null +++ b/pkg/system/stat_linux.go @@ -0,0 +1,33 @@ +package system + +import ( + "syscall" +) + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*Stat_t, error) { + return &Stat_t{size: s.Size, + mode: s.Mode, + uid: s.Uid, + gid: s.Gid, + rdev: s.Rdev, + mtim: s.Mtim}, nil +} + +// FromStatT exists only on linux, and loads a system.Stat_t from a +// syscal.Stat_t. +func FromStatT(s *syscall.Stat_t) (*Stat_t, error) { + return fromStatT(s) +} + +// Stat takes a path to a file and returns +// a system.Stat_t type pertaining to that file. 
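+// Unlike Lstat, Stat follows symlinks: for a symlink argument the returned
+// Stat_t describes the link target rather than the link itself.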
+// +// Throws an error if the file does not exist +func Stat(path string) (*Stat_t, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/pkg/system/stat_test.go b/pkg/system/stat_test.go new file mode 100644 index 00000000..45341292 --- /dev/null +++ b/pkg/system/stat_test.go @@ -0,0 +1,37 @@ +package system + +import ( + "os" + "syscall" + "testing" +) + +// TestFromStatT tests fromStatT for a tempfile +func TestFromStatT(t *testing.T) { + file, _, _, dir := prepareFiles(t) + defer os.RemoveAll(dir) + + stat := &syscall.Stat_t{} + err := syscall.Lstat(file, stat) + + s, err := fromStatT(stat) + if err != nil { + t.Fatal(err) + } + + if stat.Mode != s.Mode() { + t.Fatal("got invalid mode") + } + if stat.Uid != s.Uid() { + t.Fatal("got invalid uid") + } + if stat.Gid != s.Gid() { + t.Fatal("got invalid gid") + } + if stat.Rdev != s.Rdev() { + t.Fatal("got invalid rdev") + } + if stat.Mtim != s.Mtim() { + t.Fatal("got invalid mtim") + } +} diff --git a/pkg/system/stat_unsupported.go b/pkg/system/stat_unsupported.go new file mode 100644 index 00000000..7e0d0348 --- /dev/null +++ b/pkg/system/stat_unsupported.go @@ -0,0 +1,17 @@ +// +build !linux,!windows + +package system + +import ( + "syscall" +) + +// fromStatT creates a system.Stat_t type from a syscall.Stat_t type +func fromStatT(s *syscall.Stat_t) (*Stat_t, error) { + return &Stat_t{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/pkg/system/stat_windows.go b/pkg/system/stat_windows.go new file mode 100644 index 00000000..b1fd39e8 --- /dev/null +++ b/pkg/system/stat_windows.go @@ -0,0 +1,36 @@ +// +build windows + +package system + +import ( + "os" + "time" +) + +type Stat_t struct { + name string + size int64 + mode os.FileMode + modTime time.Time + isDir bool +} + +func (s Stat_t) Name() string { + return s.name +} + +func (s Stat_t) Size() int64 { + return s.size +} + +func (s Stat_t) Mode() os.FileMode { + return s.mode +} + +func (s Stat_t) ModTime() time.Time { + return s.modTime +} + +func (s Stat_t) IsDir() bool { + return s.isDir +} diff --git a/pkg/system/umask.go b/pkg/system/umask.go new file mode 100644 index 00000000..fddbecd3 --- /dev/null +++ b/pkg/system/umask.go @@ -0,0 +1,11 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +func Umask(newmask int) (oldmask int, err error) { + return syscall.Umask(newmask), nil +} diff --git a/pkg/system/umask_windows.go b/pkg/system/umask_windows.go new file mode 100644 index 00000000..3be563f8 --- /dev/null +++ b/pkg/system/umask_windows.go @@ -0,0 +1,8 @@ +// +build windows + +package system + +func Umask(newmask int) (oldmask int, err error) { + // should not be called on cli code path + return 0, ErrNotSupportedPlatform +} diff --git a/pkg/system/utimes_darwin.go b/pkg/system/utimes_darwin.go new file mode 100644 index 00000000..4c6002fe --- /dev/null +++ b/pkg/system/utimes_darwin.go @@ -0,0 +1,11 @@ +package system + +import "syscall" + +func LUtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotSupportedPlatform +} + +func UtimesNano(path string, ts []syscall.Timespec) error { + return syscall.UtimesNano(path, ts) +} diff --git a/pkg/system/utimes_freebsd.go b/pkg/system/utimes_freebsd.go new file mode 100644 index 00000000..ceaa044c --- /dev/null +++ b/pkg/system/utimes_freebsd.go @@ -0,0 +1,24 @@ +package system + +import ( + "syscall" + "unsafe" +) + +func 
LUtimesNano(path string, ts []syscall.Timespec) error { + var _path *byte + _path, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + + if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS { + return err + } + + return nil +} + +func UtimesNano(path string, ts []syscall.Timespec) error { + return syscall.UtimesNano(path, ts) +} diff --git a/pkg/system/utimes_linux.go b/pkg/system/utimes_linux.go new file mode 100644 index 00000000..8f902982 --- /dev/null +++ b/pkg/system/utimes_linux.go @@ -0,0 +1,28 @@ +package system + +import ( + "syscall" + "unsafe" +) + +func LUtimesNano(path string, ts []syscall.Timespec) error { + // These are not currently available in syscall + AT_FDCWD := -100 + AT_SYMLINK_NOFOLLOW := 0x100 + + var _path *byte + _path, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + + if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(AT_FDCWD), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(AT_SYMLINK_NOFOLLOW), 0, 0); err != 0 && err != syscall.ENOSYS { + return err + } + + return nil +} + +func UtimesNano(path string, ts []syscall.Timespec) error { + return syscall.UtimesNano(path, ts) +} diff --git a/pkg/system/utimes_test.go b/pkg/system/utimes_test.go new file mode 100644 index 00000000..350cce1e --- /dev/null +++ b/pkg/system/utimes_test.go @@ -0,0 +1,66 @@ +package system + +import ( + "io/ioutil" + "os" + "path/filepath" + "syscall" + "testing" +) + +// prepareFiles creates files for testing in the temp directory +func prepareFiles(t *testing.T) (string, string, string, string) { + dir, err := ioutil.TempDir("", "docker-system-test") + if err != nil { + t.Fatal(err) + } + + file := filepath.Join(dir, "exist") + if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil { + t.Fatal(err) + } + + invalid := filepath.Join(dir, "doesnt-exist") + + symlink := filepath.Join(dir, "symlink") + if err := os.Symlink(file, symlink); err != nil { + t.Fatal(err) + } + + return file, invalid, symlink, dir +} + +func TestLUtimesNano(t *testing.T) { + file, invalid, symlink, dir := prepareFiles(t) + defer os.RemoveAll(dir) + + before, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + ts := []syscall.Timespec{{0, 0}, {0, 0}} + if err := LUtimesNano(symlink, ts); err != nil { + t.Fatal(err) + } + + symlinkInfo, err := os.Lstat(symlink) + if err != nil { + t.Fatal(err) + } + if before.ModTime().Unix() == symlinkInfo.ModTime().Unix() { + t.Fatal("The modification time of the symlink should be different") + } + + fileInfo, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + if before.ModTime().Unix() != fileInfo.ModTime().Unix() { + t.Fatal("The modification time of the file should be same") + } + + if err := LUtimesNano(invalid, ts); err == nil { + t.Fatal("Doesn't return an error on a non-existing file") + } +} diff --git a/pkg/system/utimes_unsupported.go b/pkg/system/utimes_unsupported.go new file mode 100644 index 00000000..adf2734f --- /dev/null +++ b/pkg/system/utimes_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux,!freebsd,!darwin + +package system + +import "syscall" + +func LUtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotSupportedPlatform +} + +func UtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotSupportedPlatform +} diff --git a/pkg/system/xattrs_linux.go b/pkg/system/xattrs_linux.go new file mode 100644 index 
00000000..00edb201 --- /dev/null +++ b/pkg/system/xattrs_linux.go @@ -0,0 +1,59 @@ +package system + +import ( + "syscall" + "unsafe" +) + +// Returns a nil slice and nil error if the xattr is not set +func Lgetxattr(path string, attr string) ([]byte, error) { + pathBytes, err := syscall.BytePtrFromString(path) + if err != nil { + return nil, err + } + attrBytes, err := syscall.BytePtrFromString(attr) + if err != nil { + return nil, err + } + + dest := make([]byte, 128) + destBytes := unsafe.Pointer(&dest[0]) + sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + if errno == syscall.ENODATA { + return nil, nil + } + if errno == syscall.ERANGE { + dest = make([]byte, sz) + destBytes := unsafe.Pointer(&dest[0]) + sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + } + if errno != 0 { + return nil, errno + } + + return dest[:sz], nil +} + +var _zero uintptr + +func Lsetxattr(path string, attr string, data []byte, flags int) error { + pathBytes, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + attrBytes, err := syscall.BytePtrFromString(attr) + if err != nil { + return err + } + var dataBytes unsafe.Pointer + if len(data) > 0 { + dataBytes = unsafe.Pointer(&data[0]) + } else { + dataBytes = unsafe.Pointer(&_zero) + } + _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) + if errno != 0 { + return errno + } + return nil +} diff --git a/pkg/system/xattrs_unsupported.go b/pkg/system/xattrs_unsupported.go new file mode 100644 index 00000000..0060c167 --- /dev/null +++ b/pkg/system/xattrs_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux + +package system + +func Lgetxattr(path string, attr string) ([]byte, error) { + return nil, ErrNotSupportedPlatform +} + +func Lsetxattr(path string, attr string, data []byte, flags int) error { + return ErrNotSupportedPlatform +} diff --git a/pkg/systemd/booted.go b/pkg/systemd/booted.go new file mode 100644 index 00000000..2aae931e --- /dev/null +++ b/pkg/systemd/booted.go @@ -0,0 +1,15 @@ +package systemd + +import ( + "os" +) + +// Conversion to Go of systemd's sd_booted() +func SdBooted() bool { + s, err := os.Stat("/run/systemd/system") + if err != nil { + return false + } + + return s.IsDir() +} diff --git a/pkg/systemd/listendfd.go b/pkg/systemd/listendfd.go new file mode 100644 index 00000000..0fbc0a6a --- /dev/null +++ b/pkg/systemd/listendfd.go @@ -0,0 +1,40 @@ +package systemd + +import ( + "errors" + "net" + "strconv" + + "github.com/coreos/go-systemd/activation" +) + +// ListenFD returns the specified socket activated files as a slice of +// net.Listeners or all of the activated files if "*" is given. 
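+// systemd passes activated sockets starting at file descriptor 3
+// (SD_LISTEN_FDS_START), hence the -3 offset below: with two activated
+// sockets, addr "3" picks the first listener and "4" the second.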
+func ListenFD(addr string) ([]net.Listener, error) { + // socket activation + listeners, err := activation.Listeners(false) + if err != nil { + return nil, err + } + + if listeners == nil || len(listeners) == 0 { + return nil, errors.New("No sockets found") + } + + // default to all fds just like unix:// and tcp:// + if addr == "" { + addr = "*" + } + + fdNum, _ := strconv.Atoi(addr) + fdOffset := fdNum - 3 + if (addr != "*") && (len(listeners) < int(fdOffset)+1) { + return nil, errors.New("Too few socket activated files passed in") + } + + if addr == "*" { + return listeners, nil + } + + return []net.Listener{listeners[fdOffset]}, nil +} diff --git a/pkg/systemd/sd_notify.go b/pkg/systemd/sd_notify.go new file mode 100644 index 00000000..1993cab9 --- /dev/null +++ b/pkg/systemd/sd_notify.go @@ -0,0 +1,33 @@ +package systemd + +import ( + "errors" + "net" + "os" +) + +var SdNotifyNoSocket = errors.New("No socket") + +// Send a message to the init daemon. It is common to ignore the error. +func SdNotify(state string) error { + socketAddr := &net.UnixAddr{ + Name: os.Getenv("NOTIFY_SOCKET"), + Net: "unixgram", + } + + if socketAddr.Name == "" { + return SdNotifyNoSocket + } + + conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr) + if err != nil { + return err + } + + _, err = conn.Write([]byte(state)) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/tailfile/tailfile.go b/pkg/tailfile/tailfile.go new file mode 100644 index 00000000..92aea460 --- /dev/null +++ b/pkg/tailfile/tailfile.go @@ -0,0 +1,62 @@ +package tailfile + +import ( + "bytes" + "errors" + "io" + "os" +) + +const blockSize = 1024 + +var eol = []byte("\n") +var ErrNonPositiveLinesNumber = errors.New("Lines number must be positive") + +//TailFile returns last n lines of file f +func TailFile(f io.ReadSeeker, n int) ([][]byte, error) { + if n <= 0 { + return nil, ErrNonPositiveLinesNumber + } + size, err := f.Seek(0, os.SEEK_END) + if err != nil { + return nil, err + } + block := -1 + var data []byte + var cnt int + for { + var b []byte + step := int64(block * blockSize) + left := size + step // how many bytes to beginning + if left < 0 { + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + return nil, err + } + b = make([]byte, blockSize+left) + if _, err := f.Read(b); err != nil { + return nil, err + } + data = append(b, data...) + break + } else { + b = make([]byte, blockSize) + if _, err := f.Seek(step, os.SEEK_END); err != nil { + return nil, err + } + if _, err := f.Read(b); err != nil { + return nil, err + } + data = append(b, data...) 
+ } + cnt += bytes.Count(b, eol) + if cnt > n { + break + } + block-- + } + lines := bytes.Split(data, eol) + if n < len(lines) { + return lines[len(lines)-n-1 : len(lines)-1], nil + } + return lines[:len(lines)-1], nil +} diff --git a/pkg/tailfile/tailfile_test.go b/pkg/tailfile/tailfile_test.go new file mode 100644 index 00000000..31217c03 --- /dev/null +++ b/pkg/tailfile/tailfile_test.go @@ -0,0 +1,148 @@ +package tailfile + +import ( + "io/ioutil" + "os" + "testing" +) + +func TestTailFile(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + testFile := []byte(`first line +second line +third line +fourth line +fifth line +next first line +next second line +next third line +next fourth line +next fifth line +last first line +next first line +next second line +next third line +next fourth line +next fifth line +next first line +next second line +next third line +next fourth line +next fifth line +last second line +last third line +last fourth line +last fifth line +truncated line`) + if _, err := f.Write(testFile); err != nil { + t.Fatal(err) + } + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + t.Fatal(err) + } + expected := []string{"last fourth line", "last fifth line"} + res, err := TailFile(f, 2) + if err != nil { + t.Fatal(err) + } + for i, l := range res { + t.Logf("%s", l) + if expected[i] != string(l) { + t.Fatalf("Expected line %s, got %s", expected[i], l) + } + } +} + +func TestTailFileManyLines(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + testFile := []byte(`first line +second line +truncated line`) + if _, err := f.Write(testFile); err != nil { + t.Fatal(err) + } + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + t.Fatal(err) + } + expected := []string{"first line", "second line"} + res, err := TailFile(f, 10000) + if err != nil { + t.Fatal(err) + } + for i, l := range res { + t.Logf("%s", l) + if expected[i] != string(l) { + t.Fatalf("Expected line %s, got %s", expected[i], l) + } + } +} + +func TestTailEmptyFile(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + res, err := TailFile(f, 10000) + if err != nil { + t.Fatal(err) + } + if len(res) != 0 { + t.Fatal("Must be empty slice from empty file") + } +} + +func TestTailNegativeN(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + testFile := []byte(`first line +second line +truncated line`) + if _, err := f.Write(testFile); err != nil { + t.Fatal(err) + } + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + t.Fatal(err) + } + if _, err := TailFile(f, -1); err != ErrNonPositiveLinesNumber { + t.Fatalf("Expected ErrNonPositiveLinesNumber, got %s", err) + } + if _, err := TailFile(f, 0); err != ErrNonPositiveLinesNumber { + t.Fatalf("Expected ErrNonPositiveLinesNumber, got %s", err) + } +} + +func BenchmarkTail(b *testing.B) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + b.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + for i := 0; i < 10000; i++ { + if _, err := f.Write([]byte("tailfile pretty interesting line\n")); err != nil { + b.Fatal(err) + } + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := TailFile(f, 1000); err != nil { + b.Fatal(err) + } + } +} diff --git 
a/pkg/tarsum/builder_context.go b/pkg/tarsum/builder_context.go
new file mode 100644
index 00000000..06a42825
--- /dev/null
+++ b/pkg/tarsum/builder_context.go
@@ -0,0 +1,20 @@
+package tarsum
+
+// BuilderContext extends TarSum by adding the Remove method. In general
+// there was concern about adding this method to TarSum itself so instead
+// it is being added just to "BuilderContext" which will then only be used
+// during the .dockerignore file processing - see builder/evaluator.go
+type BuilderContext interface {
+	TarSum
+	Remove(string)
+}
+
+func (bc *tarSum) Remove(filename string) {
+	for i, fis := range bc.sums {
+		if fis.Name() == filename {
+			bc.sums = append(bc.sums[:i], bc.sums[i+1:]...)
+			// Note, we don't just return because there could be
+			// more than one entry with this name
+		}
+	}
+}
diff --git a/pkg/tarsum/builder_context_test.go b/pkg/tarsum/builder_context_test.go
new file mode 100644
index 00000000..719f7289
--- /dev/null
+++ b/pkg/tarsum/builder_context_test.go
@@ -0,0 +1,63 @@
+package tarsum
+
+import (
+	"io"
+	"io/ioutil"
+	"os"
+	"testing"
+)
+
+// Removing tarsums (in the BuilderContext) that do not exist must not change the sums
+func TestTarSumRemoveNonExistent(t *testing.T) {
+	filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar"
+	reader, err := os.Open(filename)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ts, err := NewTarSum(reader, false, Version0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Read and discard bytes so that it populates sums
+	_, err = io.Copy(ioutil.Discard, ts)
+	if err != nil {
+		t.Errorf("failed to read from %s: %s", filename, err)
+	}
+
+	expected := len(ts.GetSums())
+
+	ts.(BuilderContext).Remove("")
+	ts.(BuilderContext).Remove("Anything")
+
+	if len(ts.GetSums()) != expected {
+		t.Fatalf("Expected %v sums, got %v.", expected, len(ts.GetSums()))
+	}
+}
+
+// Remove a tarsum (in the BuilderContext)
+func TestTarSumRemove(t *testing.T) {
+	filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar"
+	reader, err := os.Open(filename)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ts, err := NewTarSum(reader, false, Version0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Read and discard bytes so that it populates sums
+	_, err = io.Copy(ioutil.Discard, ts)
+	if err != nil {
+		t.Errorf("failed to read from %s: %s", filename, err)
+	}
+
+	expected := len(ts.GetSums()) - 1
+
+	ts.(BuilderContext).Remove("etc/sudoers")
+
+	if len(ts.GetSums()) != expected {
+		t.Fatalf("Expected %v sums, got %v.", expected, len(ts.GetSums()))
+	}
+}
diff --git a/pkg/tarsum/fileinfosums.go b/pkg/tarsum/fileinfosums.go
new file mode 100644
index 00000000..32e5b378
--- /dev/null
+++ b/pkg/tarsum/fileinfosums.go
@@ -0,0 +1,116 @@
+package tarsum
+
+import "sort"
+
+// This info will be accessed through an interface so the actual name and sum cannot be meddled with
+type FileInfoSumInterface interface {
+	// File name
+	Name() string
+	// Checksum of this particular file and its headers
+	Sum() string
+	// Position of file in the tar
+	Pos() int64
+}
+
+type fileInfoSum struct {
+	name string
+	sum  string
+	pos  int64
+}
+
+func (fis fileInfoSum) Name() string {
+	return fis.name
+}
+func (fis fileInfoSum) Sum() string {
+	return fis.sum
+}
+func (fis fileInfoSum) Pos() int64 {
+	return fis.pos
+}
+
+type FileInfoSums []FileInfoSumInterface
+
+// GetFile returns the first FileInfoSumInterface with a matching name
+func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface {
+	for i := range fis {
+ if fis[i].Name() == name { + return fis[i] + } + } + return nil +} + +// GetAllFile returns a FileInfoSums with all matching names +func (fis FileInfoSums) GetAllFile(name string) FileInfoSums { + f := FileInfoSums{} + for i := range fis { + if fis[i].Name() == name { + f = append(f, fis[i]) + } + } + return f +} + +func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) { + seen := make(map[string]int, len(fis)) // allocate earl. no need to grow this map. + for i := range fis { + f := fis[i] + if _, ok := seen[f.Name()]; ok { + dups = append(dups, f) + } else { + seen[f.Name()] = 0 + } + } + return dups +} + +func (fis FileInfoSums) Len() int { return len(fis) } +func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] } + +func (fis FileInfoSums) SortByPos() { + sort.Sort(byPos{fis}) +} + +func (fis FileInfoSums) SortByNames() { + sort.Sort(byName{fis}) +} + +func (fis FileInfoSums) SortBySums() { + dups := fis.GetDuplicatePaths() + if len(dups) > 0 { + sort.Sort(bySum{fis, dups}) + } else { + sort.Sort(bySum{fis, nil}) + } +} + +// byName is a sort.Sort helper for sorting by file names. +// If names are the same, order them by their appearance in the tar archive +type byName struct{ FileInfoSums } + +func (bn byName) Less(i, j int) bool { + if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() { + return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos() + } + return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name() +} + +// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive +type bySum struct { + FileInfoSums + dups FileInfoSums +} + +func (bs bySum) Less(i, j int) bool { + if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() { + return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos() + } + return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum() +} + +// byPos is a sort.Sort helper for sorting by the sums of all the fileinfos by their original order +type byPos struct{ FileInfoSums } + +func (bp byPos) Less(i, j int) bool { + return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos() +} diff --git a/pkg/tarsum/fileinfosums_test.go b/pkg/tarsum/fileinfosums_test.go new file mode 100644 index 00000000..bb700d8b --- /dev/null +++ b/pkg/tarsum/fileinfosums_test.go @@ -0,0 +1,62 @@ +package tarsum + +import "testing" + +func newFileInfoSums() FileInfoSums { + return FileInfoSums{ + fileInfoSum{name: "file3", sum: "2abcdef1234567890", pos: 2}, + fileInfoSum{name: "dup1", sum: "deadbeef1", pos: 5}, + fileInfoSum{name: "file1", sum: "0abcdef1234567890", pos: 0}, + fileInfoSum{name: "file4", sum: "3abcdef1234567890", pos: 3}, + fileInfoSum{name: "dup1", sum: "deadbeef0", pos: 4}, + fileInfoSum{name: "file2", sum: "1abcdef1234567890", pos: 1}, + } +} + +func TestSortFileInfoSums(t *testing.T) { + dups := newFileInfoSums().GetAllFile("dup1") + if len(dups) != 2 { + t.Errorf("expected length 2, got %d", len(dups)) + } + dups.SortByNames() + if dups[0].Pos() != 4 { + t.Errorf("sorted dups should be ordered by position. 
Expected 4, got %d", dups[0].Pos()) + } + + fis := newFileInfoSums() + expected := "0abcdef1234567890" + fis.SortBySums() + got := fis[0].Sum() + if got != expected { + t.Errorf("Expected %q, got %q", expected, got) + } + + fis = newFileInfoSums() + expected = "dup1" + fis.SortByNames() + gotFis := fis[0] + if gotFis.Name() != expected { + t.Errorf("Expected %q, got %q", expected, gotFis.Name()) + } + // since a duplicate is first, ensure it is ordered first by position too + if gotFis.Pos() != 4 { + t.Errorf("Expected %d, got %d", 4, gotFis.Pos()) + } + + fis = newFileInfoSums() + fis.SortByPos() + if fis[0].Pos() != 0 { + t.Errorf("sorted fileInfoSums by Pos should order them by position.") + } + + fis = newFileInfoSums() + expected = "deadbeef1" + gotFileInfoSum := fis.GetFile("dup1") + if gotFileInfoSum.Sum() != expected { + t.Errorf("Expected %q, got %q", expected, gotFileInfoSum) + } + if fis.GetFile("noPresent") != nil { + t.Errorf("Should have return nil if name not found.") + } + +} diff --git a/pkg/tarsum/tarsum.go b/pkg/tarsum/tarsum.go new file mode 100644 index 00000000..a778bb0b --- /dev/null +++ b/pkg/tarsum/tarsum.go @@ -0,0 +1,276 @@ +package tarsum + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto" + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "hash" + "io" + "strings" +) + +const ( + buf8K = 8 * 1024 + buf16K = 16 * 1024 + buf32K = 32 * 1024 +) + +// NewTarSum creates a new interface for calculating a fixed time checksum of a +// tar archive. +// +// This is used for calculating checksums of layers of an image, in some cases +// including the byte payload of the image's json metadata as well, and for +// calculating the checksums for buildcache. +func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) { + return NewTarSumHash(r, dc, v, DefaultTHash) +} + +// Create a new TarSum, providing a THash to use rather than the DefaultTHash +func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) { + headerSelector, err := getTarHeaderSelector(v) + if err != nil { + return nil, err + } + ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash} + err = ts.initTarSum() + return ts, err +} + +// Create a new TarSum using the provided TarSum version+hash label. 
+func NewTarSumForLabel(r io.Reader, disableCompression bool, label string) (TarSum, error) { + parts := strings.SplitN(label, "+", 2) + if len(parts) != 2 { + return nil, errors.New("tarsum label string should be of the form: {tarsum_version}+{hash_name}") + } + + versionName, hashName := parts[0], parts[1] + + version, ok := tarSumVersionsByName[versionName] + if !ok { + return nil, fmt.Errorf("unknown TarSum version name: %q", versionName) + } + + hashConfig, ok := standardHashConfigs[hashName] + if !ok { + return nil, fmt.Errorf("unknown TarSum hash name: %q", hashName) + } + + tHash := NewTHash(hashConfig.name, hashConfig.hash.New) + + return NewTarSumHash(r, disableCompression, version, tHash) +} + +// TarSum is the generic interface for calculating fixed time +// checksums of a tar archive +type TarSum interface { + io.Reader + GetSums() FileInfoSums + Sum([]byte) string + Version() Version + Hash() THash +} + +// tarSum struct is the structure for a Version0 checksum calculation +type tarSum struct { + io.Reader + tarR *tar.Reader + tarW *tar.Writer + writer writeCloseFlusher + bufTar *bytes.Buffer + bufWriter *bytes.Buffer + bufData []byte + h hash.Hash + tHash THash + sums FileInfoSums + fileCounter int64 + currentFile string + finished bool + first bool + DisableCompression bool // false by default. When false, the output gzip compressed. + tarSumVersion Version // this field is not exported so it can not be mutated during use + headerSelector tarHeaderSelector // handles selecting and ordering headers for files in the archive +} + +func (ts tarSum) Hash() THash { + return ts.tHash +} + +func (ts tarSum) Version() Version { + return ts.tarSumVersion +} + +// A hash.Hash type generator and its name +type THash interface { + Hash() hash.Hash + Name() string +} + +// Convenience method for creating a THash +func NewTHash(name string, h func() hash.Hash) THash { + return simpleTHash{n: name, h: h} +} + +type tHashConfig struct { + name string + hash crypto.Hash +} + +var ( + // NOTE: DO NOT include MD5 or SHA1, which are considered insecure. 
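+	// They remain constructible via NewTHash (the tests in this package
+	// build md5/sha1 hashes that way for comparison), but since they are
+	// not registered here they cannot be selected through NewTarSumForLabel.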
+ standardHashConfigs = map[string]tHashConfig{ + "sha256": {name: "sha256", hash: crypto.SHA256}, + "sha512": {name: "sha512", hash: crypto.SHA512}, + } +) + +// TarSum default is "sha256" +var DefaultTHash = NewTHash("sha256", sha256.New) + +type simpleTHash struct { + n string + h func() hash.Hash +} + +func (sth simpleTHash) Name() string { return sth.n } +func (sth simpleTHash) Hash() hash.Hash { return sth.h() } + +func (ts *tarSum) encodeHeader(h *tar.Header) error { + for _, elem := range ts.headerSelector.selectHeaders(h) { + if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { + return err + } + } + return nil +} + +func (ts *tarSum) initTarSum() error { + ts.bufTar = bytes.NewBuffer([]byte{}) + ts.bufWriter = bytes.NewBuffer([]byte{}) + ts.tarR = tar.NewReader(ts.Reader) + ts.tarW = tar.NewWriter(ts.bufTar) + if !ts.DisableCompression { + ts.writer = gzip.NewWriter(ts.bufWriter) + } else { + ts.writer = &nopCloseFlusher{Writer: ts.bufWriter} + } + if ts.tHash == nil { + ts.tHash = DefaultTHash + } + ts.h = ts.tHash.Hash() + ts.h.Reset() + ts.first = true + ts.sums = FileInfoSums{} + return nil +} + +func (ts *tarSum) Read(buf []byte) (int, error) { + if ts.finished { + return ts.bufWriter.Read(buf) + } + if len(ts.bufData) < len(buf) { + switch { + case len(buf) <= buf8K: + ts.bufData = make([]byte, buf8K) + case len(buf) <= buf16K: + ts.bufData = make([]byte, buf16K) + case len(buf) <= buf32K: + ts.bufData = make([]byte, buf32K) + default: + ts.bufData = make([]byte, len(buf)) + } + } + buf2 := ts.bufData[:len(buf)] + + n, err := ts.tarR.Read(buf2) + if err != nil { + if err == io.EOF { + if _, err := ts.h.Write(buf2[:n]); err != nil { + return 0, err + } + if !ts.first { + ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter}) + ts.fileCounter++ + ts.h.Reset() + } else { + ts.first = false + } + + currentHeader, err := ts.tarR.Next() + if err != nil { + if err == io.EOF { + if err := ts.tarW.Close(); err != nil { + return 0, err + } + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + if err := ts.writer.Close(); err != nil { + return 0, err + } + ts.finished = true + return n, nil + } + return n, err + } + ts.currentFile = strings.TrimSuffix(strings.TrimPrefix(currentHeader.Name, "./"), "/") + if err := ts.encodeHeader(currentHeader); err != nil { + return 0, err + } + if err := ts.tarW.WriteHeader(currentHeader); err != nil { + return 0, err + } + if _, err := ts.tarW.Write(buf2[:n]); err != nil { + return 0, err + } + ts.tarW.Flush() + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + ts.writer.Flush() + + return ts.bufWriter.Read(buf) + } + return n, err + } + + // Filling the hash buffer + if _, err = ts.h.Write(buf2[:n]); err != nil { + return 0, err + } + + // Filling the tar writter + if _, err = ts.tarW.Write(buf2[:n]); err != nil { + return 0, err + } + ts.tarW.Flush() + + // Filling the output writer + if _, err = io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + ts.writer.Flush() + + return ts.bufWriter.Read(buf) +} + +func (ts *tarSum) Sum(extra []byte) string { + ts.sums.SortBySums() + h := ts.tHash.Hash() + if extra != nil { + h.Write(extra) + } + for _, fis := range ts.sums { + h.Write([]byte(fis.Sum())) + } + checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil)) + return checksum +} + +func (ts *tarSum) GetSums() FileInfoSums { + return ts.sums +} diff --git 
a/pkg/tarsum/tarsum_spec.md b/pkg/tarsum/tarsum_spec.md
new file mode 100644
index 00000000..51e95373
--- /dev/null
+++ b/pkg/tarsum/tarsum_spec.md
@@ -0,0 +1,225 @@
+page_title: TarSum checksum specification
+page_description: Documentation for algorithms used in the TarSum checksum calculation
+page_keywords: docker, checksum, validation, tarsum
+
+# TarSum Checksum Specification
+
+## Abstract
+
+This document describes the algorithms used in performing the TarSum checksum
+calculation on filesystem layers, the need for this method over existing
+methods, and the versioning of this calculation.
+
+
+## Introduction
+
+In Docker, filesystems are transported as tar(1) archives. There are a variety
+of tar serialization formats [2], and a key concern here is ensuring a
+repeatable checksum given a set of inputs from a generic tar archive. Types of
+transportation include distribution to and from a registry endpoint, saving and
+loading through commands or Docker daemon APIs, transferring the build context
+from client to Docker daemon, and committing the filesystem of a container to
+become an image.
+
+Since tar archives are used for transit but often not preserved, the focus of
+the algorithm is to ensure the integrity of the preserved filesystem while
+maintaining deterministic accountability. The algorithm neither constrains the
+ordering or manipulation of the files during the creation or unpacking of the
+archive, nor includes additional metadata about the filesystem attributes.
+
+## Intended Audience
+
+This document outlines the methods used for consistent checksum calculation
+for filesystems transported via tar archives.
+
+Auditing these methodologies is an open and iterative process. This document
+should accommodate the review of source code. Ultimately, this document should
+be the starting point of further refinements to the algorithm and its future
+versions.
+
+## Concept
+
+The checksum mechanism must ensure the integrity and assurance of the
+filesystem payload.
+
+## Checksum Algorithm Profile
+
+A checksum mechanism must define the following operations and attributes:
+
+* Associated hashing cipher - used to checksum each file payload and attribute
+  information.
+* Checksum list - each file of the filesystem archive has its checksum
+  calculated from the payload and attributes of the file. The final checksum is
+  calculated from this list, with specific ordering.
+* Version - as the algorithm adapts to requirements, there are behaviors of the
+  algorithm to manage by versioning.
+* Archive being calculated - the tar archive having its checksum calculated
+
+## Elements of TarSum checksum
+
+The calculated sum output is a text string. The elements included in the output
+of the calculated sum comprise the information needed for validation of the sum
+(TarSum version and hashing cipher used) and the expected checksum in hexadecimal
+form.
+
+There are two delimiters used:
+* '+' separates TarSum version from hashing cipher
+* ':' separates calculation mechanics from expected hash
+
+Example:
+
+```
+ "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e"
+ |         |       \                                                               |
+ |         |        \                                                              |
+ |_version_|_cipher__|__                                                           |
+ |                      \                                                          |
+ |_calculation_mechanics_|______________________expected_sum______________________|
+```
+
+## Versioning
+
+Versioning was introduced [0] to accommodate needed differences in calculation
+and to maintain backward compatibility.
+
+The general algorithm is described further in the 'Calculation' section.
+
+### Version0
+
+This is the initial version of TarSum.
+
+Its element in the TarSum checksum string is `tarsum`.
+
+### Version1
+
+Its element in the TarSum checksum is `tarsum.v1`.
+
+The notable changes in this version:
+* Exclusion of file `mtime` from the file information headers, in each file
+  checksum calculation
+* Inclusion of extended attribute (`xattrs`, also seen as `SCHILY.xattr.`
+  prefixed pax tar file info headers) keys and values in each file checksum
+  calculation
+
+### VersionDev
+
+*Do not use unless validating refinements to the checksum algorithm*
+
+Its element in the TarSum checksum is `tarsum.dev`.
+
+This is a floating placeholder for the next version and grounds for testing
+changes. The methods used for calculation are subject to change without notice,
+and this version is for testing and not for production use.
+
+## Ciphers
+
+The official default and standard hashing cipher used in the calculation
+mechanic is `sha256`. This refers to the SHA-256 hash algorithm as defined in
+FIPS 180-4.
+
+Though the TarSum algorithm itself is not exclusively bound to the single
+hashing cipher `sha256`, support for alternate hashing ciphers was later added
+[1]. Use cases for alternate ciphers include future-proofing the TarSum
+checksum format and using faster cipher hashes for tar filesystem checksums.
+
+## Calculation
+
+### Requirement
+
+As mentioned earlier, the calculation must take into consideration the
+lifecycle of the tar archive, since the archive is not an immutable, permanent
+artifact; otherwise, relying on a known hashing cipher checksum of the archive
+itself would be reliable enough. The tar archive of the filesystem is used as a
+transportation medium for Docker images, and the archive is discarded once its
+contents are extracted. Therefore, items such as the order of files in the tar
+archive and timestamps are subject to change once an image is received and
+cannot be used for consistent validation.
+
+### Process
+
+The method is typically iterative due to reading tar info headers from the
+archive stream, though this is not a strict requirement.
+
+#### Files
+
+Each file in the tar archive has its contents (headers and body) checksummed
+individually using the designated associated hashing cipher. The ordered
+headers of the file are written to the checksum calculation first, and then the
+payload of the file body.
+
+The resulting checksum of the file is appended to the list of file sums. The
+sum is encoded as a string of the hexadecimal digest. Additionally, the file
+name and position in the archive are kept as references for special ordering.
+
+#### Headers
+
+The following headers are read, in this order (with the corresponding
+representation of each value):
+* 'name' - string
+* 'mode' - string of the base10 integer
+* 'uid' - string of the integer
+* 'gid' - string of the integer
+* 'size' - string of the integer
+* 'mtime' (_Version0 only_) - string of integer of the seconds since 1970-01-01 00:00:00 UTC
+* 'typeflag' - string of the char
+* 'linkname' - string
+* 'uname' - string
+* 'gname' - string
+* 'devmajor' - string of the integer
+* 'devminor' - string of the integer
+
+For Version1 and later, the extended attribute headers ("SCHILY.xattr." prefixed
+pax headers) are included after the above list. These xattr key/values are
+first sorted by key.
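+
+As a non-normative illustration, the per-file step above can be sketched in Go
+(`h`, `orderedHeaders`, and `fileBody` are assumed, hypothetical names):
+
+```
+// Write the ordered header key/value pairs first, then the file body.
+for _, kv := range orderedHeaders { // e.g. {"name", "etc/sudoers"}, {"mode", "420"}, ...
+	h.Write([]byte(kv[0] + kv[1]))
+}
+io.Copy(h, fileBody)                  // the payload of the file body follows
+sum := hex.EncodeToString(h.Sum(nil)) // appended to the list of file sums
+```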
+
+#### Header Format
+
+The ordered headers are written to the hash in the format of
+
+    "{.key}{.value}"
+
+with no newline.
+
+#### Body
+
+After the ordered headers of the file have been added to the checksum for the
+file, the body of the file is written to the hash.
+
+#### List of file sums
+
+The list of file sums is sorted by the string of the hexadecimal digest.
+
+If there are two files in the tar with matching paths, the order of occurrence
+for that path is reflected for the sums of the corresponding file header and
+body.
+
+#### Final Checksum
+
+Begin with a fresh or initial state of the associated hash cipher. If there is
+additional payload to include in the TarSum calculation for the archive, it is
+written first. Then each checksum from the ordered list of file sums is written
+to the hash.
+
+The resulting digest is formatted per the Elements of TarSum checksum,
+including the TarSum version, the associated hash cipher and the hexadecimal
+encoded checksum digest.
+
+## Security Considerations
+
+The initial version of TarSum has undergone one update that could invalidate
+handcrafted tar archives. The tar archive format supports appending of files
+with the same names as prior files in the archive. The latter file will clobber
+the prior file of the same path. Due to this the algorithm now accounts for
+files with matching paths, and orders the list of file sums accordingly [3].
+
+## Footnotes
+
+* [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0
+* [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e
+* [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29
+* [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31
+
+## Acknowledgements
+
+Joffrey F (shin-) and Guillaume J. Charmes (creack) on the initial work of the
+TarSum calculation.
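+
+As a non-normative illustration of the 'Final Checksum' step, a Go sketch
+(`fileSums` is an assumed, hypothetical name for the sorted hexadecimal file
+sums; `extra` is the optional additional payload):
+
+```
+h := sha256.New()
+if extra != nil {
+	h.Write(extra) // e.g. the image's json payload
+}
+for _, s := range fileSums { // already sorted as described above
+	h.Write([]byte(s))
+}
+checksum := "tarsum.v1+sha256:" + hex.EncodeToString(h.Sum(nil))
+```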
+ diff --git a/pkg/tarsum/tarsum_test.go b/pkg/tarsum/tarsum_test.go new file mode 100644 index 00000000..89626660 --- /dev/null +++ b/pkg/tarsum/tarsum_test.go @@ -0,0 +1,648 @@ +package tarsum + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "testing" +) + +type testLayer struct { + filename string + options *sizedOptions + jsonfile string + gzip bool + tarsum string + version Version + hash THash +} + +var testLayers = []testLayer{ + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + version: Version0, + tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"}, + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + version: VersionDev, + tarsum: "tarsum.dev+sha256:db56e35eec6ce65ba1588c20ba6b1ea23743b59e81fb6b7f358ccbde5580345c"}, + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + gzip: true, + tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"}, + { + // Tests existing version of TarSum when xattrs are present + filename: "testdata/xattr/layer.tar", + jsonfile: "testdata/xattr/json", + version: Version0, + tarsum: "tarsum+sha256:07e304a8dbcb215b37649fde1a699f8aeea47e60815707f1cdf4d55d25ff6ab4"}, + { + // Tests next version of TarSum when xattrs are present + filename: "testdata/xattr/layer.tar", + jsonfile: "testdata/xattr/json", + version: VersionDev, + tarsum: "tarsum.dev+sha256:6c58917892d77b3b357b0f9ad1e28e1f4ae4de3a8006bd3beb8beda214d8fd16"}, + { + filename: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar", + jsonfile: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json", + tarsum: "tarsum+sha256:c66bd5ec9f87b8f4c6135ca37684618f486a3dd1d113b138d0a177bfa39c2571"}, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha256:8bf12d7e67c51ee2e8306cba569398b1b9f419969521a12ffb9d8875e8836738"}, + { + // this tar has two files with the same path + filename: "testdata/collision/collision-0.tar", + tarsum: "tarsum+sha256:08653904a68d3ab5c59e65ef58c49c1581caa3c34744f8d354b3f575ea04424a"}, + { + // this tar has the same two files (with the same path), but reversed order. 
ensuring it has a different hash than above
+		filename: "testdata/collision/collision-1.tar",
+		tarsum:   "tarsum+sha256:b51c13fbefe158b5ce420d2b930eef54c5cd55c50a2ee4abdddea8fa9f081e0d"},
+	{
+		// this tar has a newer version of collision-0.tar, ensuring it has a different hash
+		filename: "testdata/collision/collision-2.tar",
+		tarsum:   "tarsum+sha256:381547080919bb82691e995508ae20ed33ce0f6948d41cafbeb70ce20c73ee8e"},
+	{
+		// this tar has a newer version of collision-1.tar, ensuring it has a different hash
+		filename: "testdata/collision/collision-3.tar",
+		tarsum:   "tarsum+sha256:f886e431c08143164a676805205979cd8fa535dfcef714db5515650eea5a7c0f"},
+	{
+		options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
+		tarsum:  "tarsum+md5:0d7529ec7a8360155b48134b8e599f53",
+		hash:    md5THash,
+	},
+	{
+		options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
+		tarsum:  "tarsum+sha1:f1fee39c5925807ff75ef1925e7a23be444ba4df",
+		hash:    sha1Hash,
+	},
+	{
+		options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
+		tarsum:  "tarsum+sha224:6319390c0b061d639085d8748b14cd55f697cf9313805218b21cf61c",
+		hash:    sha224Hash,
+	},
+	{
+		options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
+		tarsum:  "tarsum+sha384:a578ce3ce29a2ae03b8ed7c26f47d0f75b4fc849557c62454be4b5ffd66ba021e713b48ce71e947b43aab57afd5a7636",
+		hash:    sha384Hash,
+	},
+	{
+		options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
+		tarsum:  "tarsum+sha512:e9bfb90ca5a4dfc93c46ee061a5cf9837de6d2fdf82544d6460d3147290aecfabf7b5e415b9b6e72db9b8941f149d5d69fb17a394cbfaf2eac523bd9eae21855",
+		hash:    sha512Hash,
+	},
+}
+
+type sizedOptions struct {
+	num      int64
+	size     int64
+	isRand   bool
+	realFile bool
+}
+
+// make a tar:
+// * num is the number of files the tar should have
+// * size is the bytes per file
+// * isRand is whether the contents of the files should be a random chunk (otherwise it's all zeros)
+// * realFile will write to a TempFile, instead of an in-memory buffer
+func sizedTar(opts sizedOptions) io.Reader {
+	var (
+		fh  io.ReadWriter
+		err error
+	)
+	if opts.realFile {
+		fh, err = ioutil.TempFile("", "tarsum")
+		if err != nil {
+			return nil
+		}
+	} else {
+		fh = bytes.NewBuffer([]byte{})
+	}
+	tarW := tar.NewWriter(fh)
+	defer tarW.Close()
+	for i := int64(0); i < opts.num; i++ {
+		err := tarW.WriteHeader(&tar.Header{
+			Name: fmt.Sprintf("/testdata%d", i),
+			Mode: 0755,
+			Uid:  0,
+			Gid:  0,
+			Size: opts.size,
+		})
+		if err != nil {
+			return nil
+		}
+		var rBuf []byte
+		if opts.isRand {
+			rBuf = make([]byte, 8)
+			_, err = rand.Read(rBuf)
+			if err != nil {
+				return nil
+			}
+		} else {
+			rBuf = []byte{0, 0, 0, 0, 0, 0, 0, 0}
+		}
+
+		for i := int64(0); i < opts.size/int64(8); i++ {
+			tarW.Write(rBuf)
+		}
+	}
+	return fh
+}
+
+func emptyTarSum(gzip bool) (TarSum, error) {
+	reader, writer := io.Pipe()
+	tarWriter := tar.NewWriter(writer)
+
+	// Immediately close tarWriter and write-end of the
+	// Pipe in a separate goroutine so we don't block.
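+	// Closing an empty tar.Writer emits only the archive trailer: two
+	// 512-byte blocks of zeros (1024 bytes in total), which is what
+	// TestEmptyTar below expects to read back.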
+ go func() { + tarWriter.Close() + writer.Close() + }() + + return NewTarSum(reader, !gzip, Version0) +} + +// Test errors on NewTarsumForLabel +func TestNewTarSumForLabelInvalid(t *testing.T) { + reader := strings.NewReader("") + + if _, err := NewTarSumForLabel(reader, true, "invalidlabel"); err == nil { + t.Fatalf("Expected an error, got nothing.") + } + + if _, err := NewTarSumForLabel(reader, true, "invalid+sha256"); err == nil { + t.Fatalf("Expected an error, got nothing.") + } + if _, err := NewTarSumForLabel(reader, true, "tarsum.v1+invalid"); err == nil { + t.Fatalf("Expected an error, got nothing.") + } +} + +func TestNewTarSumForLabel(t *testing.T) { + + layer := testLayers[0] + + reader, err := os.Open(layer.filename) + if err != nil { + t.Fatal(err) + } + label := strings.Split(layer.tarsum, ":")[0] + ts, err := NewTarSumForLabel(reader, false, label) + if err != nil { + t.Fatal(err) + } + + // Make sure it actually worked by reading a little bit of it + nbByteToRead := 8 * 1024 + dBuf := make([]byte, nbByteToRead) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read %vKB from %s: %s", nbByteToRead, layer.filename, err) + } +} + +// TestEmptyTar tests that tarsum does not fail to read an empty tar +// and correctly returns the hex digest of an empty hash. +func TestEmptyTar(t *testing.T) { + // Test without gzip. + ts, err := emptyTarSum(false) + if err != nil { + t.Fatal(err) + } + + zeroBlock := make([]byte, 1024) + buf := new(bytes.Buffer) + + n, err := io.Copy(buf, ts) + if err != nil { + t.Fatal(err) + } + + if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), zeroBlock) { + t.Fatalf("tarSum did not write the correct number of zeroed bytes: %d", n) + } + + expectedSum := ts.Version().String() + "+sha256:" + hex.EncodeToString(sha256.New().Sum(nil)) + resultSum := ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } + + // Test with gzip. + ts, err = emptyTarSum(true) + if err != nil { + t.Fatal(err) + } + buf.Reset() + + n, err = io.Copy(buf, ts) + if err != nil { + t.Fatal(err) + } + + bufgz := new(bytes.Buffer) + gz := gzip.NewWriter(bufgz) + n, err = io.Copy(gz, bytes.NewBuffer(zeroBlock)) + gz.Close() + gzBytes := bufgz.Bytes() + + if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), gzBytes) { + t.Fatalf("tarSum did not write the correct number of gzipped-zeroed bytes: %d", n) + } + + resultSum = ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } + + // Test without ever actually writing anything. 
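+	// Even though nothing is ever read through this TarSum, Sum(nil) over
+	// an empty list of file sums must still equal the empty-hash digest
+	// computed above.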
+ if ts, err = NewTarSum(bytes.NewReader([]byte{}), true, Version0); err != nil { + t.Fatal(err) + } + + resultSum = ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } +} + +var ( + md5THash = NewTHash("md5", md5.New) + sha1Hash = NewTHash("sha1", sha1.New) + sha224Hash = NewTHash("sha224", sha256.New224) + sha384Hash = NewTHash("sha384", sha512.New384) + sha512Hash = NewTHash("sha512", sha512.New) +) + +// Test all the build-in read size : buf8K, buf16K, buf32K and more +func TestTarSumsReadSize(t *testing.T) { + // Test always on the same layer (that is big enough) + layer := testLayers[0] + + for i := 0; i < 5; i++ { + + reader, err := os.Open(layer.filename) + if err != nil { + t.Fatal(err) + } + ts, err := NewTarSum(reader, false, layer.version) + if err != nil { + t.Fatal(err) + } + + // Read and discard bytes so that it populates sums + nbByteToRead := (i + 1) * 8 * 1024 + dBuf := make([]byte, nbByteToRead) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read %vKB from %s: %s", nbByteToRead, layer.filename, err) + continue + } + } +} + +func TestTarSums(t *testing.T) { + for _, layer := range testLayers { + var ( + fh io.Reader + err error + ) + if len(layer.filename) > 0 { + fh, err = os.Open(layer.filename) + if err != nil { + t.Errorf("failed to open %s: %s", layer.filename, err) + continue + } + } else if layer.options != nil { + fh = sizedTar(*layer.options) + } else { + // What else is there to test? + t.Errorf("what to do with %#v", layer) + continue + } + if file, ok := fh.(*os.File); ok { + defer file.Close() + } + + var ts TarSum + if layer.hash == nil { + // double negatives! + ts, err = NewTarSum(fh, !layer.gzip, layer.version) + } else { + ts, err = NewTarSumHash(fh, !layer.gzip, layer.version, layer.hash) + } + if err != nil { + t.Errorf("%q :: %q", err, layer.filename) + continue + } + + // Read variable number of bytes to test dynamic buffer + dBuf := make([]byte, 1) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read 1B from %s: %s", layer.filename, err) + continue + } + dBuf = make([]byte, 16*1024) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read 16KB from %s: %s", layer.filename, err) + continue + } + + // Read and discard remaining bytes + _, err = io.Copy(ioutil.Discard, ts) + if err != nil { + t.Errorf("failed to copy from %s: %s", layer.filename, err) + continue + } + var gotSum string + if len(layer.jsonfile) > 0 { + jfh, err := os.Open(layer.jsonfile) + if err != nil { + t.Errorf("failed to open %s: %s", layer.jsonfile, err) + continue + } + buf, err := ioutil.ReadAll(jfh) + if err != nil { + t.Errorf("failed to readAll %s: %s", layer.jsonfile, err) + continue + } + gotSum = ts.Sum(buf) + } else { + gotSum = ts.Sum(nil) + } + + if layer.tarsum != gotSum { + t.Errorf("expecting [%s], but got [%s]", layer.tarsum, gotSum) + } + var expectedHashName string + if layer.hash != nil { + expectedHashName = layer.hash.Name() + } else { + expectedHashName = DefaultTHash.Name() + } + if expectedHashName != ts.Hash().Name() { + t.Errorf("expecting hash [%v], but got [%s]", expectedHashName, ts.Hash().Name()) + } + } +} + +func TestIteration(t *testing.T) { + headerTests := []struct { + expectedSum string // TODO(vbatts) it would be nice to get individual sums of each + version Version + hdr *tar.Header + data []byte + }{ + { + "tarsum+sha256:626c4a2e9a467d65c33ae81f7f3dedd4de8ccaee72af73223c4bc4718cbc7bbd", + Version0, + &tar.Header{ + Name: "file.txt", 
+ Size: 0, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte(""), + }, + { + "tarsum.dev+sha256:6ffd43a1573a9913325b4918e124ee982a99c0f3cba90fc032a65f5e20bdd465", + VersionDev, + &tar.Header{ + Name: "file.txt", + Size: 0, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte(""), + }, + { + "tarsum.dev+sha256:b38166c059e11fb77bef30bf16fba7584446e80fcc156ff46d47e36c5305d8ef", + VersionDev, + &tar.Header{ + Name: "another.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte("test"), + }, + { + "tarsum.dev+sha256:4cc2e71ac5d31833ab2be9b4f7842a14ce595ec96a37af4ed08f87bc374228cd", + VersionDev, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.key1": "value1", + "user.key2": "value2", + }, + }, + []byte("test"), + }, + { + "tarsum.dev+sha256:65f4284fa32c0d4112dd93c3637697805866415b570587e4fd266af241503760", + VersionDev, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.KEY1": "value1", // adding different case to ensure different sum + "user.key2": "value2", + }, + }, + []byte("test"), + }, + { + "tarsum+sha256:c12bb6f1303a9ddbf4576c52da74973c00d14c109bcfa76b708d5da1154a07fa", + Version0, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.NOT": "CALCULATED", + }, + }, + []byte("test"), + }, + } + for _, htest := range headerTests { + s, err := renderSumForHeader(htest.version, htest.hdr, htest.data) + if err != nil { + t.Fatal(err) + } + + if s != htest.expectedSum { + t.Errorf("expected sum: %q, got: %q", htest.expectedSum, s) + } + } + +} + +func renderSumForHeader(v Version, h *tar.Header, data []byte) (string, error) { + buf := bytes.NewBuffer(nil) + // first build our test tar + tw := tar.NewWriter(buf) + if err := tw.WriteHeader(h); err != nil { + return "", err + } + if _, err := tw.Write(data); err != nil { + return "", err + } + tw.Close() + + ts, err := NewTarSum(buf, true, v) + if err != nil { + return "", err + } + tr := tar.NewReader(ts) + for { + hdr, err := tr.Next() + if hdr == nil || err == io.EOF { + // Signals the end of the archive. 
+ break + } + if err != nil { + return "", err + } + if _, err = io.Copy(ioutil.Discard, tr); err != nil { + return "", err + } + } + return ts.Sum(nil), nil +} + +func Benchmark9kTar(b *testing.B) { + buf := bytes.NewBuffer([]byte{}) + fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") + if err != nil { + b.Error(err) + return + } + n, err := io.Copy(buf, fh) + fh.Close() + + reader := bytes.NewReader(buf.Bytes()) + + b.SetBytes(n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + reader.Seek(0, 0) + ts, err := NewTarSum(reader, true, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + } +} + +func Benchmark9kTarGzip(b *testing.B) { + buf := bytes.NewBuffer([]byte{}) + fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") + if err != nil { + b.Error(err) + return + } + n, err := io.Copy(buf, fh) + fh.Close() + + reader := bytes.NewReader(buf.Bytes()) + + b.SetBytes(n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + reader.Seek(0, 0) + ts, err := NewTarSum(reader, false, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + } +} + +// this is a single big file in the tar archive +func Benchmark1mbSingleFileTar(b *testing.B) { + benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, false) +} + +// this is a single big file in the tar archive +func Benchmark1mbSingleFileTarGzip(b *testing.B) { + benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, true) +} + +// this is 1024 1k files in the tar archive +func Benchmark1kFilesTar(b *testing.B) { + benchmarkTar(b, sizedOptions{1024, 1024, true, true}, false) +} + +// this is 1024 1k files in the tar archive +func Benchmark1kFilesTarGzip(b *testing.B) { + benchmarkTar(b, sizedOptions{1024, 1024, true, true}, true) +} + +func benchmarkTar(b *testing.B, opts sizedOptions, isGzip bool) { + var fh *os.File + tarReader := sizedTar(opts) + if br, ok := tarReader.(*os.File); ok { + fh = br + } + defer os.Remove(fh.Name()) + defer fh.Close() + + b.SetBytes(opts.size * opts.num) + b.ResetTimer() + for i := 0; i < b.N; i++ { + ts, err := NewTarSum(fh, !isGzip, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + fh.Seek(0, 0) + } +} diff --git a/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json b/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json new file mode 100644 index 00000000..48e2af34 --- /dev/null +++ b/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json @@ -0,0 +1 @@ +{"id":"46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457","parent":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","created":"2014-04-07T02:45:52.610504484Z","container":"e0f07f8d72cae171a3dcc35859960e7e956e0628bce6fedc4122bf55b2c287c7","container_config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","sed -ri 's/^(%wheel.*)(ALL)$/\\1NOPASSWD: \\2/' 
/etc/sudoers"],"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.9.1-dev","config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":3425} \ No newline at end of file diff --git a/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar b/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar new file mode 100644 index 00000000..dfd5c204 Binary files /dev/null and b/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar differ diff --git a/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json b/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json new file mode 100644 index 00000000..af57be01 --- /dev/null +++ b/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json @@ -0,0 +1 @@ +{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0} \ No newline at end of file diff --git a/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar b/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar new file mode 100644 index 00000000..880b3f2c Binary files /dev/null and b/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar differ diff --git a/pkg/tarsum/testdata/collision/collision-0.tar b/pkg/tarsum/testdata/collision/collision-0.tar new file mode 100644 index 00000000..1c636b3b Binary files /dev/null and b/pkg/tarsum/testdata/collision/collision-0.tar differ diff --git a/pkg/tarsum/testdata/collision/collision-1.tar b/pkg/tarsum/testdata/collision/collision-1.tar new file mode 100644 index 00000000..b411be97 Binary files /dev/null and b/pkg/tarsum/testdata/collision/collision-1.tar differ diff --git a/pkg/tarsum/testdata/collision/collision-2.tar b/pkg/tarsum/testdata/collision/collision-2.tar new file mode 100644 index 00000000..7b5c04a9 Binary files /dev/null and b/pkg/tarsum/testdata/collision/collision-2.tar differ diff --git a/pkg/tarsum/testdata/collision/collision-3.tar b/pkg/tarsum/testdata/collision/collision-3.tar new file mode 100644 index 00000000..f8c64586 Binary files /dev/null and b/pkg/tarsum/testdata/collision/collision-3.tar differ diff --git a/pkg/tarsum/testdata/xattr/json b/pkg/tarsum/testdata/xattr/json new file mode 100644 index 00000000..288441a9 --- 
/dev/null +++ b/pkg/tarsum/testdata/xattr/json @@ -0,0 +1 @@ +{"id":"4439c3c7f847954100b42b267e7e5529cac1d6934db082f65795c5ca2e594d93","parent":"73b164f4437db87e96e90083c73a6592f549646ae2ec00ed33c6b9b49a5c4470","created":"2014-05-16T17:19:44.091534414Z","container":"5f92fb06cc58f357f0cde41394e2bbbb664e663974b2ac1693ab07b7a306749b","container_config":{"Hostname":"9565c6517a0e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","setcap 'cap_setgid,cap_setuid+ep' ./file \u0026\u0026 getcap ./file"],"Image":"73b164f4437db87e96e90083c73a6592f549646ae2ec00ed33c6b9b49a5c4470","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.11.1-dev","config":{"Hostname":"9565c6517a0e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"73b164f4437db87e96e90083c73a6592f549646ae2ec00ed33c6b9b49a5c4470","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":0} \ No newline at end of file diff --git a/pkg/tarsum/testdata/xattr/layer.tar b/pkg/tarsum/testdata/xattr/layer.tar new file mode 100644 index 00000000..819351d4 Binary files /dev/null and b/pkg/tarsum/testdata/xattr/layer.tar differ diff --git a/pkg/tarsum/versioning.go b/pkg/tarsum/versioning.go new file mode 100644 index 00000000..3cdc6dda --- /dev/null +++ b/pkg/tarsum/versioning.go @@ -0,0 +1,150 @@ +package tarsum + +import ( + "archive/tar" + "errors" + "sort" + "strconv" + "strings" +) + +// versioning of the TarSum algorithm +// based on the prefix of the hash used +// i.e. "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b" +type Version int + +// Prefix of "tarsum" +const ( + Version0 Version = iota + Version1 + // NOTE: this variable will be either the latest or an unsettled next-version of the TarSum calculation + VersionDev +) + +// VersionLabelForChecksum returns the label for the given tarsum +// checksum, i.e., everything before the first `+` character in +// the string or an empty string if no label separator is found. 
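+//
+// For example (values mirrored from the tests below):
+//
+//	VersionLabelForChecksum("tarsum.v1+sha256:deadbeef") // "tarsum.v1"
+//	VersionLabelForChecksum("invalidChecksum")           // ""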
+func VersionLabelForChecksum(checksum string) string {
+	// Checksums are in the form: {versionLabel}+{hashID}:{hex}
+	sepIndex := strings.Index(checksum, "+")
+	if sepIndex < 0 {
+		return ""
+	}
+	return checksum[:sepIndex]
+}
+
+// GetVersions returns a list of all known tarsum Versions
+func GetVersions() []Version {
+	v := []Version{}
+	for k := range tarSumVersions {
+		v = append(v, k)
+	}
+	return v
+}
+
+var (
+	tarSumVersions = map[Version]string{
+		Version0:   "tarsum",
+		Version1:   "tarsum.v1",
+		VersionDev: "tarsum.dev",
+	}
+	tarSumVersionsByName = map[string]Version{
+		"tarsum":     Version0,
+		"tarsum.v1":  Version1,
+		"tarsum.dev": VersionDev,
+	}
+)
+
+func (tsv Version) String() string {
+	return tarSumVersions[tsv]
+}
+
+// GetVersionFromTarsum returns the Version from the provided string
+func GetVersionFromTarsum(tarsum string) (Version, error) {
+	tsv := tarsum
+	if strings.Contains(tarsum, "+") {
+		tsv = strings.SplitN(tarsum, "+", 2)[0]
+	}
+	for v, s := range tarSumVersions {
+		if s == tsv {
+			return v, nil
+		}
+	}
+	return -1, ErrNotVersion
+}
+
+// Errors that may be returned by functions in this package
+var (
+	ErrNotVersion            = errors.New("string does not include a TarSum Version")
+	ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented")
+)
+
+// tarHeaderSelector is the interface which different versions
+// of tarsum should use for selecting and ordering tar headers
+// for each item in the archive.
+type tarHeaderSelector interface {
+	selectHeaders(h *tar.Header) (orderedHeaders [][2]string)
+}
+
+type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string)
+
+func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) {
+	return f(h)
+}
+
+func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
+	return [][2]string{
+		{"name", h.Name},
+		{"mode", strconv.Itoa(int(h.Mode))},
+		{"uid", strconv.Itoa(h.Uid)},
+		{"gid", strconv.Itoa(h.Gid)},
+		{"size", strconv.Itoa(int(h.Size))},
+		{"mtime", strconv.Itoa(int(h.ModTime.UTC().Unix()))},
+		{"typeflag", string([]byte{h.Typeflag})},
+		{"linkname", h.Linkname},
+		{"uname", h.Uname},
+		{"gname", h.Gname},
+		{"devmajor", strconv.Itoa(int(h.Devmajor))},
+		{"devminor", strconv.Itoa(int(h.Devminor))},
+	}
+}
+
+func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
+	// Get extended attributes. Allocate with zero length (not len(h.Xattrs))
+	// so that append does not leave empty keys at the front of the slice.
+	xAttrKeys := make([]string, 0, len(h.Xattrs))
+	for k := range h.Xattrs {
+		xAttrKeys = append(xAttrKeys, k)
+	}
+	sort.Strings(xAttrKeys)
+
+	// Make the slice with enough capacity to hold the 11 basic headers
+	// we want from the v0 selector plus however many xattrs we have.
+	orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys))
+
+	// Copy all headers from v0 excluding the 'mtime' header (at index 5).
+	v0headers := v0TarHeaderSelect(h)
+	orderedHeaders = append(orderedHeaders, v0headers[0:5]...)
+	orderedHeaders = append(orderedHeaders, v0headers[6:]...)
+
+	// Finally, append the sorted xattrs.
+ for _, k := range xAttrKeys { + orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]}) + } + + return +} + +var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{ + Version0: v0TarHeaderSelect, + Version1: v1TarHeaderSelect, + VersionDev: v1TarHeaderSelect, +} + +func getTarHeaderSelector(v Version) (tarHeaderSelector, error) { + headerSelector, ok := registeredHeaderSelectors[v] + if !ok { + return nil, ErrVersionNotImplemented + } + + return headerSelector, nil +} diff --git a/pkg/tarsum/versioning_test.go b/pkg/tarsum/versioning_test.go new file mode 100644 index 00000000..88e0a578 --- /dev/null +++ b/pkg/tarsum/versioning_test.go @@ -0,0 +1,98 @@ +package tarsum + +import ( + "testing" +) + +func TestVersionLabelForChecksum(t *testing.T) { + version := VersionLabelForChecksum("tarsum+sha256:deadbeef") + if version != "tarsum" { + t.Fatalf("Version should have been 'tarsum', was %v", version) + } + version = VersionLabelForChecksum("tarsum.v1+sha256:deadbeef") + if version != "tarsum.v1" { + t.Fatalf("Version should have been 'tarsum.v1', was %v", version) + } + version = VersionLabelForChecksum("something+somethingelse") + if version != "something" { + t.Fatalf("Version should have been 'something', was %v", version) + } + version = VersionLabelForChecksum("invalidChecksum") + if version != "" { + t.Fatalf("Version should have been empty, was %v", version) + } +} + +func TestVersion(t *testing.T) { + expected := "tarsum" + var v Version + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } + + expected = "tarsum.v1" + v = 1 + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } + + expected = "tarsum.dev" + v = 2 + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } +} + +func TestGetVersion(t *testing.T) { + testSet := []struct { + Str string + Expected Version + }{ + {"tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", Version0}, + {"tarsum+sha256", Version0}, + {"tarsum", Version0}, + {"tarsum.dev", VersionDev}, + {"tarsum.dev+sha256:deadbeef", VersionDev}, + } + + for _, ts := range testSet { + v, err := GetVersionFromTarsum(ts.Str) + if err != nil { + t.Fatalf("%q : %s", err, ts.Str) + } + if v != ts.Expected { + t.Errorf("expected %d (%q), got %d (%q)", ts.Expected, ts.Expected, v, v) + } + } + + // test one that does not exist, to ensure it errors + str := "weak+md5:abcdeabcde" + _, err := GetVersionFromTarsum(str) + if err != ErrNotVersion { + t.Fatalf("%q : %s", err, str) + } +} + +func TestGetVersions(t *testing.T) { + expected := []Version{ + Version0, + Version1, + VersionDev, + } + versions := GetVersions() + if len(versions) != len(expected) { + t.Fatalf("Expected %v versions, got %v", len(expected), len(versions)) + } + if !containsVersion(versions, expected[0]) || !containsVersion(versions, expected[1]) || !containsVersion(versions, expected[2]) { + t.Fatalf("Expected [%v], got [%v]", expected, versions) + } +} + +func containsVersion(versions []Version, version Version) bool { + for _, v := range versions { + if v == version { + return true + } + } + return false +} diff --git a/pkg/tarsum/writercloser.go b/pkg/tarsum/writercloser.go new file mode 100644 index 00000000..9727ecde --- /dev/null +++ b/pkg/tarsum/writercloser.go @@ -0,0 +1,22 @@ +package tarsum + +import ( + "io" +) + +type writeCloseFlusher interface { + io.WriteCloser + Flush() error +} + +type nopCloseFlusher struct { + io.Writer +} + +func (n 
*nopCloseFlusher) Close() error {
+	return nil
+}
+
+func (n *nopCloseFlusher) Flush() error {
+	return nil
+}
diff --git a/pkg/term/tc_linux_cgo.go b/pkg/term/tc_linux_cgo.go
new file mode 100644
index 00000000..d47cf59b
--- /dev/null
+++ b/pkg/term/tc_linux_cgo.go
@@ -0,0 +1,48 @@
+// +build linux,cgo
+
+package term
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// #include <termios.h>
+import "C"
+
+type Termios syscall.Termios
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
+	var oldState State
+	if err := tcget(fd, &oldState.termios); err != 0 {
+		return nil, err
+	}
+
+	newState := oldState.termios
+
+	C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState)))
+	newState.Oflag = newState.Oflag | C.OPOST
+	if err := tcset(fd, &newState); err != 0 {
+		return nil, err
+	}
+	return &oldState, nil
+}
+
+func tcget(fd uintptr, p *Termios) syscall.Errno {
+	ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p)))
+	if ret != 0 {
+		return err.(syscall.Errno)
+	}
+	return 0
+}
+
+func tcset(fd uintptr, p *Termios) syscall.Errno {
+	ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p)))
+	if ret != 0 {
+		return err.(syscall.Errno)
+	}
+	return 0
+}
diff --git a/pkg/term/tc_other.go b/pkg/term/tc_other.go
new file mode 100644
index 00000000..266039ba
--- /dev/null
+++ b/pkg/term/tc_other.go
@@ -0,0 +1,19 @@
+// +build !windows
+// +build !linux !cgo
+
+package term
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+func tcget(fd uintptr, p *Termios) syscall.Errno {
+	_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p)))
+	return err
+}
+
+func tcset(fd uintptr, p *Termios) syscall.Errno {
+	_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p)))
+	return err
+}
diff --git a/pkg/term/term.go b/pkg/term/term.go
new file mode 100644
index 00000000..b945a3dc
--- /dev/null
+++ b/pkg/term/term.go
@@ -0,0 +1,118 @@
+// +build !windows
+
+package term
+
+import (
+	"errors"
+	"io"
+	"os"
+	"os/signal"
+	"syscall"
+	"unsafe"
+)
+
+var (
+	ErrInvalidState = errors.New("Invalid terminal state")
+)
+
+type State struct {
+	termios Termios
+}
+
+type Winsize struct {
+	Height uint16
+	Width  uint16
+	x      uint16
+	y      uint16
+}
+
+func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
+	return os.Stdin, os.Stdout, os.Stderr
+}
+
+func GetFdInfo(in interface{}) (uintptr, bool) {
+	var inFd uintptr
+	var isTerminalIn bool
+	if file, ok := in.(*os.File); ok {
+		inFd = file.Fd()
+		isTerminalIn = IsTerminal(inFd)
+	}
+	return inFd, isTerminalIn
+}
+
+func GetWinsize(fd uintptr) (*Winsize, error) {
+	ws := &Winsize{}
+	_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws)))
+	// Skip errno = 0
+	if err == 0 {
+		return ws, nil
+	}
+	return ws, err
+}
+
+func SetWinsize(fd uintptr, ws *Winsize) error {
+	_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws)))
+	// Skip errno = 0
+	if err == 0 {
+		return nil
+	}
+	return err
+}
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+	var termios Termios
+	return tcget(fd, &termios) == 0
+}
+
+// RestoreTerminal restores the terminal connected to the given file descriptor
+// to a previous state.
+func RestoreTerminal(fd uintptr, state *State) error { + if state == nil { + return ErrInvalidState + } + if err := tcset(fd, &state.termios); err != 0 { + return err + } + return nil +} + +func SaveState(fd uintptr) (*State, error) { + var oldState State + if err := tcget(fd, &oldState.termios); err != 0 { + return nil, err + } + + return &oldState, nil +} + +func DisableEcho(fd uintptr, state *State) error { + newState := state.termios + newState.Lflag &^= syscall.ECHO + + if err := tcset(fd, &newState); err != 0 { + return err + } + handleInterrupt(fd, state) + return nil +} + +func SetRawTerminal(fd uintptr) (*State, error) { + oldState, err := MakeRaw(fd) + if err != nil { + return nil, err + } + handleInterrupt(fd, oldState) + return oldState, err +} + +func handleInterrupt(fd uintptr, state *State) { + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, os.Interrupt) + + go func() { + _ = <-sigchan + RestoreTerminal(fd, state) + os.Exit(0) + }() +} diff --git a/pkg/term/term_windows.go b/pkg/term/term_windows.go new file mode 100644 index 00000000..f7fa1b3a --- /dev/null +++ b/pkg/term/term_windows.go @@ -0,0 +1,139 @@ +// +build windows + +package term + +import ( + "io" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/term/winconsole" +) + +// State holds the console mode for the terminal. +type State struct { + mode uint32 +} + +// Winsize is used for window size. +type Winsize struct { + Height uint16 + Width uint16 + x uint16 + y uint16 +} + +func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + switch { + case os.Getenv("ConEmuANSI") == "ON": + // The ConEmu shell emulates ANSI well by default. + return os.Stdin, os.Stdout, os.Stderr + case os.Getenv("MSYSTEM") != "": + // MSYS (mingw) does not emulate ANSI well. + return winconsole.WinConsoleStreams() + default: + return winconsole.WinConsoleStreams() + } +} + +// GetFdInfo returns file descriptor and bool indicating whether the file is a terminal. +func GetFdInfo(in interface{}) (uintptr, bool) { + return winconsole.GetHandleInfo(in) +} + +// GetWinsize retrieves the window size of the terminal connected to the passed file descriptor. +func GetWinsize(fd uintptr) (*Winsize, error) { + info, err := winconsole.GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil, err + } + + // TODO(azlinux): Set the pixel width / height of the console (currently unused by any caller) + return &Winsize{ + Width: uint16(info.Window.Right - info.Window.Left + 1), + Height: uint16(info.Window.Bottom - info.Window.Top + 1), + x: 0, + y: 0}, nil +} + +// SetWinsize sets the size of the given terminal connected to the passed file descriptor. +func SetWinsize(fd uintptr, ws *Winsize) error { + // TODO(azlinux): Implement SetWinsize + logrus.Debugf("[windows] SetWinsize: WARNING -- Unsupported method invoked") + return nil +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + return winconsole.IsConsole(fd) +} + +// RestoreTerminal restores the terminal connected to the given file descriptor to a +// previous state. +func RestoreTerminal(fd uintptr, state *State) error { + return winconsole.SetConsoleMode(fd, state.mode) +} + +// SaveState saves the state of the terminal connected to the given file descriptor. 
+func SaveState(fd uintptr) (*State, error) {
+	mode, e := winconsole.GetConsoleMode(fd)
+	if e != nil {
+		return nil, e
+	}
+	return &State{mode}, nil
+}
+
+// DisableEcho disables echo for the terminal connected to the given file descriptor.
+// -- See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
+func DisableEcho(fd uintptr, state *State) error {
+	mode := state.mode
+	mode &^= winconsole.ENABLE_ECHO_INPUT
+	mode |= winconsole.ENABLE_PROCESSED_INPUT | winconsole.ENABLE_LINE_INPUT
+	// TODO(azlinux): Core code registers a goroutine to catch os.Interrupt and reset the terminal state.
+	return winconsole.SetConsoleMode(fd, mode)
+}
+
+// SetRawTerminal puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func SetRawTerminal(fd uintptr) (*State, error) {
+	state, err := MakeRaw(fd)
+	if err != nil {
+		return nil, err
+	}
+	// TODO(azlinux): Core code registers a goroutine to catch os.Interrupt and reset the terminal state.
+	return state, err
+}
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
+	state, err := SaveState(fd)
+	if err != nil {
+		return nil, err
+	}
+
+	// See
+	// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
+	// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
+	mode := state.mode
+
+	// Disable these modes
+	mode &^= winconsole.ENABLE_ECHO_INPUT
+	mode &^= winconsole.ENABLE_LINE_INPUT
+	mode &^= winconsole.ENABLE_MOUSE_INPUT
+	mode &^= winconsole.ENABLE_WINDOW_INPUT
+	mode &^= winconsole.ENABLE_PROCESSED_INPUT
+
+	// Enable these modes
+	mode |= winconsole.ENABLE_EXTENDED_FLAGS
+	mode |= winconsole.ENABLE_INSERT_MODE
+	mode |= winconsole.ENABLE_QUICK_EDIT_MODE
+
+	err = winconsole.SetConsoleMode(fd, mode)
+	if err != nil {
+		return nil, err
+	}
+	return state, nil
+}
diff --git a/pkg/term/termios_darwin.go b/pkg/term/termios_darwin.go
new file mode 100644
index 00000000..11cd70d1
--- /dev/null
+++ b/pkg/term/termios_darwin.go
@@ -0,0 +1,65 @@
+package term
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const (
+	getTermios = syscall.TIOCGETA
+	setTermios = syscall.TIOCSETA
+
+	IGNBRK = syscall.IGNBRK
+	PARMRK = syscall.PARMRK
+	INLCR  = syscall.INLCR
+	IGNCR  = syscall.IGNCR
+	ECHONL = syscall.ECHONL
+	CSIZE  = syscall.CSIZE
+	ICRNL  = syscall.ICRNL
+	ISTRIP = syscall.ISTRIP
+	PARENB = syscall.PARENB
+	ECHO   = syscall.ECHO
+	ICANON = syscall.ICANON
+	ISIG   = syscall.ISIG
+	IXON   = syscall.IXON
+	BRKINT = syscall.BRKINT
+	INPCK  = syscall.INPCK
+	OPOST  = syscall.OPOST
+	CS8    = syscall.CS8
+	IEXTEN = syscall.IEXTEN
+)
+
+type Termios struct {
+	Iflag  uint64
+	Oflag  uint64
+	Cflag  uint64
+	Lflag  uint64
+	Cc     [20]byte
+	Ispeed uint64
+	Ospeed uint64
+}
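+
+// Usage sketch (illustrative, not part of this file's API surface): a
+// caller is expected to pair the raw-mode helpers with a deferred
+// restore. Assuming a Unix build of this package:
+//
+//	fd := os.Stdin.Fd()
+//	if term.IsTerminal(fd) {
+//		state, err := term.SetRawTerminal(fd)
+//		if err != nil {
+//			panic(err) // error handling here is illustrative
+//		}
+//		defer term.RestoreTerminal(fd, state)
+//	}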
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
+	var oldState State
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
+		return nil, err
+	}
+
+	newState := oldState.termios
+	newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON)
+	newState.Oflag &^= OPOST
+	newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN)
+	newState.Cflag &^= (CSIZE | PARENB)
+	newState.Cflag |= CS8
+	newState.Cc[syscall.VMIN] = 1
+	newState.Cc[syscall.VTIME] = 0
+
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 {
+		return nil, err
+	}
+
+	return &oldState, nil
+}
diff --git a/pkg/term/termios_freebsd.go b/pkg/term/termios_freebsd.go
new file mode 100644
index 00000000..ed365957
--- /dev/null
+++ b/pkg/term/termios_freebsd.go
@@ -0,0 +1,65 @@
+package term
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const (
+	getTermios = syscall.TIOCGETA
+	setTermios = syscall.TIOCSETA
+
+	IGNBRK = syscall.IGNBRK
+	PARMRK = syscall.PARMRK
+	INLCR  = syscall.INLCR
+	IGNCR  = syscall.IGNCR
+	ECHONL = syscall.ECHONL
+	CSIZE  = syscall.CSIZE
+	ICRNL  = syscall.ICRNL
+	ISTRIP = syscall.ISTRIP
+	PARENB = syscall.PARENB
+	ECHO   = syscall.ECHO
+	ICANON = syscall.ICANON
+	ISIG   = syscall.ISIG
+	IXON   = syscall.IXON
+	BRKINT = syscall.BRKINT
+	INPCK  = syscall.INPCK
+	OPOST  = syscall.OPOST
+	CS8    = syscall.CS8
+	IEXTEN = syscall.IEXTEN
+)
+
+type Termios struct {
+	Iflag  uint32
+	Oflag  uint32
+	Cflag  uint32
+	Lflag  uint32
+	Cc     [20]byte
+	Ispeed uint32
+	Ospeed uint32
+}
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
+	var oldState State
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
+		return nil, err
+	}
+
+	newState := oldState.termios
+	newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON)
+	newState.Oflag &^= OPOST
+	newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN)
+	newState.Cflag &^= (CSIZE | PARENB)
+	newState.Cflag |= CS8
+	newState.Cc[syscall.VMIN] = 1
+	newState.Cc[syscall.VTIME] = 0
+
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 {
+		return nil, err
+	}
+
+	return &oldState, nil
+}
diff --git a/pkg/term/termios_linux.go b/pkg/term/termios_linux.go
new file mode 100644
index 00000000..024187ff
--- /dev/null
+++ b/pkg/term/termios_linux.go
@@ -0,0 +1,46 @@
+// +build !cgo
+
+package term
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const (
+	getTermios = syscall.TCGETS
+	setTermios = syscall.TCSETS
+)
+
+type Termios struct {
+	Iflag  uint32
+	Oflag  uint32
+	Cflag  uint32
+	Lflag  uint32
+	Cc     [20]byte
+	Ispeed uint32
+	Ospeed uint32
+}
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + + newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON) + newState.Oflag &^= syscall.OPOST + newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) + newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) + newState.Cflag |= syscall.CS8 + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + return &oldState, nil +} diff --git a/pkg/term/winconsole/console_windows.go b/pkg/term/winconsole/console_windows.go new file mode 100644 index 00000000..ce40a931 --- /dev/null +++ b/pkg/term/winconsole/console_windows.go @@ -0,0 +1,1053 @@ +// +build windows + +package winconsole + +import ( + "bytes" + "fmt" + "io" + "os" + "strconv" + "strings" + "sync" + "syscall" + "unsafe" + + "github.com/Sirupsen/logrus" +) + +const ( + // Consts for Get/SetConsoleMode function + // -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + ENABLE_PROCESSED_INPUT = 0x0001 + ENABLE_LINE_INPUT = 0x0002 + ENABLE_ECHO_INPUT = 0x0004 + ENABLE_WINDOW_INPUT = 0x0008 + ENABLE_MOUSE_INPUT = 0x0010 + ENABLE_INSERT_MODE = 0x0020 + ENABLE_QUICK_EDIT_MODE = 0x0040 + ENABLE_EXTENDED_FLAGS = 0x0080 + + // If parameter is a screen buffer handle, additional values + ENABLE_PROCESSED_OUTPUT = 0x0001 + ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 + + //http://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes + FOREGROUND_BLUE = 1 + FOREGROUND_GREEN = 2 + FOREGROUND_RED = 4 + FOREGROUND_INTENSITY = 8 + FOREGROUND_MASK_SET = 0x000F + FOREGROUND_MASK_UNSET = 0xFFF0 + + BACKGROUND_BLUE = 16 + BACKGROUND_GREEN = 32 + BACKGROUND_RED = 64 + BACKGROUND_INTENSITY = 128 + BACKGROUND_MASK_SET = 0x00F0 + BACKGROUND_MASK_UNSET = 0xFF0F + + COMMON_LVB_REVERSE_VIDEO = 0x4000 + COMMON_LVB_UNDERSCORE = 0x8000 + + // http://man7.org/linux/man-pages/man4/console_codes.4.html + // ECMA-48 Set Graphics Rendition + ANSI_ATTR_RESET = 0 + ANSI_ATTR_BOLD = 1 + ANSI_ATTR_DIM = 2 + ANSI_ATTR_UNDERLINE = 4 + ANSI_ATTR_BLINK = 5 + ANSI_ATTR_REVERSE = 7 + ANSI_ATTR_INVISIBLE = 8 + + ANSI_ATTR_UNDERLINE_OFF = 24 + ANSI_ATTR_BLINK_OFF = 25 + ANSI_ATTR_REVERSE_OFF = 27 + ANSI_ATTR_INVISIBLE_OFF = 8 + + ANSI_FOREGROUND_BLACK = 30 + ANSI_FOREGROUND_RED = 31 + ANSI_FOREGROUND_GREEN = 32 + ANSI_FOREGROUND_YELLOW = 33 + ANSI_FOREGROUND_BLUE = 34 + ANSI_FOREGROUND_MAGENTA = 35 + ANSI_FOREGROUND_CYAN = 36 + ANSI_FOREGROUND_WHITE = 37 + ANSI_FOREGROUND_DEFAULT = 39 + + ANSI_BACKGROUND_BLACK = 40 + ANSI_BACKGROUND_RED = 41 + ANSI_BACKGROUND_GREEN = 42 + ANSI_BACKGROUND_YELLOW = 43 + ANSI_BACKGROUND_BLUE = 44 + ANSI_BACKGROUND_MAGENTA = 45 + ANSI_BACKGROUND_CYAN = 46 + ANSI_BACKGROUND_WHITE = 47 + ANSI_BACKGROUND_DEFAULT = 49 + + ANSI_MAX_CMD_LENGTH = 256 + + MAX_INPUT_EVENTS = 128 + MAX_INPUT_BUFFER = 1024 + DEFAULT_WIDTH = 80 + DEFAULT_HEIGHT = 24 +) + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx +const ( + VK_PRIOR = 0x21 // PAGE UP key + VK_NEXT = 0x22 // PAGE DOWN key + VK_END = 0x23 // END key + VK_HOME = 0x24 // HOME key + VK_LEFT = 0x25 // LEFT ARROW key + VK_UP = 0x26 // UP ARROW key + VK_RIGHT 
= 0x27 // RIGHT ARROW key + VK_DOWN = 0x28 // DOWN ARROW key + VK_SELECT = 0x29 // SELECT key + VK_PRINT = 0x2A // PRINT key + VK_EXECUTE = 0x2B // EXECUTE key + VK_SNAPSHOT = 0x2C // PRINT SCREEN key + VK_INSERT = 0x2D // INS key + VK_DELETE = 0x2E // DEL key + VK_HELP = 0x2F // HELP key + VK_F1 = 0x70 // F1 key + VK_F2 = 0x71 // F2 key + VK_F3 = 0x72 // F3 key + VK_F4 = 0x73 // F4 key + VK_F5 = 0x74 // F5 key + VK_F6 = 0x75 // F6 key + VK_F7 = 0x76 // F7 key + VK_F8 = 0x77 // F8 key + VK_F9 = 0x78 // F9 key + VK_F10 = 0x79 // F10 key + VK_F11 = 0x7A // F11 key + VK_F12 = 0x7B // F12 key +) + +var kernel32DLL = syscall.NewLazyDLL("kernel32.dll") + +var ( + setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode") + getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") + setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition") + setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute") + fillConsoleOutputCharacterProc = kernel32DLL.NewProc("FillConsoleOutputCharacterW") + writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW") + readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW") + getNumberOfConsoleInputEventsProc = kernel32DLL.NewProc("GetNumberOfConsoleInputEvents") + getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo") + setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo") + setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo") + setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize") +) + +// types for calling various windows API +// see http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093(v=vs.85).aspx +type ( + SHORT int16 + BOOL int32 + WORD uint16 + WCHAR uint16 + DWORD uint32 + + SMALL_RECT struct { + Left SHORT + Top SHORT + Right SHORT + Bottom SHORT + } + + COORD struct { + X SHORT + Y SHORT + } + + CONSOLE_SCREEN_BUFFER_INFO struct { + Size COORD + CursorPosition COORD + Attributes WORD + Window SMALL_RECT + MaximumWindowSize COORD + } + + CONSOLE_CURSOR_INFO struct { + Size DWORD + Visible BOOL + } + + // http://msdn.microsoft.com/en-us/library/windows/desktop/ms684166(v=vs.85).aspx + KEY_EVENT_RECORD struct { + KeyDown BOOL + RepeatCount WORD + VirtualKeyCode WORD + VirtualScanCode WORD + UnicodeChar WCHAR + ControlKeyState DWORD + } + + INPUT_RECORD struct { + EventType WORD + KeyEvent KEY_EVENT_RECORD + } + + CHAR_INFO struct { + UnicodeChar WCHAR + Attributes WORD + } +) + +// TODO(azlinux): Basic type clean-up +// -- Convert all uses of uintptr to syscall.Handle to be consistent with Windows syscall +// -- Convert, as appropriate, types to use defined Windows types (e.g., DWORD instead of uint32) + +// Implements the TerminalEmulator interface +type WindowsTerminal struct { + outMutex sync.Mutex + inMutex sync.Mutex + inputBuffer []byte + inputSize int + inputEvents []INPUT_RECORD + screenBufferInfo *CONSOLE_SCREEN_BUFFER_INFO + inputEscapeSequence []byte +} + +func getStdHandle(stdhandle int) uintptr { + handle, err := syscall.GetStdHandle(stdhandle) + if err != nil { + panic(fmt.Errorf("could not get standard io handle %d", stdhandle)) + } + return uintptr(handle) +} + +func WinConsoleStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + handler := &WindowsTerminal{ + inputBuffer: make([]byte, MAX_INPUT_BUFFER), + inputEscapeSequence: []byte(KEY_ESC_CSI), + inputEvents: make([]INPUT_RECORD, MAX_INPUT_EVENTS), + } + + if IsConsole(os.Stdin.Fd()) { + stdIn = &terminalReader{ + 
wrappedReader: os.Stdin,
+			emulator:      handler,
+			command:       make([]byte, 0, ANSI_MAX_CMD_LENGTH),
+			fd:            getStdHandle(syscall.STD_INPUT_HANDLE),
+		}
+	} else {
+		stdIn = os.Stdin
+	}
+
+	if IsConsole(os.Stdout.Fd()) {
+		stdoutHandle := getStdHandle(syscall.STD_OUTPUT_HANDLE)
+
+		// Save current screen buffer info
+		screenBufferInfo, err := GetConsoleScreenBufferInfo(stdoutHandle)
+		if err != nil {
+			// If GetConsoleScreenBufferInfo returns an error, it usually means that stdout is not a TTY.
+			// However, this is in the branch where stdout is a TTY, hence the panic.
+			panic("could not get console screen buffer info")
+		}
+		handler.screenBufferInfo = screenBufferInfo
+
+		buffer = make([]CHAR_INFO, screenBufferInfo.MaximumWindowSize.X*screenBufferInfo.MaximumWindowSize.Y)
+
+		stdOut = &terminalWriter{
+			wrappedWriter: os.Stdout,
+			emulator:      handler,
+			command:       make([]byte, 0, ANSI_MAX_CMD_LENGTH),
+			fd:            stdoutHandle,
+		}
+	} else {
+		stdOut = os.Stdout
+	}
+
+	if IsConsole(os.Stderr.Fd()) {
+		stdErr = &terminalWriter{
+			wrappedWriter: os.Stderr,
+			emulator:      handler,
+			command:       make([]byte, 0, ANSI_MAX_CMD_LENGTH),
+			fd:            getStdHandle(syscall.STD_ERROR_HANDLE),
+		}
+	} else {
+		stdErr = os.Stderr
+	}
+
+	return stdIn, stdOut, stdErr
+}
+
+// GetHandleInfo returns file descriptor and bool indicating whether the file is a console.
+func GetHandleInfo(in interface{}) (uintptr, bool) {
+	var inFd uintptr
+	var isTerminalIn bool
+
+	switch t := in.(type) {
+	case *terminalReader:
+		in = t.wrappedReader
+	case *terminalWriter:
+		in = t.wrappedWriter
+	}
+
+	if file, ok := in.(*os.File); ok {
+		inFd = file.Fd()
+		isTerminalIn = IsConsole(inFd)
+	}
+	return inFd, isTerminalIn
+}
+
+func getError(r1, r2 uintptr, lastErr error) error {
+	// If the function fails, the return value is zero.
+	if r1 == 0 {
+		if lastErr != nil {
+			return lastErr
+		}
+		return syscall.EINVAL
+	}
+	return nil
+}
+
+// GetConsoleMode gets the console mode for given file descriptor
+// http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx
+func GetConsoleMode(handle uintptr) (uint32, error) {
+	var mode uint32
+	err := syscall.GetConsoleMode(syscall.Handle(handle), &mode)
+	return mode, err
+}
+
+// SetConsoleMode sets the console mode for given file descriptor
+// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
+func SetConsoleMode(handle uintptr, mode uint32) error {
+	return getError(setConsoleModeProc.Call(handle, uintptr(mode), 0))
+}
+
+// SetCursorVisible sets the cursor visibility
+// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx
+func SetCursorVisible(handle uintptr, isVisible BOOL) (bool, error) {
+	cursorInfo := &CONSOLE_CURSOR_INFO{}
+	if err := getError(getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)); err != nil {
+		return false, err
+	}
+	cursorInfo.Visible = isVisible
+	if err := getError(setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)); err != nil {
+		return false, err
+	}
+	return true, nil
+}
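+
+// Calling-convention sketch (someProc and arg are illustrative names):
+// each wrapper above and below feeds the three results of a lazy-proc
+// Call directly into getError, which maps a zero first return value to
+// lastErr, or to syscall.EINVAL when lastErr is nil:
+//
+//	if err := getError(someProc.Call(handle, uintptr(arg), 0)); err != nil {
+//		return err
+//	}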
+// SetWindowSize sets the size of the console window.
+func SetWindowSize(handle uintptr, width, height, max SHORT) (bool, error) {
+	window := SMALL_RECT{Left: 0, Top: 0, Right: width - 1, Bottom: height - 1}
+	coord := COORD{X: width - 1, Y: max}
+	if err := getError(setConsoleWindowInfoProc.Call(handle, uintptr(1), uintptr(unsafe.Pointer(&window)))); err != nil {
+		return false, err
+	}
+	if err := getError(setConsoleScreenBufferSizeProc.Call(handle, marshal(coord))); err != nil {
+		return false, err
+	}
+	return true, nil
+}
+
+// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer.
+// http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx
+func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) {
+	var info CONSOLE_SCREEN_BUFFER_INFO
+	if err := getError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0)); err != nil {
+		return nil, err
+	}
+	return &info, nil
+}
+
+// setConsoleTextAttribute sets the attributes of characters written to the
+// console screen buffer by the WriteFile or WriteConsole function.
+// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx
+func setConsoleTextAttribute(handle uintptr, attribute WORD) error {
+	return getError(setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0))
+}
+
+func writeConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) (bool, error) {
+	if err := getError(writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), marshal(bufferSize), marshal(bufferCoord), uintptr(unsafe.Pointer(writeRegion)))); err != nil {
+		return false, err
+	}
+	return true, nil
+}
+
+// http://msdn.microsoft.com/en-us/library/windows/desktop/ms682663(v=vs.85).aspx
+func fillConsoleOutputCharacter(handle uintptr, fillChar byte, length uint32, writeCord COORD) (bool, error) {
+	out := int64(0)
+	if err := getError(fillConsoleOutputCharacterProc.Call(handle, uintptr(fillChar), uintptr(length), marshal(writeCord), uintptr(unsafe.Pointer(&out)))); err != nil {
+		return false, err
+	}
+	return true, nil
+}
+
+// Gets the number of space characters to write for "clearing" the section of the terminal
+func getNumberOfChars(fromCoord COORD, toCoord COORD, screenSize COORD) uint32 {
+	// must be valid cursor position
+	if fromCoord.X < 0 || fromCoord.Y < 0 || toCoord.X < 0 || toCoord.Y < 0 {
+		return 0
+	}
+	if fromCoord.X >= screenSize.X || fromCoord.Y >= screenSize.Y || toCoord.X >= screenSize.X || toCoord.Y >= screenSize.Y {
+		return 0
+	}
+	// can't be backwards
+	if fromCoord.Y > toCoord.Y {
+		return 0
+	}
+	// same line
+	if fromCoord.Y == toCoord.Y {
+		return uint32(toCoord.X-fromCoord.X) + 1
+	}
+	// spans more than one line
+	if fromCoord.Y < toCoord.Y {
+		// from start till end of line for first line + from start of line till end
+		retValue := uint32(screenSize.X-fromCoord.X) + uint32(toCoord.X) + 1
+		// don't count first and last line
+		linesBetween := toCoord.Y - fromCoord.Y - 1
+		if linesBetween > 0 {
+			retValue = retValue + uint32(linesBetween*screenSize.X)
+		}
+		return retValue
+	}
+	return 0
+}
+
+var buffer []CHAR_INFO
+
+func clearDisplayRect(handle uintptr, attributes WORD, fromCoord COORD, toCoord COORD) (uint32, error) {
+	var writeRegion SMALL_RECT
+	writeRegion.Left = fromCoord.X
+	writeRegion.Top = fromCoord.Y
+	writeRegion.Right = toCoord.X
+	writeRegion.Bottom = toCoord.Y
+
+	// allocate and initialize buffer
+	width := toCoord.X - fromCoord.X + 1
+	height := toCoord.Y - fromCoord.Y + 1
+	size := uint32(width) * uint32(height)
+	if size > 0 {
+		buffer := make([]CHAR_INFO, size)
+		for i := range buffer {
+			buffer[i] = CHAR_INFO{WCHAR(' '), attributes}
+		}
+
+		// Write to buffer
+		r, err := writeConsoleOutput(handle, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, &writeRegion)
+		if !r {
+			if err != nil {
+				return 0, err
+			}
+			return 0, syscall.EINVAL
+		}
+	}
+	return uint32(size), nil
+}
+
+func clearDisplayRange(handle uintptr, attributes WORD, fromCoord COORD, toCoord COORD) (uint32, error) {
+	nw := uint32(0)
+	// start and end on same line
+	if fromCoord.Y == toCoord.Y {
+		return clearDisplayRect(handle, attributes, fromCoord, toCoord)
+	}
+	// TODO(azlinux): if full screen, optimize
+
+	// spans more than one line
+	if fromCoord.Y < toCoord.Y {
+		// from start position till end of line for first line
+		n, err := clearDisplayRect(handle, attributes, fromCoord, COORD{X: toCoord.X, Y: fromCoord.Y})
+		if err != nil {
+			return nw, err
+		}
+		nw += n
+		// lines between
+		linesBetween := toCoord.Y - fromCoord.Y - 1
+		if linesBetween > 0 {
+			n, err = clearDisplayRect(handle, attributes, COORD{X: 0, Y: fromCoord.Y + 1}, COORD{X: toCoord.X, Y: toCoord.Y - 1})
+			if err != nil {
+				return nw, err
+			}
+			nw += n
+		}
+		// lines at end
+		n, err = clearDisplayRect(handle, attributes, COORD{X: 0, Y: toCoord.Y}, toCoord)
+		if err != nil {
+			return nw, err
+		}
+		nw += n
+	}
+	return nw, nil
+}
+
+// setConsoleCursorPosition sets the console cursor position
+// Note: the X and Y are zero-based
+// If relative is true, then the new position is relative to the current one
+func setConsoleCursorPosition(handle uintptr, isRelative bool, column int16, line int16) error {
+	screenBufferInfo, err := GetConsoleScreenBufferInfo(handle)
+	if err != nil {
+		return err
+	}
+	var position COORD
+	if isRelative {
+		position.X = screenBufferInfo.CursorPosition.X + SHORT(column)
+		position.Y = screenBufferInfo.CursorPosition.Y + SHORT(line)
+	} else {
+		position.X = SHORT(column)
+		position.Y = SHORT(line)
+	}
+	return getError(setConsoleCursorPositionProc.Call(handle, marshal(position), 0))
+}
+
+// http://msdn.microsoft.com/en-us/library/windows/desktop/ms683207(v=vs.85).aspx
+func getNumberOfConsoleInputEvents(handle uintptr) (uint16, error) {
+	var n DWORD
+	if err := getError(getNumberOfConsoleInputEventsProc.Call(handle, uintptr(unsafe.Pointer(&n)))); err != nil {
+		return 0, err
+	}
+	return uint16(n), nil
+}
+
+// http://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx
+func readConsoleInputKey(handle uintptr, inputBuffer []INPUT_RECORD) (int, error) {
+	var nr DWORD
+	if err := getError(readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&inputBuffer[0])), uintptr(len(inputBuffer)), uintptr(unsafe.Pointer(&nr)))); err != nil {
+		return 0, err
+	}
+	return int(nr), nil
+}
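+
+// Worked example (sketch) for the SGR mapping implemented below: for the
+// sequence "\x1B[31;44m" the parameters 31 and 44 are folded into the
+// running attribute word one value at a time,
+//
+//	flag, _ = getWindowsTextAttributeForAnsiValue(flag, def, 31) // adds FOREGROUND_RED
+//	flag, _ = getWindowsTextAttributeForAnsiValue(flag, def, 44) // adds BACKGROUND_BLUE
+//
+// and reverse video (SGR 7) swaps the two color nibbles: the foreground
+// bits shift left by four into the background position while the
+// background bits shift right by four into the foreground position.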
+func getWindowsTextAttributeForAnsiValue(originalFlag WORD, defaultValue WORD, ansiValue int16) (WORD, error) {
+	flag := WORD(originalFlag)
+	if flag == 0 {
+		flag = defaultValue
+	}
+	switch ansiValue {
+	case ANSI_ATTR_RESET:
+		flag &^= COMMON_LVB_UNDERSCORE
+		flag &^= BACKGROUND_INTENSITY
+		flag = flag | FOREGROUND_INTENSITY
+	case ANSI_ATTR_INVISIBLE:
+		// TODO: how do you reset reverse?
+	case ANSI_ATTR_UNDERLINE:
+		flag = flag | COMMON_LVB_UNDERSCORE
+	case ANSI_ATTR_BLINK:
+		// seems like background intensity is blink
+		flag = flag | BACKGROUND_INTENSITY
+	case ANSI_ATTR_UNDERLINE_OFF:
+		flag &^= COMMON_LVB_UNDERSCORE
+	case ANSI_ATTR_BLINK_OFF:
+		// seems like background intensity is blink
+		flag &^= BACKGROUND_INTENSITY
+	case ANSI_ATTR_BOLD:
+		flag = flag | FOREGROUND_INTENSITY
+	case ANSI_ATTR_DIM:
+		flag &^= FOREGROUND_INTENSITY
+	case ANSI_ATTR_REVERSE, ANSI_ATTR_REVERSE_OFF:
+		// swap foreground and background bits
+		foreground := flag & FOREGROUND_MASK_SET
+		background := flag & BACKGROUND_MASK_SET
+		flag = (flag & BACKGROUND_MASK_UNSET & FOREGROUND_MASK_UNSET) | (foreground << 4) | (background >> 4)
+
+	// FOREGROUND
+	case ANSI_FOREGROUND_DEFAULT:
+		flag = (flag & FOREGROUND_MASK_UNSET) | (defaultValue & FOREGROUND_MASK_SET)
+	case ANSI_FOREGROUND_BLACK:
+		flag = flag ^ (FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE)
+	case ANSI_FOREGROUND_RED:
+		flag = (flag & FOREGROUND_MASK_UNSET) | FOREGROUND_RED
+	case ANSI_FOREGROUND_GREEN:
+		flag = (flag & FOREGROUND_MASK_UNSET) | FOREGROUND_GREEN
+	case ANSI_FOREGROUND_YELLOW:
+		flag = (flag & FOREGROUND_MASK_UNSET) | FOREGROUND_RED | FOREGROUND_GREEN
+	case ANSI_FOREGROUND_BLUE:
+		flag = (flag & FOREGROUND_MASK_UNSET) | FOREGROUND_BLUE
+	case ANSI_FOREGROUND_MAGENTA:
+		flag = (flag & FOREGROUND_MASK_UNSET) | FOREGROUND_RED | FOREGROUND_BLUE
+	case ANSI_FOREGROUND_CYAN:
+		flag = (flag & FOREGROUND_MASK_UNSET) | FOREGROUND_GREEN | FOREGROUND_BLUE
+	case ANSI_FOREGROUND_WHITE:
+		flag = (flag & FOREGROUND_MASK_UNSET) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE
+
+	// Background
+	case ANSI_BACKGROUND_DEFAULT:
+		// Black with no intensity
+		flag = (flag & BACKGROUND_MASK_UNSET) | (defaultValue & BACKGROUND_MASK_SET)
+	case ANSI_BACKGROUND_BLACK:
+		flag = (flag & BACKGROUND_MASK_UNSET)
+	case ANSI_BACKGROUND_RED:
+		flag = (flag & BACKGROUND_MASK_UNSET) | BACKGROUND_RED
+	case ANSI_BACKGROUND_GREEN:
+		flag = (flag & BACKGROUND_MASK_UNSET) | BACKGROUND_GREEN
+	case ANSI_BACKGROUND_YELLOW:
+		flag = (flag & BACKGROUND_MASK_UNSET) | BACKGROUND_RED | BACKGROUND_GREEN
+	case ANSI_BACKGROUND_BLUE:
+		flag = (flag & BACKGROUND_MASK_UNSET) | BACKGROUND_BLUE
+	case ANSI_BACKGROUND_MAGENTA:
+		flag = (flag & BACKGROUND_MASK_UNSET) | BACKGROUND_RED | BACKGROUND_BLUE
+	case ANSI_BACKGROUND_CYAN:
+		flag = (flag & BACKGROUND_MASK_UNSET) | BACKGROUND_GREEN | BACKGROUND_BLUE
+	case ANSI_BACKGROUND_WHITE:
+		flag = (flag & BACKGROUND_MASK_UNSET) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE
+	}
+	return flag, nil
+}
+
+// HandleOutputCommand interprets the ANSI commands and then makes the appropriate Win32 calls
+func (term *WindowsTerminal) HandleOutputCommand(handle uintptr, command []byte) (n int, err error) {
+	// always consider all the bytes in command processed
+	n = len(command)
+
+	parsedCommand := parseAnsiCommand(command)
+	logrus.Debugf("[windows] HandleOutputCommand: %v", parsedCommand)
+
+	// console settings changes need to happen atomically
+	term.outMutex.Lock()
+	defer term.outMutex.Unlock()
+
+	switch parsedCommand.Command {
+	case "m":
+		// [Value;...;Valuem
+		// Set Graphics Mode:
+		// Calls the graphics functions specified by the following values.
+		// These specified functions remain active until the next occurrence of this escape sequence.
+		// Graphics mode changes the colors and attributes of text (such as bold and underline) displayed on the screen.
+		screenBufferInfo, err := GetConsoleScreenBufferInfo(handle)
+		if err != nil {
+			return n, err
+		}
+		flag := screenBufferInfo.Attributes
+		for _, e := range parsedCommand.Parameters {
+			value, _ := strconv.ParseInt(e, 10, 16) // base 10, 16 bit
+			if value == ANSI_ATTR_RESET {
+				flag = term.screenBufferInfo.Attributes // reset
+			} else {
+				flag, err = getWindowsTextAttributeForAnsiValue(flag, term.screenBufferInfo.Attributes, int16(value))
+				if err != nil {
+					return n, err
+				}
+			}
+		}
+		if err := setConsoleTextAttribute(handle, flag); err != nil {
+			return n, err
+		}
+	case "H", "f":
+		// [line;columnH
+		// [line;columnf
+		// Moves the cursor to the specified position (coordinates).
+		// If you do not specify a position, the cursor moves to the home position at the upper-left corner of the screen (line 0, column 0).
+		screenBufferInfo, err := GetConsoleScreenBufferInfo(handle)
+		if err != nil {
+			return n, err
+		}
+		line, err := parseInt16OrDefault(parsedCommand.getParam(0), 1)
+		if err != nil {
+			return n, err
+		}
+		if line > int16(screenBufferInfo.Window.Bottom) {
+			line = int16(screenBufferInfo.Window.Bottom) + 1
+		}
+		column, err := parseInt16OrDefault(parsedCommand.getParam(1), 1)
+		if err != nil {
+			return n, err
+		}
+		if column > int16(screenBufferInfo.Window.Right) {
+			column = int16(screenBufferInfo.Window.Right) + 1
+		}
+		// The numbers are not 0 based, but 1 based
+		logrus.Debugf("[windows] HandleOutputCommand: Moving cursor to (%v,%v)", column-1, line-1)
+		if err := setConsoleCursorPosition(handle, false, column-1, line-1); err != nil {
+			return n, err
+		}
+
+	case "A":
+		// [valueA
+		// Moves the cursor up by the specified number of lines without changing columns.
+		// If the cursor is already on the top line, ignores this sequence.
+		value, err := parseInt16OrDefault(parsedCommand.getParam(0), 1)
+		if err != nil {
+			return n, err
+		}
+		if err := setConsoleCursorPosition(handle, true, 0, -value); err != nil {
+			return n, err
+		}
+	case "B":
+		// [valueB
+		// Moves the cursor down by the specified number of lines without changing columns.
+		// If the cursor is already on the bottom line, ignores this sequence.
+		value, err := parseInt16OrDefault(parsedCommand.getParam(0), 1)
+		if err != nil {
+			return n, err
+		}
+		if err := setConsoleCursorPosition(handle, true, 0, value); err != nil {
+			return n, err
+		}
+	case "C":
+		// [valueC
+		// Moves the cursor forward by the specified number of columns without changing lines.
+		// If the cursor is already in the rightmost column, ignores this sequence.
+		value, err := parseInt16OrDefault(parsedCommand.getParam(0), 1)
+		if err != nil {
+			return n, err
+		}
+		if err := setConsoleCursorPosition(handle, true, value, 0); err != nil {
+			return n, err
+		}
+	case "D":
+		// [valueD
+		// Moves the cursor back by the specified number of columns without changing lines.
+		// If the cursor is already in the leftmost column, ignores this sequence.
+		value, err := parseInt16OrDefault(parsedCommand.getParam(0), 1)
+		if err != nil {
+			return n, err
+		}
+		if err := setConsoleCursorPosition(handle, true, -value, 0); err != nil {
+			return n, err
+		}
+	case "J":
+		// [J  Erases from the cursor to the end of the screen, including the cursor position.
+		// [1J Erases from the beginning of the screen to the cursor, including the cursor position.
+		// [2J Erases the complete display. The cursor does not move.
+		// Clears the screen and moves the cursor to the home position (line 0, column 0).
+		value, err := parseInt16OrDefault(parsedCommand.getParam(0), 0)
+		if err != nil {
+			return n, err
+		}
+		var start COORD
+		var cursor COORD
+		var end COORD
+		screenBufferInfo, err := GetConsoleScreenBufferInfo(handle)
+		if err != nil {
+			return n, err
+		}
+		switch value {
+		case 0:
+			start = screenBufferInfo.CursorPosition
+			// end of the buffer
+			end.X = screenBufferInfo.Size.X - 1
+			end.Y = screenBufferInfo.Size.Y - 1
+			// cursor
+			cursor = screenBufferInfo.CursorPosition
+		case 1:
+			// start of the screen
+			start.X = 0
+			start.Y = 0
+			// end of the screen
+			end = screenBufferInfo.CursorPosition
+			// cursor
+			cursor = screenBufferInfo.CursorPosition
+		case 2:
+			// start of the screen
+			start.X = 0
+			start.Y = 0
+			// end of the buffer
+			end.X = screenBufferInfo.Size.X - 1
+			end.Y = screenBufferInfo.Size.Y - 1
+			// cursor
+			cursor.X = 0
+			cursor.Y = 0
+		}
+		if _, err := clearDisplayRange(uintptr(handle), term.screenBufferInfo.Attributes, start, end); err != nil {
+			return n, err
+		}
+		// remember that the cursor position is 1-based
+		if err := setConsoleCursorPosition(handle, false, int16(cursor.X), int16(cursor.Y)); err != nil {
+			return n, err
+		}
+
+	case "K":
+		// [K
+		// Clears all characters from the cursor position to the end of the line (including the character at the cursor position).
+		// [K  Erases from the cursor to the end of the line, including the cursor position.
+		// [1K Erases from the beginning of the line to the cursor, including the cursor position.
+		// [2K Erases the complete line.
+		value, err := parseInt16OrDefault(parsedCommand.getParam(0), 0)
+		if err != nil {
+			return n, err
+		}
+		var start COORD
+		var cursor COORD
+		var end COORD
+		screenBufferInfo, err := GetConsoleScreenBufferInfo(uintptr(handle))
+		if err != nil {
+			return n, err
+		}
+		switch value {
+		case 0:
+			// start is where cursor is
+			start = screenBufferInfo.CursorPosition
+			// end of line
+			end.X = screenBufferInfo.Size.X - 1
+			end.Y = screenBufferInfo.CursorPosition.Y
+			// cursor remains the same
+			cursor = screenBufferInfo.CursorPosition
+
+		case 1:
+			// beginning of line
+			start.X = 0
+			start.Y = screenBufferInfo.CursorPosition.Y
+			// until cursor
+			end = screenBufferInfo.CursorPosition
+			// cursor remains the same
+			cursor = screenBufferInfo.CursorPosition
+		case 2:
+			// start of the line
+			start.X = 0
+			start.Y = screenBufferInfo.CursorPosition.Y - 1
+			// end of the line
+			end.X = screenBufferInfo.Size.X - 1
+			end.Y = screenBufferInfo.CursorPosition.Y - 1
+			// cursor
+			cursor.X = 0
+			cursor.Y = screenBufferInfo.CursorPosition.Y - 1
+		}
+		if _, err := clearDisplayRange(uintptr(handle), term.screenBufferInfo.Attributes, start, end); err != nil {
+			return n, err
+		}
+		// remember that the cursor position is 1-based
+		if err := setConsoleCursorPosition(uintptr(handle), false, int16(cursor.X), int16(cursor.Y)); err != nil {
+			return n, err
+		}
+
+	case "l":
+		for _, value := range parsedCommand.Parameters {
+			switch value {
+			case "?25", "25":
+				SetCursorVisible(uintptr(handle), BOOL(0))
+			case "?1049", "1049":
+				// TODO (azlinux): Restore terminal
+			case "?1", "1":
+				// If the DECCKM function is reset, then the arrow keys send ANSI cursor sequences to the host.
+				term.inputEscapeSequence = []byte(KEY_ESC_CSI)
+			}
+		}
+	case "h":
+		for _, value := range parsedCommand.Parameters {
+			switch value {
+			case "?25", "25":
+				SetCursorVisible(uintptr(handle), BOOL(1))
+			case "?1049", "1049":
+				// TODO (azlinux): Save terminal
+			case "?1", "1":
+				// If the DECCKM function is set, then the arrow keys send application sequences to the host.
+ // DECCKM (default off): When set, the cursor keys send an ESC O prefix, rather than ESC [. + term.inputEscapeSequence = []byte(KEY_ESC_O) + } + } + + case "]": + /* + TODO (azlinux): + Linux Console Private CSI Sequences + + The following sequences are neither ECMA-48 nor native VT102. They are + native to the Linux console driver. Colors are in SGR parameters: 0 = + black, 1 = red, 2 = green, 3 = brown, 4 = blue, 5 = magenta, 6 = cyan, + 7 = white. + + ESC [ 1 ; n ] Set color n as the underline color + ESC [ 2 ; n ] Set color n as the dim color + ESC [ 8 ] Make the current color pair the default attributes. + ESC [ 9 ; n ] Set screen blank timeout to n minutes. + ESC [ 10 ; n ] Set bell frequency in Hz. + ESC [ 11 ; n ] Set bell duration in msec. + ESC [ 12 ; n ] Bring specified console to the front. + ESC [ 13 ] Unblank the screen. + ESC [ 14 ; n ] Set the VESA powerdown interval in minutes. + + */ + } + return n, nil +} + +// WriteChars writes the bytes to given writer. +func (term *WindowsTerminal) WriteChars(fd uintptr, w io.Writer, p []byte) (n int, err error) { + if len(p) == 0 { + return 0, nil + } + return w.Write(p) +} + +const ( + CAPSLOCK_ON = 0x0080 //The CAPS LOCK light is on. + ENHANCED_KEY = 0x0100 //The key is enhanced. + LEFT_ALT_PRESSED = 0x0002 //The left ALT key is pressed. + LEFT_CTRL_PRESSED = 0x0008 //The left CTRL key is pressed. + NUMLOCK_ON = 0x0020 //The NUM LOCK light is on. + RIGHT_ALT_PRESSED = 0x0001 //The right ALT key is pressed. + RIGHT_CTRL_PRESSED = 0x0004 //The right CTRL key is pressed. + SCROLLLOCK_ON = 0x0040 //The SCROLL LOCK light is on. + SHIFT_PRESSED = 0x0010 // The SHIFT key is pressed. +) + +const ( + KEY_CONTROL_PARAM_2 = ";2" + KEY_CONTROL_PARAM_3 = ";3" + KEY_CONTROL_PARAM_4 = ";4" + KEY_CONTROL_PARAM_5 = ";5" + KEY_CONTROL_PARAM_6 = ";6" + KEY_CONTROL_PARAM_7 = ";7" + KEY_CONTROL_PARAM_8 = ";8" + KEY_ESC_CSI = "\x1B[" + KEY_ESC_N = "\x1BN" + KEY_ESC_O = "\x1BO" +) + +var keyMapPrefix = map[WORD]string{ + VK_UP: "\x1B[%sA", + VK_DOWN: "\x1B[%sB", + VK_RIGHT: "\x1B[%sC", + VK_LEFT: "\x1B[%sD", + VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 + VK_END: "\x1B[4%s~", // showkey shows ^[[4 + VK_INSERT: "\x1B[2%s~", + VK_DELETE: "\x1B[3%s~", + VK_PRIOR: "\x1B[5%s~", + VK_NEXT: "\x1B[6%s~", + VK_F1: "", + VK_F2: "", + VK_F3: "\x1B[13%s~", + VK_F4: "\x1B[14%s~", + VK_F5: "\x1B[15%s~", + VK_F6: "\x1B[17%s~", + VK_F7: "\x1B[18%s~", + VK_F8: "\x1B[19%s~", + VK_F9: "\x1B[20%s~", + VK_F10: "\x1B[21%s~", + VK_F11: "\x1B[23%s~", + VK_F12: "\x1B[24%s~", +} + +var arrowKeyMapPrefix = map[WORD]string{ + VK_UP: "%s%sA", + VK_DOWN: "%s%sB", + VK_RIGHT: "%s%sC", + VK_LEFT: "%s%sD", +} + +func getControlStateParameter(shift, alt, control, meta bool) string { + if shift && alt && control { + return KEY_CONTROL_PARAM_8 + } + if alt && control { + return KEY_CONTROL_PARAM_7 + } + if shift && control { + return KEY_CONTROL_PARAM_6 + } + if control { + return KEY_CONTROL_PARAM_5 + } + if shift && alt { + return KEY_CONTROL_PARAM_4 + } + if alt { + return KEY_CONTROL_PARAM_3 + } + if shift { + return KEY_CONTROL_PARAM_2 + } + return "" +} + +func getControlKeys(controlState DWORD) (shift, alt, control bool) { + shift = 0 != (controlState & SHIFT_PRESSED) + alt = 0 != (controlState & (LEFT_ALT_PRESSED | RIGHT_ALT_PRESSED)) + control = 0 != (controlState & (LEFT_CTRL_PRESSED | RIGHT_CTRL_PRESSED)) + return shift, alt, control +} + +func charSequenceForKeys(key WORD, controlState DWORD, escapeSequence []byte) string { + i, ok := arrowKeyMapPrefix[key] + if ok { + shift, 
alt, control := getControlKeys(controlState)
+		modifier := getControlStateParameter(shift, alt, control, false)
+		return fmt.Sprintf(i, escapeSequence, modifier)
+	}
+
+	i, ok = keyMapPrefix[key]
+	if ok {
+		shift, alt, control := getControlKeys(controlState)
+		modifier := getControlStateParameter(shift, alt, control, false)
+		return fmt.Sprintf(i, modifier)
+	}
+
+	return ""
+}
+
+// mapKeystokeToTerminalString maps the given input event record to string
+func mapKeystokeToTerminalString(keyEvent *KEY_EVENT_RECORD, escapeSequence []byte) string {
+	_, alt, control := getControlKeys(keyEvent.ControlKeyState)
+	if keyEvent.UnicodeChar == 0 {
+		return charSequenceForKeys(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence)
+	}
+	if control {
+		// TODO(azlinux): Implement following control sequences
+		// <Ctrl>-D  Signals the end of input from the keyboard; also exits current shell.
+		// <Ctrl>-H  Deletes the first character to the left of the cursor. Also called the ERASE key.
+		// <Ctrl>-Q  Restarts printing after it has been stopped with <Ctrl>-S.
+		// <Ctrl>-S  Suspends printing on the screen (does not stop the program).
+		// <Ctrl>-U  Deletes all characters on the current line. Also called the KILL key.
+		// <Ctrl>-E  Quits current command and creates a core
+
+	}
+	// <Alt>+Key generates ESC N Key
+	if !control && alt {
+		return KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar))
+	}
+	return string(keyEvent.UnicodeChar)
+}
+
+// getAvailableInputEvents polls the console for available events
+// The function does not return until at least one input record has been read.
+func getAvailableInputEvents(handle uintptr, inputEvents []INPUT_RECORD) (n int, err error) {
+	// TODO(azlinux): Why is there a for loop? Seems to me, that `n` cannot be negative. - tibor
+	for {
+		// Read number of console events available
+		n, err = readConsoleInputKey(handle, inputEvents)
+		if err != nil || n >= 0 {
+			return n, err
+		}
+	}
+}
+
+// getTranslatedKeyCodes converts the input events into the string of characters
+// The ANSI escape sequences are used to map key strokes to strings
+func getTranslatedKeyCodes(inputEvents []INPUT_RECORD, escapeSequence []byte) string {
+	var buf bytes.Buffer
+	for i := 0; i < len(inputEvents); i++ {
+		input := inputEvents[i]
+		if input.EventType == KEY_EVENT && input.KeyEvent.KeyDown != 0 {
+			keyString := mapKeystokeToTerminalString(&input.KeyEvent, escapeSequence)
+			buf.WriteString(keyString)
+		}
+	}
+	return buf.String()
+}
+
+// ReadChars reads the characters from the given reader
+func (term *WindowsTerminal) ReadChars(fd uintptr, r io.Reader, p []byte) (n int, err error) {
+	for term.inputSize == 0 {
+		nr, err := getAvailableInputEvents(fd, term.inputEvents)
+		if nr == 0 && nil != err {
+			return n, err
+		}
+		if nr > 0 {
+			keyCodes := getTranslatedKeyCodes(term.inputEvents[:nr], term.inputEscapeSequence)
+			term.inputSize = copy(term.inputBuffer, keyCodes)
+		}
+	}
+	n = copy(p, term.inputBuffer[:term.inputSize])
+	term.inputSize -= n
+	return n, nil
+}
+
+// HandleInputSequence interprets the input sequence command
+func (term *WindowsTerminal) HandleInputSequence(fd uintptr, command []byte) (n int, err error) {
+	return 0, nil
+}
+
+func marshal(c COORD) uintptr {
+	return uintptr(*((*DWORD)(unsafe.Pointer(&c))))
+}
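+
+// Worked example (sketch): marshal above reinterprets COORD's two SHORT
+// fields as a single 32-bit word, which is how several console APIs
+// expect a COORD to be passed by value. On a little-endian machine,
+// COORD{X: 2, Y: 1} marshals to 0x00010002 (Y in the high word, X in
+// the low word).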
+
+// IsConsole returns true if the given file descriptor is a terminal.
+// -- The code assumes that GetConsoleMode will return an error for file descriptors that are not a console.
+func IsConsole(fd uintptr) bool {
+	_, e := GetConsoleMode(fd)
+	return e == nil
+}
diff --git a/pkg/term/winconsole/console_windows_test.go b/pkg/term/winconsole/console_windows_test.go
new file mode 100644
index 00000000..edb5d6f6
--- /dev/null
+++ b/pkg/term/winconsole/console_windows_test.go
@@ -0,0 +1,232 @@
+// +build windows
+
+package winconsole
+
+import (
+	"fmt"
+	"testing"
+)
+
+func helpsTestParseInt16OrDefault(t *testing.T, expectedValue int16, shouldFail bool, input string, defaultValue int16, format string, args ...interface{}) {
+	value, err := parseInt16OrDefault(input, defaultValue)
+	if nil != err && !shouldFail {
+		t.Errorf("Unexpected error returned %v", err)
+		t.Errorf(format, args...)
+	}
+	if nil == err && shouldFail {
+		t.Errorf("Should have failed as expected\n\tReturned value = %d", value)
+		t.Errorf(format, args...)
+	}
+	if expectedValue != value {
+		t.Errorf("The value returned does not match expected\n\tExpected: %v\n\tActual: %v", expectedValue, value)
+		t.Errorf(format, args...)
+	}
+}
+
+func TestParseInt16OrDefault(t *testing.T) {
+	// empty string
+	helpsTestParseInt16OrDefault(t, 0, false, "", 0, "Empty string returns default")
+	helpsTestParseInt16OrDefault(t, 2, false, "", 2, "Empty string returns default")
+
+	// normal case
+	helpsTestParseInt16OrDefault(t, 0, false, "0", 0, "0 handled correctly")
+	helpsTestParseInt16OrDefault(t, 111, false, "111", 2, "Normal")
+	helpsTestParseInt16OrDefault(t, 111, false, "+111", 2, "+N")
+	helpsTestParseInt16OrDefault(t, -111, false, "-111", 2, "-N")
+	helpsTestParseInt16OrDefault(t, 0, false, "+0", 11, "+0")
+	helpsTestParseInt16OrDefault(t, 0, false, "-0", 12, "-0")
+
+	// ill-formed strings
+	helpsTestParseInt16OrDefault(t, 0, true, "abc", 0, "Invalid string")
+	helpsTestParseInt16OrDefault(t, 42, true, "+= 23", 42, "Invalid string")
+	helpsTestParseInt16OrDefault(t, 42, true, "123.45", 42, "float like")
+}
+
+func helpsTestGetNumberOfChars(t *testing.T, expected uint32, fromCoord COORD, toCoord COORD, screenSize COORD, format string, args ...interface{}) {
+	actual := getNumberOfChars(fromCoord, toCoord, screenSize)
+	mesg := fmt.Sprintf(format, args...)
+	assertTrue(t, expected == actual, fmt.Sprintf("%s Expected=%d, Actual=%d, Parameters = { fromCoord=%+v, toCoord=%+v, screenSize=%+v }", mesg, expected, actual, fromCoord, toCoord, screenSize))
+}
+
+func TestGetNumberOfChars(t *testing.T) {
+	// Note: the columns and lines are 0-based.
+	// The interval is inclusive: it contains both the start and the end characters.
+	// This test only checks the number of characters being written.
+
+	// all four corners
+	maxWindow := COORD{X: 80, Y: 50}
+	leftTop := COORD{X: 0, Y: 0}
+	rightTop := COORD{X: 79, Y: 0}
+	leftBottom := COORD{X: 0, Y: 49}
+	rightBottom := COORD{X: 79, Y: 49}
+
+	// same position
+	helpsTestGetNumberOfChars(t, 1, COORD{X: 1, Y: 14}, COORD{X: 1, Y: 14}, COORD{X: 80, Y: 50}, "Same position random line")
+
+	// four corners
+	helpsTestGetNumberOfChars(t, 1, leftTop, leftTop, maxWindow, "Same position- leftTop")
+	helpsTestGetNumberOfChars(t, 1, rightTop, rightTop, maxWindow, "Same position- rightTop")
+	helpsTestGetNumberOfChars(t, 1, leftBottom, leftBottom, maxWindow, "Same position- leftBottom")
+	helpsTestGetNumberOfChars(t, 1, rightBottom, rightBottom, maxWindow, "Same position- rightBottom")
+
+	// from this char to next char on same line
+	helpsTestGetNumberOfChars(t, 2, COORD{X: 0, Y: 0}, COORD{X: 1, Y: 0}, maxWindow, "Next position on same line")
+	helpsTestGetNumberOfChars(t, 2, COORD{X: 1, Y: 14}, COORD{X: 2, Y: 14}, maxWindow, "Next position on same line")
+
+	// from this char to next 10 chars on same line
+	helpsTestGetNumberOfChars(t, 11, COORD{X: 0, Y: 0}, COORD{X: 10, Y: 0}, maxWindow, "Next position on same line")
+	helpsTestGetNumberOfChars(t, 11, COORD{X: 1, Y: 14}, COORD{X: 11, Y: 14}, maxWindow, "Next position on same line")
+
+	helpsTestGetNumberOfChars(t, 5, COORD{X: 3, Y: 11}, COORD{X: 7, Y: 11}, maxWindow, "To and from on same line")
+
+	helpsTestGetNumberOfChars(t, 8, COORD{X: 0, Y: 34}, COORD{X: 7, Y: 34}, maxWindow, "Start of line to middle")
+	helpsTestGetNumberOfChars(t, 4, COORD{X: 76, Y: 34}, COORD{X: 79, Y: 34}, maxWindow, "Middle to end of line")
+
+	// multiple lines - 1
+	helpsTestGetNumberOfChars(t, 81, COORD{X: 0, Y: 0}, COORD{X: 0, Y: 1}, maxWindow, "one line below same X")
+	helpsTestGetNumberOfChars(t, 81, COORD{X: 10, Y: 10}, COORD{X: 10, Y: 11}, maxWindow, "one line below same X")
+
+	// multiple lines - 2
+	helpsTestGetNumberOfChars(t, 161, COORD{X: 0, Y: 0}, COORD{X: 0, Y: 2}, maxWindow, "two lines below same X")
+	helpsTestGetNumberOfChars(t, 161, COORD{X: 10, Y: 10}, COORD{X: 10, Y: 12}, maxWindow, "two lines below same X")
+
+	// multiple lines - 3
+	helpsTestGetNumberOfChars(t, 241, COORD{X: 0, Y: 0}, COORD{X: 0, Y: 3}, maxWindow, "three lines below same X")
+	helpsTestGetNumberOfChars(t, 241, COORD{X: 10, Y: 10}, COORD{X: 10, Y: 13}, maxWindow, "three lines below same X")
+
+	// full line
+	helpsTestGetNumberOfChars(t, 80, COORD{X: 0, Y: 0}, COORD{X: 79, Y: 0}, maxWindow, "Full line - first")
+	helpsTestGetNumberOfChars(t, 80, COORD{X: 0, Y: 23}, COORD{X: 79, Y: 23}, maxWindow, "Full line - random")
+	helpsTestGetNumberOfChars(t, 80, COORD{X: 0, Y: 49}, COORD{X: 79, Y: 49}, maxWindow, "Full line - last")
+
+	// full screen
+	helpsTestGetNumberOfChars(t, 80*50, leftTop, rightBottom, maxWindow, "full screen")
+
+	helpsTestGetNumberOfChars(t, 80*50-1, COORD{X: 1, Y: 0}, rightBottom, maxWindow, "dropping first char, to end of screen")
+	helpsTestGetNumberOfChars(t, 80*50-2, COORD{X: 2, Y: 0}, rightBottom, maxWindow, "dropping first two chars, to end of screen")
+
+	helpsTestGetNumberOfChars(t, 80*50-1, leftTop, COORD{X: 78, Y: 49}, maxWindow, "from start of screen, till last char-1")
+	helpsTestGetNumberOfChars(t, 80*50-2, leftTop, COORD{X: 77, Y: 49}, maxWindow, "from start of screen, till last char-2")
+
+	helpsTestGetNumberOfChars(t, 80*50-5, COORD{X: 4, Y: 0}, COORD{X: 78, Y: 49}, COORD{X: 80, Y: 50}, "from start of screen+4, till last char-1")
+	helpsTestGetNumberOfChars(t, 80*50-6, COORD{X: 4, Y: 0}, COORD{X: 77, Y: 49}, COORD{X: 80, Y: 50}, "from start of screen+4, till last char-2")
+}
+
+var allForeground = []int16{
+	ANSI_FOREGROUND_BLACK,
+	ANSI_FOREGROUND_RED,
+	ANSI_FOREGROUND_GREEN,
+	ANSI_FOREGROUND_YELLOW,
+	ANSI_FOREGROUND_BLUE,
+	ANSI_FOREGROUND_MAGENTA,
+	ANSI_FOREGROUND_CYAN,
+	ANSI_FOREGROUND_WHITE,
+	ANSI_FOREGROUND_DEFAULT,
+}
+var allBackground = []int16{
+	ANSI_BACKGROUND_BLACK,
+	ANSI_BACKGROUND_RED,
+	ANSI_BACKGROUND_GREEN,
+	ANSI_BACKGROUND_YELLOW,
+	ANSI_BACKGROUND_BLUE,
+	ANSI_BACKGROUND_MAGENTA,
+	ANSI_BACKGROUND_CYAN,
+	ANSI_BACKGROUND_WHITE,
+	ANSI_BACKGROUND_DEFAULT,
+}
+
+func maskForeground(flag WORD) WORD {
+	return flag & FOREGROUND_MASK_UNSET
+}
+
+func onlyForeground(flag WORD) WORD {
+	return flag & FOREGROUND_MASK_SET
+}
+
+func maskBackground(flag WORD) WORD {
+	return flag & BACKGROUND_MASK_UNSET
+}
+
+func onlyBackground(flag WORD) WORD {
+	return flag & BACKGROUND_MASK_SET
+}
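+
+// Worked example (sketch) for the mask helpers above: with
+// FOREGROUND_MASK_SET = 0x000F and BACKGROUND_MASK_SET = 0x00F0,
+// onlyForeground(0x00C5) == 0x0005 and onlyBackground(0x00C5) == 0x00C0,
+// while maskForeground(0x00C5) == 0x00C0 and maskBackground(0x00C5) == 0x0005.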
+
+func helpsTestGetWindowsTextAttributeForAnsiValue(t *testing.T, oldValue WORD /*, expected WORD*/, ansi int16, onlyMask WORD, restMask WORD) WORD {
+	actual, err := getWindowsTextAttributeForAnsiValue(oldValue, FOREGROUND_MASK_SET, ansi)
+	assertTrue(t, nil == err, "Should be no error")
+	// assert that other bits are not affected
+	if 0 != oldValue {
+		assertTrue(t, (actual&restMask) == (oldValue&restMask), "The operation should not have affected other bits actual=%X oldValue=%X ansi=%d", actual, oldValue, ansi)
+	}
+	return actual
+}
+
+func TestBackgroundForAnsiValue(t *testing.T) {
+	// Check that nothing else changes
+	// background changes
+	for _, state1 := range allBackground {
+		for _, state2 := range allBackground {
+			flag := WORD(0)
+			flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET)
+			flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET)
+		}
+	}
+	// cumulative background changes
+	for _, state1 := range allBackground {
+		flag := WORD(0)
+		for _, state2 := range allBackground {
+			flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET)
+			flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET)
+		}
+	}
+	// change background after foreground
+	for _, state1 := range allForeground {
+		for _, state2 := range allBackground {
+			flag := WORD(0)
+			flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET)
+			flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET)
+		}
+	}
+	// change background after foreground, cumulatively
+	for _, state1 := range allForeground {
+		flag := WORD(0)
+		for _, state2 := range allBackground {
+			flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET)
+			flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET)
+		}
+	}
+}
+
+func TestForegroundForAnsiValue(t *testing.T) {
+	// Check that nothing else changes
+	for _, state1 := range allForeground {
+		for _, state2 := range allForeground {
+			flag := WORD(0)
+			flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET)
+			flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET)
+		}
+	}
+
+	for _, state1 := range allForeground {
+		flag := WORD(0)
+		for _, state2 := range allForeground {
+			flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET)
+			flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET)
+		}
+	}
+	for _, state1 := range allBackground {
+		for _, state2 := range allForeground {
+			flag := WORD(0)
+			flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET)
+			flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET)
+		}
+	}
+	for _, state1 := range allBackground {
+		flag := WORD(0)
+		for _, state2 := range allForeground {
+			flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET)
+			flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET)
+		}
+	}
+}
diff --git a/pkg/term/winconsole/term_emulator.go b/pkg/term/winconsole/term_emulator.go
new file mode 100644
index 00000000..2d5edc03
--- /dev/null
+++ b/pkg/term/winconsole/term_emulator.go
@@ -0,0 +1,234 @@
+package winconsole
+
+import (
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+)
+
+// http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html
+const (
+	ANSI_ESCAPE_PRIMARY   = 0x1B
+	ANSI_ESCAPE_SECONDARY = 0x5B
+	ANSI_COMMAND_FIRST    = 0x40
+	ANSI_COMMAND_LAST     = 0x7E
+	ANSI_PARAMETER_SEP    = ";"
+	ANSI_CMD_G0           = '('
+	ANSI_CMD_G1           = ')'
+	ANSI_CMD_G2           = '*'
+	ANSI_CMD_G3           = '+'
+	ANSI_CMD_DECPNM       = '>'
+	ANSI_CMD_DECPAM       = '='
+	ANSI_CMD_OSC          = ']'
+	ANSI_CMD_STR_TERM     = '\\'
+	ANSI_BEL              = 0x07
+	KEY_EVENT             = 1
+)
+
+// terminalEmulator is the interface implemented by terminal handlers
+type terminalEmulator interface {
+	HandleOutputCommand(fd uintptr, command []byte) (n int, err error)
+	HandleInputSequence(fd uintptr, command []byte) (n int, err error)
+	WriteChars(fd uintptr, w io.Writer, p []byte) (n int, err error)
+	ReadChars(fd uintptr, w io.Reader, p []byte) (n int, err error)
+}
+
+type terminalWriter struct {
+	wrappedWriter io.Writer
+	emulator      terminalEmulator
+	command       []byte
+	inSequence    bool
+	fd            uintptr
+}
+
+type terminalReader struct {
+	wrappedReader io.ReadCloser
+	emulator      terminalEmulator
+	command       []byte
+	inSequence    bool
+	fd            uintptr
+}
+
+// http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html
+func isAnsiCommandChar(b byte) bool {
+	switch {
+	case ANSI_COMMAND_FIRST <= b && b <= ANSI_COMMAND_LAST && b != ANSI_ESCAPE_SECONDARY:
+		return true
+	case b == ANSI_CMD_G1 || b == ANSI_CMD_OSC || b == ANSI_CMD_DECPAM || b == ANSI_CMD_DECPNM:
+		// non-CSI escape sequence terminator
+		return true
+	case b == ANSI_CMD_STR_TERM || b == ANSI_BEL:
+		// String escape sequence terminator
+		return true
+	}
+	return false
+}
+
+func isCharacterSelectionCmdChar(b byte) bool {
+	return (b == ANSI_CMD_G0 || b == ANSI_CMD_G1 || b == ANSI_CMD_G2 || b == ANSI_CMD_G3)
+}
+
+func isXtermOscSequence(command []byte, current byte) bool {
+	return (len(command) >= 2 && command[0] == ANSI_ESCAPE_PRIMARY && command[1] == ANSI_CMD_OSC && current != ANSI_BEL)
+}
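+
+// Scanning example (sketch) for the Write method below: given the mixed
+// stream "ab\x1B[31mcd", the writer forwards "ab" through WriteChars,
+// buffers "\x1B[31m" until its final command byte 'm' arrives and hands
+// the complete sequence to HandleOutputCommand, then forwards the
+// trailing "cd".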
+				nchar, err := tw.emulator.HandleOutputCommand(tw.fd, tw.command)
+				totalWritten += nchar
+				if err != nil {
+					return totalWritten, err
+				}
+
+				// clear the command
+				// don't include current character again
+				tw.command = tw.command[:0]
+				start = current + 1
+				tw.inSequence = false
+			}
+		}
+	} else {
+		if p[current] == ANSI_ESCAPE_PRIMARY {
+			// entering escape sequence
+			tw.inSequence = true
+			// indicates the end of the "normal sequence"; write whatever we have so far
+			if len(p[start:current]) > 0 {
+				nw, err := tw.emulator.WriteChars(tw.fd, tw.wrappedWriter, p[start:current])
+				totalWritten += nw
+				if err != nil {
+					return totalWritten, err
+				}
+			}
+			// include the current character as part of the next sequence
+			tw.command = append(tw.command, p[current])
+		}
+	}
+	}
+	// So far, only the start of an escape sequence triggered writing bytes out to the console.
+	// The bytes after the end of the last escape sequence have not been written out yet, so write them now.
+	if !tw.inSequence {
+		// we cannot be inside a sequence here, so the command buffer should be empty
+		if len(p[start:]) > 0 {
+			nw, err := tw.emulator.WriteChars(tw.fd, tw.wrappedWriter, p[start:])
+			totalWritten += nw
+			if err != nil {
+				return totalWritten, err
+			}
+		}
+	}
+	return totalWritten, nil
+}
+
+// Read reads up to len(p) bytes into p.
+// http://golang.org/pkg/io/#Reader
+func (tr *terminalReader) Read(p []byte) (n int, err error) {
+	// Implementations of Read are discouraged from returning a zero byte count
+	// with a nil error, except when len(p) == 0.
+	if len(p) == 0 {
+		return 0, nil
+	}
+	if nil == tr.emulator {
+		return tr.readFromWrappedReader(p)
+	}
+	return tr.emulator.ReadChars(tr.fd, tr.wrappedReader, p)
+}
+
+// Close the underlying stream
+func (tr *terminalReader) Close() (err error) {
+	return tr.wrappedReader.Close()
+}
+
+func (tr *terminalReader) readFromWrappedReader(p []byte) (n int, err error) {
+	return tr.wrappedReader.Read(p)
+}
+
+type ansiCommand struct {
+	CommandBytes []byte
+	Command      string
+	Parameters   []string
+	IsSpecial    bool
+}
+
+func parseAnsiCommand(command []byte) *ansiCommand {
+	if isCharacterSelectionCmdChar(command[1]) {
+		// this is a Character Set Selection command
+		return &ansiCommand{
+			CommandBytes: command,
+			Command:      string(command),
+			IsSpecial:    true,
+		}
+	}
+	// last char is command character
+	lastCharIndex := len(command) - 1
+
+	retValue := &ansiCommand{
+		CommandBytes: command,
+		Command:      string(command[lastCharIndex]),
+		IsSpecial:    false,
+	}
+	// more than a single escape
+	if lastCharIndex != 0 {
+		start := 1
+		// skip if double char escape sequence
+		if command[0] == ANSI_ESCAPE_PRIMARY && command[1] == ANSI_ESCAPE_SECONDARY {
+			start++
+		}
+		// TODO: convert this to a GetNextParam method
+		retValue.Parameters = strings.Split(string(command[start:lastCharIndex]), ANSI_PARAMETER_SEP)
+	}
+	return retValue
+}
+
+func (c *ansiCommand) getParam(index int) string {
+	if len(c.Parameters) > index {
+		return c.Parameters[index]
+	}
+	return ""
+}
+
+func (ac *ansiCommand) String() string {
+	return fmt.Sprintf("0x%v \"%v\" (\"%v\")",
+		bytesToHex(ac.CommandBytes),
+		ac.Command,
+		strings.Join(ac.Parameters, "\",\""))
+}
+
+func bytesToHex(b []byte) string {
+	hex := make([]string, len(b))
+	for i, ch := range b {
+		hex[i] = fmt.Sprintf("%X", ch)
+	}
+	return strings.Join(hex, "")
+}
+
+func parseInt16OrDefault(s string, defaultValue int16) (n int16, err error) {
+	if s == "" {
+		return defaultValue, nil
+	}
+	parsedValue, err := strconv.ParseInt(s, 10, 16)
+	if err != nil {
+		return defaultValue, err
+	}
+	return int16(parsedValue), nil
+}
diff --git a/pkg/term/winconsole/term_emulator_test.go b/pkg/term/winconsole/term_emulator_test.go
new file mode 100644
index 00000000..94104ff5
--- /dev/null
+++ b/pkg/term/winconsole/term_emulator_test.go
@@ -0,0 +1,388 @@
+package winconsole
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"testing"
+)
+
+const (
+	WRITE_OPERATION   = iota
+	COMMAND_OPERATION = iota
+)
+
+var languages = []string{
+	"Български",
+	"Català",
+	"Čeština",
+	"Ελληνικά",
+	"Español",
+	"Esperanto",
+	"Euskara",
+	"Français",
+	"Galego",
+	"한국어",
+	"ქართული",
+	"Latviešu",
+	"Lietuvių",
+	"Magyar",
+	"Nederlands",
+	"日本語",
+	"Norsk bokmål",
+	"Norsk nynorsk",
+	"Polski",
+	"Português",
+	"Română",
+	"Русский",
+	"Slovenčina",
+	"Slovenščina",
+	"Српски",
+	"српскохрватски",
+	"Suomi",
+	"Svenska",
+	"ไทย",
+	"Tiếng Việt",
+	"Türkçe",
+	"Українська",
+	"中文",
+}
+
+// Mock terminal handler object
+type mockTerminal struct {
+	OutputCommandSequence []terminalOperation
+}
+
+// Used for recording the callback data
+type terminalOperation struct {
+	Operation int
+	Data      []byte
+	Str       string
+}
+
+func (mt *mockTerminal) record(operation int, data []byte) {
+	op := terminalOperation{
+		Operation: operation,
+		Data:      make([]byte, len(data)),
+	}
+	copy(op.Data, data)
+	op.Str = string(op.Data)
+	mt.OutputCommandSequence = append(mt.OutputCommandSequence, op)
+}
+
+func (mt *mockTerminal) HandleOutputCommand(fd uintptr, command []byte) (n int, err error) {
+	mt.record(COMMAND_OPERATION, command)
+	return len(command), nil
+}
+
+func (mt *mockTerminal) HandleInputSequence(fd uintptr, command []byte) (n int, err error) {
+	return 0, nil
+}
+
+func (mt *mockTerminal) WriteChars(fd uintptr, w io.Writer, p []byte) (n int, err error) {
+	mt.record(WRITE_OPERATION, p)
+	return len(p), nil
+}
+
+func (mt *mockTerminal) ReadChars(fd uintptr, w io.Reader, p []byte) (n int, err error) {
+	return len(p), nil
+}
+
+func assertTrue(t *testing.T, cond bool, format string, args ...interface{}) {
+	if !cond {
+		t.Errorf(format, args...)
+	}
+}
+
+// reflect.DeepEqual does not provide detailed information as to what exactly failed.
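+// assertBytesEqual compares the two byte slices and, on mismatch, reports the
+// first differing index together with both the string and raw views of the data.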
+func assertBytesEqual(t *testing.T, expected, actual []byte, format string, args ...interface{}) {
+	match := true
+	mismatchIndex := 0
+	if len(expected) == len(actual) {
+		for i := 0; i < len(expected); i++ {
+			if expected[i] != actual[i] {
+				match = false
+				mismatchIndex = i
+				break
+			}
+		}
+	} else {
+		match = false
+		t.Errorf("Lengths don't match Expected=%d Actual=%d", len(expected), len(actual))
+	}
+	if !match {
+		t.Errorf("Mismatch at index %d", mismatchIndex)
+		t.Errorf("\tActual String = %s", string(actual))
+		t.Errorf("\tExpected String = %s", string(expected))
+		t.Errorf("\tActual = %v", actual)
+		t.Errorf("\tExpected = %v", expected)
+		t.Errorf(format, args...)
+	}
+}
+
+// Just to make sure :)
+func TestAssertEqualBytes(t *testing.T) {
+	data := []byte{9, 9, 1, 1, 1, 9, 9}
+	assertBytesEqual(t, data, data, "Self")
+	assertBytesEqual(t, data[1:4], data[1:4], "Self")
+	assertBytesEqual(t, []byte{1, 1}, []byte{1, 1}, "Simple match")
+	assertBytesEqual(t, []byte{1, 2, 3}, []byte{1, 2, 3}, "content mismatch")
+	assertBytesEqual(t, []byte{1, 1, 1}, data[2:5], "slice match")
+}
+
+/*
+func TestAssertEqualBytesNegative(t *testing.T) {
+	AssertBytesEqual(t, []byte{1, 1}, []byte{1}, "Length mismatch")
+	AssertBytesEqual(t, []byte{1, 1}, []byte{1}, "Length mismatch")
+	AssertBytesEqual(t, []byte{1, 2, 3}, []byte{1, 1, 1}, "content mismatch")
+}*/
+
+// Checks that the calls received by the mock match the expected plain-text writes and commands
+func assertHandlerOutput(t *testing.T, mock *mockTerminal, plainText string, commands ...string) {
+	text := make([]byte, 0, 3*len(plainText))
+	cmdIndex := 0
+	for opIndex := 0; opIndex < len(mock.OutputCommandSequence); opIndex++ {
+		op := mock.OutputCommandSequence[opIndex]
+		if op.Operation == WRITE_OPERATION {
+			t.Logf("\nThe data is[%d] == %s", opIndex, string(op.Data))
+			text = append(text[:], op.Data...)
+		} else {
+			assertTrue(t, mock.OutputCommandSequence[opIndex].Operation == COMMAND_OPERATION, "Operation should be command : %s", fmt.Sprintf("%+v", mock))
+			assertBytesEqual(t, StringToBytes(commands[cmdIndex]), mock.OutputCommandSequence[opIndex].Data, "Command data should match")
+			cmdIndex++
+		}
+	}
+	assertBytesEqual(t, StringToBytes(plainText), text, "Plain text data should match %#v", mock)
+}
+
+func StringToBytes(str string) []byte {
+	bytes := make([]byte, len(str))
+	copy(bytes[:], str)
+	return bytes
+}
+
+func TestParseAnsiCommand(t *testing.T) {
+	// Note: if the parameter does not exist then the empty value is returned
+
+	c := parseAnsiCommand(StringToBytes("\x1Bm"))
+	assertTrue(t, c.Command == "m", "Command should be m")
+	assertTrue(t, "" == c.getParam(0), "should return empty string")
+	assertTrue(t, "" == c.getParam(1), "should return empty string")
+
+	// Escape sequence - ESC[m
+	c = parseAnsiCommand(StringToBytes("\x1B[m"))
+	assertTrue(t, c.Command == "m", "Command should be m")
+	assertTrue(t, "" == c.getParam(0), "should return empty string")
+	assertTrue(t, "" == c.getParam(1), "should return empty string")
+
+	// Escape sequence with an empty parameter - ESC[;m
+	c = parseAnsiCommand(StringToBytes("\x1B[;m"))
+	assertTrue(t, c.Command == "m", "Command should be m")
+	assertTrue(t, "" == c.getParam(0), "should return empty string")
+	assertTrue(t, "" == c.getParam(1), "should return empty string")
+	assertTrue(t, "" == c.getParam(2), "should return empty string")
+
+	// Escape sequence with multiple empty parameters - ESC[;;m
+	c = parseAnsiCommand(StringToBytes("\x1B[;;m"))
+	assertTrue(t, c.Command == "m", "Command should be m")
+	assertTrue(t, "" == c.getParam(0), "")
+	assertTrue(t, "" == c.getParam(1), "")
+	assertTrue(t, "" == c.getParam(2), "")
+
+	// Escape sequence with multiple parameters - ESC[1;2;3m
+	c = parseAnsiCommand(StringToBytes("\x1B[1;2;3m"))
+	assertTrue(t, c.Command == "m", "Command should be m")
+	assertTrue(t, "1" == c.getParam(0), "")
+	assertTrue(t, "2" == c.getParam(1), "")
+	assertTrue(t, "3" == c.getParam(2), "")
+
+	// Escape sequence with multiple parameters - some missing
+	c = parseAnsiCommand(StringToBytes("\x1B[1;;3;;;6m"))
+	assertTrue(t, c.Command == "m", "Command should be m")
+	assertTrue(t, "1" == c.getParam(0), "")
+	assertTrue(t, "" == c.getParam(1), "")
+	assertTrue(t, "3" == c.getParam(2), "")
+	assertTrue(t, "" == c.getParam(3), "")
+	assertTrue(t, "" == c.getParam(4), "")
+	assertTrue(t, "6" == c.getParam(5), "")
+}
+
+func newBufferedMockTerm() (stdOut io.Writer, stdErr io.Writer, stdIn io.ReadCloser, mock *mockTerminal) {
+	var input bytes.Buffer
+	var output bytes.Buffer
+	var errBuf bytes.Buffer
+
+	mock = &mockTerminal{
+		OutputCommandSequence: make([]terminalOperation, 0, 256),
+	}
+
+	stdOut = &terminalWriter{
+		wrappedWriter: &output,
+		emulator:      mock,
+		command:       make([]byte, 0, 256),
+	}
+	stdErr = &terminalWriter{
+		wrappedWriter: &errBuf,
+		emulator:      mock,
+		command:       make([]byte, 0, 256),
+	}
+	stdIn = &terminalReader{
+		wrappedReader: ioutil.NopCloser(&input),
+		emulator:      mock,
+		command:       make([]byte, 0, 256),
+	}
+
+	return
+}
+
+func TestOutputSimple(t *testing.T) {
+	stdOut, _, _, mock := newBufferedMockTerm()
+
+	stdOut.Write(StringToBytes("Hello world"))
+	stdOut.Write(StringToBytes("\x1BmHello again"))
+
+	assertTrue(t, mock.OutputCommandSequence[0].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock)
+	assertBytesEqual(t, StringToBytes("Hello world"), mock.OutputCommandSequence[0].Data, "Write data should match")
+
+	assertTrue(t, mock.OutputCommandSequence[1].Operation == COMMAND_OPERATION, "Operation should be command : %+v", mock)
+	assertBytesEqual(t, StringToBytes("\x1Bm"), mock.OutputCommandSequence[1].Data, "Command data should match")
+
+	assertTrue(t, mock.OutputCommandSequence[2].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock)
+	assertBytesEqual(t, StringToBytes("Hello again"), mock.OutputCommandSequence[2].Data, "Write data should match")
+}
+
+func TestOutputSplitCommand(t *testing.T) {
+	stdOut, _, _, mock := newBufferedMockTerm()
+
+	stdOut.Write(StringToBytes("Hello world\x1B[1;2;3"))
+	stdOut.Write(StringToBytes("mHello again"))
+
+	assertTrue(t, mock.OutputCommandSequence[0].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock)
+	assertBytesEqual(t, StringToBytes("Hello world"), mock.OutputCommandSequence[0].Data, "Write data should match")
+
+	assertTrue(t, mock.OutputCommandSequence[1].Operation == COMMAND_OPERATION, "Operation should be command : %+v", mock)
+	assertBytesEqual(t, StringToBytes("\x1B[1;2;3m"), mock.OutputCommandSequence[1].Data, "Command data should match")
+
+	assertTrue(t, mock.OutputCommandSequence[2].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock)
+	assertBytesEqual(t, StringToBytes("Hello again"), mock.OutputCommandSequence[2].Data, "Write data should match")
+}
+
+func TestOutputMultipleCommands(t *testing.T) {
+	stdOut, _, _, mock := newBufferedMockTerm()
+
+	stdOut.Write(StringToBytes("Hello world"))
+	stdOut.Write(StringToBytes("\x1B[1;2;3m"))
+	stdOut.Write(StringToBytes("\x1B[J"))
+	stdOut.Write(StringToBytes("Hello again"))
+
+	assertTrue(t, mock.OutputCommandSequence[0].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock)
+	assertBytesEqual(t, StringToBytes("Hello world"), mock.OutputCommandSequence[0].Data, "Write data should match")
+
+	assertTrue(t, mock.OutputCommandSequence[1].Operation == COMMAND_OPERATION, "Operation should be command : %+v", mock)
+	assertBytesEqual(t, StringToBytes("\x1B[1;2;3m"), mock.OutputCommandSequence[1].Data, "Command data should match")
+
+	assertTrue(t, mock.OutputCommandSequence[2].Operation == COMMAND_OPERATION, "Operation should be command : %+v", mock)
+	assertBytesEqual(t, StringToBytes("\x1B[J"), mock.OutputCommandSequence[2].Data, "Command data should match")
+
+	assertTrue(t, mock.OutputCommandSequence[3].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock)
+	assertBytesEqual(t, StringToBytes("Hello again"), mock.OutputCommandSequence[3].Data, "Write data should match")
+}
+
+// Splits the given data in two chunks, makes two writes and checks the split data is parsed correctly;
+// checks the output write/command is passed to the handler correctly
+func helpsTestOutputSplitChunksAtIndex(t *testing.T, i int, data []byte) {
+	t.Logf("\ni=%d", i)
+	stdOut, _, _, mock := newBufferedMockTerm()
+
+	t.Logf("\nWriting chunk[0] == %s", string(data[:i]))
+	t.Logf("\nWriting chunk[1] == %s", string(data[i:]))
+	stdOut.Write(data[:i])
+	stdOut.Write(data[i:])
+
+	assertTrue(t, mock.OutputCommandSequence[0].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock)
+	assertBytesEqual(t, data[:i], mock.OutputCommandSequence[0].Data, "Write data should match")
+
+	assertTrue(t, mock.OutputCommandSequence[1].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock)
+	assertBytesEqual(t, data[i:], mock.OutputCommandSequence[1].Data, "Write data should match")
+}
+
+// Splits the given data in three chunks, makes three writes and checks the split data is parsed correctly;
+// checks the output write/command is passed to the handler correctly
+func helpsTestOutputSplitThreeChunksAtIndex(t *testing.T, data []byte, i int, j int) {
+	stdOut, _, _, mock := newBufferedMockTerm()
+
+	t.Logf("\nWriting chunk[0] == %s", string(data[:i]))
+	t.Logf("\nWriting chunk[1] == %s", string(data[i:j]))
+	t.Logf("\nWriting chunk[2] == %s", string(data[j:]))
+	stdOut.Write(data[:i])
+	stdOut.Write(data[i:j])
+	stdOut.Write(data[j:])
+
+	assertTrue(t, mock.OutputCommandSequence[0].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock)
+	assertBytesEqual(t, data[:i], mock.OutputCommandSequence[0].Data, "Write data should match")
+
+	assertTrue(t, mock.OutputCommandSequence[1].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock)
+	assertBytesEqual(t, data[i:j], mock.OutputCommandSequence[1].Data, "Write data should match")
+
+	assertTrue(t, mock.OutputCommandSequence[2].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock)
+	assertBytesEqual(t, data[j:], mock.OutputCommandSequence[2].Data, "Write data should match")
+}
+
+// Splits the output into two parts and tests all such possible pairs
+func helpsTestOutputSplitChunks(t *testing.T, data []byte) {
+	for i := 1; i < len(data)-1; i++ {
+		helpsTestOutputSplitChunksAtIndex(t, i, data)
+	}
+}
+
+// Splits the output in three parts and tests all such possible triples
+func helpsTestOutputSplitThreeChunks(t *testing.T, data []byte) {
+	for i := 1; i < len(data)-2; i++ {
+		for j := i + 1; j < len(data)-1; j++ {
+			helpsTestOutputSplitThreeChunksAtIndex(t, data, i, j)
+		}
+	}
+}
+
+func helpsTestOutputSplitCommandsAtIndex(t *testing.T, data []byte, i int, plainText string, commands ...string) {
+	t.Logf("\ni=%d", i)
+	stdOut, _, _, mock := newBufferedMockTerm()
+
+	stdOut.Write(data[:i])
+	stdOut.Write(data[i:])
+	assertHandlerOutput(t, mock, plainText, commands...)
+}
+
+func helpsTestOutputSplitCommands(t *testing.T, data []byte, plainText string, commands ...string) {
+	for i := 1; i < len(data)-1; i++ {
+		helpsTestOutputSplitCommandsAtIndex(t, data, i, plainText, commands...)
+	}
+}
+
+func injectCommandAt(data string, i int, command string) string {
+	// build the result with zero length (instead of zero-value padding) and
+	// actually splice the command in at position i
+	retValue := make([]byte, 0, len(data)+len(command))
+	retValue = append(retValue, data[:i]...)
+	retValue = append(retValue, command...)
+	retValue = append(retValue, data[i:]...)
+	return string(retValue)
+}
+
+func TestOutputSplitChunks(t *testing.T) {
+	data := StringToBytes("qwertyuiopasdfghjklzxcvbnm")
+	helpsTestOutputSplitChunks(t, data)
+	helpsTestOutputSplitChunks(t, StringToBytes("BBBBB"))
+	helpsTestOutputSplitThreeChunks(t, StringToBytes("ABCDE"))
+}
+
+func TestOutputSplitChunksIncludingCommands(t *testing.T) {
+	helpsTestOutputSplitCommands(t, StringToBytes("Hello world.\x1B[mHello again."), "Hello world.Hello again.", "\x1B[m")
+	helpsTestOutputSplitCommandsAtIndex(t, StringToBytes("Hello world.\x1B[mHello again."), 2, "Hello world.Hello again.", "\x1B[m")
+}
+
+func TestSplitChunkUnicode(t *testing.T) {
+	for _, l := range languages {
+		data := StringToBytes(l)
+		helpsTestOutputSplitChunks(t, data)
+		helpsTestOutputSplitThreeChunks(t, data)
+	}
+}
diff --git a/pkg/timeoutconn/timeoutconn.go b/pkg/timeoutconn/timeoutconn.go
new file mode 100644
index 00000000..d9534b5d
--- /dev/null
+++ b/pkg/timeoutconn/timeoutconn.go
@@ -0,0 +1,25 @@
+package timeoutconn
+
+import (
+	"net"
+	"time"
+)
+
+// New wraps netConn so that a read deadline of `timeout` is set before every Read.
+func New(netConn net.Conn, timeout time.Duration) net.Conn {
+	return &conn{netConn, timeout}
+}
+
+// A net.Conn that sets a deadline for every Read operation
+// (writes are passed through to the embedded Conn unchanged)
+type conn struct {
+	net.Conn
+	timeout time.Duration
+}
+
+func (c *conn) Read(b []byte) (int, error) {
+	if c.timeout > 0 {
+		if err := c.Conn.SetReadDeadline(time.Now().Add(c.timeout)); err != nil {
+			return 0, err
+		}
+	}
+	return c.Conn.Read(b)
+}
diff --git a/pkg/timeoutconn/timeoutconn_test.go b/pkg/timeoutconn/timeoutconn_test.go
new file mode 100644
index 00000000..46d6477c
--- /dev/null
+++ b/pkg/timeoutconn/timeoutconn_test.go
@@ -0,0 +1,33 @@
+package timeoutconn
+
+import (
+	"bufio"
+	"fmt"
+	"net"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+	"time"
+)
+
+func TestRead(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		fmt.Fprintln(w, "hello")
+	}))
+	defer ts.Close()
+	conn, err := net.Dial("tcp", ts.URL[7:])
+	if err != nil {
+		t.Fatalf("failed to create connection to %q: %v", ts.URL, err)
+	}
+	tconn := New(conn, 1*time.Second)
+
+	if _, err = bufio.NewReader(tconn).ReadString('\n'); err == nil {
+		t.Fatalf("expected timeout error, got none")
+	}
+	if _, err := fmt.Fprintf(tconn, "GET / HTTP/1.0\r\n\r\n"); err != nil {
+		t.Errorf("unexpected error: %v", err)
+	}
+	if _, err = bufio.NewReader(tconn).ReadString('\n'); err != nil {
+		t.Errorf("unexpected error: %v", err)
+	}
+}
diff --git a/pkg/timeutils/json.go b/pkg/timeutils/json.go
new file mode 100644
index 00000000..8043d69d
--- /dev/null
+++ b/pkg/timeutils/json.go
@@ -0,0 +1,26 @@
+package timeutils
+
+import (
+	"errors"
+	"time"
+)
+
+const (
+	// RFC3339NanoFixed is our own version of RFC3339Nano because we want one
+	// that pads the nanoseconds part with zeros to ensure
+	// the timestamps are aligned in the logs.
+	RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+	// JSONFormat is the format used by FastMarshalJSON
+	JSONFormat = `"` + time.RFC3339Nano + `"`
+)
+
+// FastMarshalJSON avoids one of the extra allocations that
+// time.MarshalJSON makes.
+func FastMarshalJSON(t time.Time) (string, error) {
+	if y := t.Year(); y < 0 || y >= 10000 {
+		// RFC 3339 is clear that years are 4 digits exactly.
+		// See golang.org/issue/4556#c15 for more discussion.
+ return "", errors.New("time.MarshalJSON: year outside of range [0,9999]") + } + return t.Format(JSONFormat), nil +} diff --git a/pkg/timeutils/json_test.go b/pkg/timeutils/json_test.go new file mode 100644 index 00000000..1ff33317 --- /dev/null +++ b/pkg/timeutils/json_test.go @@ -0,0 +1,47 @@ +package timeutils + +import ( + "testing" + "time" +) + +// Testing to ensure 'year' fields is between 0 and 9999 +func TestFastMarshalJSONWithInvalidDate(t *testing.T) { + aTime := time.Date(-1, 1, 1, 0, 0, 0, 0, time.Local) + json, err := FastMarshalJSON(aTime) + if err == nil { + t.Fatalf("FastMarshalJSON should throw an error, but was '%v'", json) + } + anotherTime := time.Date(10000, 1, 1, 0, 0, 0, 0, time.Local) + json, err = FastMarshalJSON(anotherTime) + if err == nil { + t.Fatalf("FastMarshalJSON should throw an error, but was '%v'", json) + } + +} + +func TestFastMarshalJSON(t *testing.T) { + aTime := time.Date(2015, 5, 29, 11, 1, 2, 3, time.UTC) + json, err := FastMarshalJSON(aTime) + if err != nil { + t.Fatal(err) + } + expected := "\"2015-05-29T11:01:02.000000003Z\"" + if json != expected { + t.Fatalf("Expected %v, got %v", expected, json) + } + + location, err := time.LoadLocation("Europe/Paris") + if err != nil { + t.Fatal(err) + } + aTime = time.Date(2015, 5, 29, 11, 1, 2, 3, location) + json, err = FastMarshalJSON(aTime) + if err != nil { + t.Fatal(err) + } + expected = "\"2015-05-29T11:01:02.000000003+02:00\"" + if json != expected { + t.Fatalf("Expected %v, got %v", expected, json) + } +} diff --git a/pkg/timeutils/utils.go b/pkg/timeutils/utils.go new file mode 100644 index 00000000..8437f124 --- /dev/null +++ b/pkg/timeutils/utils.go @@ -0,0 +1,36 @@ +package timeutils + +import ( + "strconv" + "strings" + "time" +) + +// GetTimestamp tries to parse given string as golang duration, +// then RFC3339 time and finally as a Unix timestamp. If +// any of these were successful, it returns a Unix timestamp +// as string otherwise returns the given value back. +// In case of duration input, the returned timestamp is computed +// as the given reference time minus the amount of the duration. 
+func GetTimestamp(value string, reference time.Time) string {
+	if d, err := time.ParseDuration(value); value != "0" && err == nil {
+		return strconv.FormatInt(reference.Add(-d).Unix(), 10)
+	}
+
+	var format string
+	if strings.Contains(value, ".") {
+		format = time.RFC3339Nano
+	} else {
+		format = time.RFC3339
+	}
+
+	loc := time.FixedZone(time.Now().Zone())
+	if len(value) < len(format) {
+		format = format[:len(value)]
+	}
+	t, err := time.ParseInLocation(format, value, loc)
+	if err != nil {
+		return value
+	}
+	return strconv.FormatInt(t.Unix(), 10)
+}
diff --git a/pkg/timeutils/utils_test.go b/pkg/timeutils/utils_test.go
new file mode 100644
index 00000000..f71dcb53
--- /dev/null
+++ b/pkg/timeutils/utils_test.go
@@ -0,0 +1,44 @@
+package timeutils
+
+import (
+	"fmt"
+	"testing"
+	"time"
+)
+
+func TestGetTimestamp(t *testing.T) {
+	now := time.Now()
+	cases := []struct{ in, expected string }{
+		{"0", "-62167305600"}, // "0" is parsed as the year 0
+
+		// Partial RFC3339 strings get parsed with second precision
+		{"2006-01-02T15:04:05.999999999+07:00", "1136189045"},
+		{"2006-01-02T15:04:05.999999999Z", "1136214245"},
+		{"2006-01-02T15:04:05.999999999", "1136214245"},
+		{"2006-01-02T15:04:05", "1136214245"},
+		{"2006-01-02T15:04", "1136214240"},
+		{"2006-01-02T15", "1136214000"},
+		{"2006-01-02T", "1136160000"},
+		{"2006-01-02", "1136160000"},
+		{"2006", "1136073600"},
+		{"2015-05-13T20:39:09Z", "1431549549"},
+
+		// Unix timestamps are returned as-is
+		{"1136073600", "1136073600"},
+
+		// Durations
+		{"1m", fmt.Sprintf("%d", now.Add(-1*time.Minute).Unix())},
+		{"1.5h", fmt.Sprintf("%d", now.Add(-90*time.Minute).Unix())},
+		{"1h30m", fmt.Sprintf("%d", now.Add(-90*time.Minute).Unix())},
+
+		// String fallback
+		{"invalid", "invalid"},
+	}
+
+	for _, c := range cases {
+		o := GetTimestamp(c.in, now)
+		if o != c.expected {
+			t.Fatalf("wrong value for '%s'. expected:'%s' got:'%s'", c.in, c.expected, o)
+		}
+	}
+}
diff --git a/pkg/tlsconfig/config.go b/pkg/tlsconfig/config.go
new file mode 100644
index 00000000..9f7f3369
--- /dev/null
+++ b/pkg/tlsconfig/config.go
@@ -0,0 +1,132 @@
+// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
+//
+// As a reminder from https://golang.org/pkg/crypto/tls/#Config:
+//	A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified.
+//	A Config may be reused; the tls package will also not modify it.
+package tlsconfig
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io/ioutil"
+	"os"
+
+	"github.com/Sirupsen/logrus"
+)
+
+// Options represents the information needed to create client and server TLS configurations.
+type Options struct {
+	CAFile string
+
+	// If either CertFile or KeyFile is empty, Client() will not load them,
+	// preventing the client from authenticating to the server.
+	// However, Server() requires them and will error out if they are empty.
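+	// A typical mutually-authenticated setup (illustrative paths only) sets
+	// CAFile, CertFile and KeyFile together and then calls Client(options)
+	// or Server(options) from this package.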
+	CertFile string
+	KeyFile  string
+
+	// client-only option
+	InsecureSkipVerify bool
+	// server-only option
+	ClientAuth tls.ClientAuthType
+}
+
+// Extra (server-side) accepted CBC cipher suites - these will be phased out in the future
+var acceptedCBCCiphers = []uint16{
+	tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+	tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+	tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+	tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+	tls.TLS_RSA_WITH_AES_128_CBC_SHA,
+}
+
+// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set)
+var clientCipherSuites = []uint16{
+	tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+	tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+}
+
+// DefaultServerAcceptedCiphers is for use by code which already has a crypto/tls
+// options struct but wants to use a commonly accepted set of TLS cipher suites,
+// with known weak algorithms removed.
+var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...)
+
+// ServerDefault is a secure-enough default server TLS configuration.
+var ServerDefault = tls.Config{
+	// Avoid fallback to SSL protocols < TLS1.0
+	MinVersion:               tls.VersionTLS10,
+	PreferServerCipherSuites: true,
+	CipherSuites:             DefaultServerAcceptedCiphers,
+}
+
+// ClientDefault is a secure-enough default client TLS configuration.
+var ClientDefault = tls.Config{
+	// Prefer TLS1.2 as the client minimum
+	MinVersion:   tls.VersionTLS12,
+	CipherSuites: clientCipherSuites,
+}
+
+// certPool returns an X.509 certificate pool from `caFile`, the certificate file.
+func certPool(caFile string) (*x509.CertPool, error) {
+	// If we should verify the server, we need to load a trusted ca
+	certPool := x509.NewCertPool()
+	pem, err := ioutil.ReadFile(caFile)
+	if err != nil {
+		return nil, fmt.Errorf("Could not read CA certificate %q: %v", caFile, err)
+	}
+	if !certPool.AppendCertsFromPEM(pem) {
+		return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile)
+	}
+	s := certPool.Subjects()
+	subjects := make([]string, len(s))
+	for i, subject := range s {
+		subjects[i] = string(subject)
+	}
+	logrus.Debugf("Trusting certs with subjects: %v", subjects)
+	return certPool, nil
+}
+
+// Client returns a TLS configuration meant to be used by a client.
+func Client(options Options) (*tls.Config, error) {
+	tlsConfig := ClientDefault
+	tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify
+	if !options.InsecureSkipVerify {
+		CAs, err := certPool(options.CAFile)
+		if err != nil {
+			return nil, err
+		}
+		tlsConfig.RootCAs = CAs
+	}
+
+	if options.CertFile != "" && options.KeyFile != "" {
+		tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile)
+		if err != nil {
+			return nil, fmt.Errorf("Could not load X509 key pair: %v. Make sure the key is not encrypted", err)
+		}
+		tlsConfig.Certificates = []tls.Certificate{tlsCert}
+	}
+
+	return &tlsConfig, nil
+}
+
+// Server returns a TLS configuration meant to be used by a server.
+func Server(options Options) (*tls.Config, error) {
+	tlsConfig := ServerDefault
+	tlsConfig.ClientAuth = options.ClientAuth
+	tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err)
+		}
+		return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err)
+	}
+	tlsConfig.Certificates = []tls.Certificate{tlsCert}
+	if options.ClientAuth >= tls.VerifyClientCertIfGiven {
+		CAs, err := certPool(options.CAFile)
+		if err != nil {
+			return nil, err
+		}
+		tlsConfig.ClientCAs = CAs
+	}
+	return &tlsConfig, nil
+}
diff --git a/pkg/truncindex/truncindex.go b/pkg/truncindex/truncindex.go
new file mode 100644
index 00000000..72d525a1
--- /dev/null
+++ b/pkg/truncindex/truncindex.go
@@ -0,0 +1,118 @@
+package truncindex
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+	"sync"
+
+	"github.com/tchap/go-patricia/patricia"
+)
+
+var (
+	// ErrEmptyPrefix is returned when a lookup is attempted with an empty prefix.
+	ErrEmptyPrefix = errors.New("Prefix can't be empty")
+	// ErrAmbiguousPrefix is returned when more than one ID matches the given prefix.
+	ErrAmbiguousPrefix = errors.New("Multiple IDs found with provided prefix")
+)
+
+// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes.
+// This is used to retrieve image and container IDs by more convenient shorthand prefixes.
+type TruncIndex struct {
+	sync.RWMutex
+	trie *patricia.Trie
+	ids  map[string]struct{}
+}
+
+// NewTruncIndex creates a new TruncIndex and initializes it with a list of IDs.
+func NewTruncIndex(ids []string) (idx *TruncIndex) {
+	idx = &TruncIndex{
+		ids: make(map[string]struct{}),
+
+		// Change the patricia max prefix per node length,
+		// because our len(ID) is always 64
+		trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)),
+	}
+	for _, id := range ids {
+		idx.addID(id)
+	}
+	return
+}
+
+func (idx *TruncIndex) addID(id string) error {
+	if strings.Contains(id, " ") {
+		return fmt.Errorf("illegal character: ' '")
+	}
+	if id == "" {
+		return ErrEmptyPrefix
+	}
+	if _, exists := idx.ids[id]; exists {
+		return fmt.Errorf("id already exists: '%s'", id)
+	}
+	idx.ids[id] = struct{}{}
+	if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted {
+		return fmt.Errorf("failed to insert id: %s", id)
+	}
+	return nil
+}
+
+// Add adds a new ID to the TruncIndex.
+func (idx *TruncIndex) Add(id string) error {
+	idx.Lock()
+	defer idx.Unlock()
+	if err := idx.addID(id); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Delete removes an ID from the TruncIndex. The ID must match exactly;
+// an error is returned if it does not exist.
+func (idx *TruncIndex) Delete(id string) error {
+	idx.Lock()
+	defer idx.Unlock()
+	if _, exists := idx.ids[id]; !exists || id == "" {
+		return fmt.Errorf("no such id: '%s'", id)
+	}
+	delete(idx.ids, id)
+	if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted {
+		return fmt.Errorf("no such id: '%s'", id)
+	}
+	return nil
+}
+
+// Get retrieves an ID from the TruncIndex. If there are multiple IDs
+// with the given prefix, an error is returned.
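+//
+// For example, with the ID used in the tests below (illustrative only):
+//
+//	idx := NewTruncIndex([]string{"99b36c2c326ccc11e726eee6ee78a0baf166ef96"})
+//	idx.Get("99b3") // returns the full ID while the prefix is unambiguous
+//	idx.Get("")     // returns ErrEmptyPrefix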
+func (idx *TruncIndex) Get(s string) (string, error) {
+	if s == "" {
+		return "", ErrEmptyPrefix
+	}
+	var id string
+	subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error {
+		if id != "" {
+			// a second match means the prefix is ambiguous
+			id = ""
+			return ErrAmbiguousPrefix
+		}
+		id = string(prefix)
+		return nil
+	}
+
+	idx.RLock()
+	defer idx.RUnlock()
+	if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil {
+		return "", err
+	}
+	if id != "" {
+		return id, nil
+	}
+	return "", fmt.Errorf("no such id: %s", s)
+}
+
+// Iterate iterates over all stored IDs and passes each of them to the given handler.
+func (idx *TruncIndex) Iterate(handler func(id string)) {
+	idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error {
+		handler(string(prefix))
+		return nil
+	})
+}
diff --git a/pkg/truncindex/truncindex_test.go b/pkg/truncindex/truncindex_test.go
new file mode 100644
index 00000000..cc7bc01d
--- /dev/null
+++ b/pkg/truncindex/truncindex_test.go
@@ -0,0 +1,429 @@
+package truncindex
+
+import (
+	"math/rand"
+	"testing"
+
+	"github.com/docker/docker/pkg/stringid"
+)
+
+// Test the behavior of TruncIndex, an index for querying IDs from a non-conflicting prefix.
+func TestTruncIndex(t *testing.T) {
+	ids := []string{}
+	index := NewTruncIndex(ids)
+	// Get on an empty index
+	if _, err := index.Get("foobar"); err == nil {
+		t.Fatal("Get on an empty index should return an error")
+	}
+
+	// Spaces should be illegal in an id
+	if err := index.Add("I have a space"); err == nil {
+		t.Fatalf("Adding an id with ' ' should return an error")
+	}
+
+	id := "99b36c2c326ccc11e726eee6ee78a0baf166ef96"
+	// Add an id
+	if err := index.Add(id); err != nil {
+		t.Fatal(err)
+	}
+
+	// Add an empty id (should fail)
+	if err := index.Add(""); err == nil {
+		t.Fatalf("Adding an empty id should return an error")
+	}
+
+	// Get a non-existing id
+	assertIndexGet(t, index, "abracadabra", "", true)
+	// Get an empty id
+	assertIndexGet(t, index, "", "", true)
+	// Get the exact id
+	assertIndexGet(t, index, id, id, false)
+	// The first letter should match
+	assertIndexGet(t, index, id[:1], id, false)
+	// The first half should match
+	assertIndexGet(t, index, id[:len(id)/2], id, false)
+	// The second half should NOT match
+	assertIndexGet(t, index, id[len(id)/2:], "", true)
+
+	id2 := id[:6] + "blabla"
+	// Add an id
+	if err := index.Add(id2); err != nil {
+		t.Fatal(err)
+	}
+	// Both exact IDs should work
+	assertIndexGet(t, index, id, id, false)
+	assertIndexGet(t, index, id2, id2, false)
+
+	// 6 characters or less should conflict
+	assertIndexGet(t, index, id[:6], "", true)
+	assertIndexGet(t, index, id[:4], "", true)
+	assertIndexGet(t, index, id[:1], "", true)
+
+	// An ambiguous id prefix should return an error
+	if _, err := index.Get(id[:4]); err == nil {
+		t.Fatal("An ambiguous id prefix should return an error")
+	}
+
+	// 7 characters should NOT conflict
+	assertIndexGet(t, index, id[:7], id, false)
+	assertIndexGet(t, index, id2[:7], id2, false)
+
+	// Deleting a non-existing id should return an error
+	if err := index.Delete("non-existing"); err == nil {
+		t.Fatalf("Deleting a non-existing id should return an error")
+	}
+
+	// Deleting an empty id should return an error
+	if err := index.Delete(""); err == nil {
+		t.Fatal("Deleting an empty id should return an error")
+	}
+
+	// Deleting id2 should remove conflicts
+	if err := index.Delete(id2); err != nil {
+		t.Fatal(err)
+	}
+	// id2 should no longer work
assertIndexGet(t, index, id2, "", true) + assertIndexGet(t, index, id2[:7], "", true) + assertIndexGet(t, index, id2[:11], "", true) + + // conflicts between id and id2 should be gone + assertIndexGet(t, index, id[:6], id, false) + assertIndexGet(t, index, id[:4], id, false) + assertIndexGet(t, index, id[:1], id, false) + + // non-conflicting substrings should still not conflict + assertIndexGet(t, index, id[:7], id, false) + assertIndexGet(t, index, id[:15], id, false) + assertIndexGet(t, index, id, id, false) + + assertIndexIterate(t) +} + +func assertIndexIterate(t *testing.T) { + ids := []string{ + "19b36c2c326ccc11e726eee6ee78a0baf166ef96", + "28b36c2c326ccc11e726eee6ee78a0baf166ef96", + "37b36c2c326ccc11e726eee6ee78a0baf166ef96", + "46b36c2c326ccc11e726eee6ee78a0baf166ef96", + } + + index := NewTruncIndex(ids) + + index.Iterate(func(targetId string) { + for _, id := range ids { + if targetId == id { + return + } + } + + t.Fatalf("An unknown ID '%s'", targetId) + }) +} + +func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult string, expectError bool) { + if result, err := index.Get(input); err != nil && !expectError { + t.Fatalf("Unexpected error getting '%s': %s", input, err) + } else if err == nil && expectError { + t.Fatalf("Getting '%s' should return an error, not '%s'", input, result) + } else if result != expectedResult { + t.Fatalf("Getting '%s' returned '%s' instead of '%s'", input, result, expectedResult) + } +} + +func BenchmarkTruncIndexAdd100(b *testing.B) { + var testSet []string + for i := 0; i < 100; i++ { + testSet = append(testSet, stringid.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexAdd250(b *testing.B) { + var testSet []string + for i := 0; i < 250; i++ { + testSet = append(testSet, stringid.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexAdd500(b *testing.B) { + var testSet []string + for i := 0; i < 500; i++ { + testSet = append(testSet, stringid.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexGet100(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 100; i++ { + testSet = append(testSet, stringid.GenerateRandomID()) + } + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexGet250(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 250; i++ { + testSet = append(testSet, stringid.GenerateRandomID()) + } + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, id := range testKeys { + if res, err := index.Get(id); err != nil 
{ + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexGet500(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + testSet = append(testSet, stringid.GenerateRandomID()) + } + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexDelete100(b *testing.B) { + var testSet []string + for i := 0; i < 100; i++ { + testSet = append(testSet, stringid.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for _, id := range testSet { + if err := index.Delete(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexDelete250(b *testing.B) { + var testSet []string + for i := 0; i < 250; i++ { + testSet = append(testSet, stringid.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for _, id := range testSet { + if err := index.Delete(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexDelete500(b *testing.B) { + var testSet []string + for i := 0; i < 500; i++ { + testSet = append(testSet, stringid.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for _, id := range testSet { + if err := index.Delete(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexNew100(b *testing.B) { + var testSet []string + for i := 0; i < 100; i++ { + testSet = append(testSet, stringid.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + NewTruncIndex(testSet) + } +} + +func BenchmarkTruncIndexNew250(b *testing.B) { + var testSet []string + for i := 0; i < 250; i++ { + testSet = append(testSet, stringid.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + NewTruncIndex(testSet) + } +} + +func BenchmarkTruncIndexNew500(b *testing.B) { + var testSet []string + for i := 0; i < 500; i++ { + testSet = append(testSet, stringid.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + NewTruncIndex(testSet) + } +} + +func BenchmarkTruncIndexAddGet100(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + id := stringid.GenerateRandomID() + testSet = append(testSet, id) + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexAddGet250(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + id := stringid.GenerateRandomID() + testSet = append(testSet, id) + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) 
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		index := NewTruncIndex([]string{})
+		for _, id := range testSet {
+			if err := index.Add(id); err != nil {
+				b.Fatal(err)
+			}
+		}
+		for _, id := range testKeys {
+			if res, err := index.Get(id); err != nil {
+				b.Fatal(res, err)
+			}
+		}
+	}
+}
+
+func BenchmarkTruncIndexAddGet500(b *testing.B) {
+	var testSet []string
+	var testKeys []string
+	for i := 0; i < 500; i++ {
+		id := stringid.GenerateRandomID()
+		testSet = append(testSet, id)
+		l := rand.Intn(12) + 12
+		testKeys = append(testKeys, id[:l])
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		index := NewTruncIndex([]string{})
+		for _, id := range testSet {
+			if err := index.Add(id); err != nil {
+				b.Fatal(err)
+			}
+		}
+		for _, id := range testKeys {
+			if res, err := index.Get(id); err != nil {
+				b.Fatal(res, err)
+			}
+		}
+	}
+}
diff --git a/pkg/ulimit/ulimit.go b/pkg/ulimit/ulimit.go
new file mode 100644
index 00000000..eb2ae4e8
--- /dev/null
+++ b/pkg/ulimit/ulimit.go
@@ -0,0 +1,106 @@
+package ulimit
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// Ulimit is a human-friendly version of Rlimit.
+type Ulimit struct {
+	Name string
+	Hard int64
+	Soft int64
+}
+
+// Rlimit is the raw resource limit as passed to the syscall.
+type Rlimit struct {
+	Type int    `json:"type,omitempty"`
+	Hard uint64 `json:"hard,omitempty"`
+	Soft uint64 `json:"soft,omitempty"`
+}
+
+const (
+	// Magic numbers for making the rlimit syscall.
+	// Some of these are defined in the syscall package, but not all,
+	// and the Windows client doesn't get access to the syscall package
+	// at all, so they are defined here.
+	RLIMIT_AS         = 9
+	RLIMIT_CORE       = 4
+	RLIMIT_CPU        = 0
+	RLIMIT_DATA       = 2
+	RLIMIT_FSIZE      = 1
+	RLIMIT_LOCKS      = 10
+	RLIMIT_MEMLOCK    = 8
+	RLIMIT_MSGQUEUE   = 12
+	RLIMIT_NICE       = 13
+	RLIMIT_NOFILE     = 7
+	RLIMIT_NPROC      = 6
+	RLIMIT_RSS        = 5
+	RLIMIT_RTPRIO     = 14
+	RLIMIT_RTTIME     = 15
+	RLIMIT_SIGPENDING = 11
+	RLIMIT_STACK      = 3
+)
+
+var ulimitNameMapping = map[string]int{
+	//"as": RLIMIT_AS, // Disabled since this doesn't seem usable with the way Docker inits a container.
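+	// The keys below are the limit names accepted by Parse, e.g. "nofile=512:1024".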
+ "core": RLIMIT_CORE, + "cpu": RLIMIT_CPU, + "data": RLIMIT_DATA, + "fsize": RLIMIT_FSIZE, + "locks": RLIMIT_LOCKS, + "memlock": RLIMIT_MEMLOCK, + "msgqueue": RLIMIT_MSGQUEUE, + "nice": RLIMIT_NICE, + "nofile": RLIMIT_NOFILE, + "nproc": RLIMIT_NPROC, + "rss": RLIMIT_RSS, + "rtprio": RLIMIT_RTPRIO, + "rttime": RLIMIT_RTTIME, + "sigpending": RLIMIT_SIGPENDING, + "stack": RLIMIT_STACK, +} + +func Parse(val string) (*Ulimit, error) { + parts := strings.SplitN(val, "=", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid ulimit argument: %s", val) + } + + if _, exists := ulimitNameMapping[parts[0]]; !exists { + return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) + } + + limitVals := strings.SplitN(parts[1], ":", 2) + if len(limitVals) > 2 { + return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) + } + + soft, err := strconv.ParseInt(limitVals[0], 10, 64) + if err != nil { + return nil, err + } + + hard := soft // in case no hard was set + if len(limitVals) == 2 { + hard, err = strconv.ParseInt(limitVals[1], 10, 64) + } + if soft > hard { + return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, hard) + } + + return &Ulimit{Name: parts[0], Soft: soft, Hard: hard}, nil +} + +func (u *Ulimit) GetRlimit() (*Rlimit, error) { + t, exists := ulimitNameMapping[u.Name] + if !exists { + return nil, fmt.Errorf("invalid ulimit name %s", u.Name) + } + + return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil +} + +func (u *Ulimit) String() string { + return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) +} diff --git a/pkg/ulimit/ulimit_test.go b/pkg/ulimit/ulimit_test.go new file mode 100644 index 00000000..1e8c881f --- /dev/null +++ b/pkg/ulimit/ulimit_test.go @@ -0,0 +1,55 @@ +package ulimit + +import "testing" + +func TestParseValid(t *testing.T) { + u1 := &Ulimit{"nofile", 1024, 512} + if u2, _ := Parse("nofile=512:1024"); *u1 != *u2 { + t.Fatalf("expected %q, but got %q", u1, u2) + } +} + +func TestParseInvalidLimitType(t *testing.T) { + if _, err := Parse("notarealtype=1024:1024"); err == nil { + t.Fatalf("expected error on invalid ulimit type") + } +} + +func TestParseBadFormat(t *testing.T) { + if _, err := Parse("nofile:1024:1024"); err == nil { + t.Fatal("expected error on bad syntax") + } + + if _, err := Parse("nofile"); err == nil { + t.Fatal("expected error on bad syntax") + } + + if _, err := Parse("nofile="); err == nil { + t.Fatal("expected error on bad syntax") + } + if _, err := Parse("nofile=:"); err == nil { + t.Fatal("expected error on bad syntax") + } + if _, err := Parse("nofile=:1024"); err == nil { + t.Fatal("expected error on bad syntax") + } +} + +func TestParseHardLessThanSoft(t *testing.T) { + if _, err := Parse("nofile:1024:1"); err == nil { + t.Fatal("expected error on hard limit less than soft limit") + } +} + +func TestParseInvalidValueType(t *testing.T) { + if _, err := Parse("nofile:asdf"); err == nil { + t.Fatal("expected error on bad value type") + } +} + +func TestStringOutput(t *testing.T) { + u := &Ulimit{"nofile", 1024, 512} + if s := u.String(); s != "nofile=512:1024" { + t.Fatal("expected String to return nofile=512:1024, but got", s) + } +} diff --git a/pkg/units/duration.go b/pkg/units/duration.go new file mode 100644 index 00000000..44012aaf --- /dev/null +++ b/pkg/units/duration.go @@ -0,0 +1,31 @@ +package units + +import ( + "fmt" + "time" +) + +// HumanDuration returns a human-readable approximation of a duration +// (eg. 
"About a minute", "4 hours ago", etc.) +func HumanDuration(d time.Duration) string { + if seconds := int(d.Seconds()); seconds < 1 { + return "Less than a second" + } else if seconds < 60 { + return fmt.Sprintf("%d seconds", seconds) + } else if minutes := int(d.Minutes()); minutes == 1 { + return "About a minute" + } else if minutes < 60 { + return fmt.Sprintf("%d minutes", minutes) + } else if hours := int(d.Hours()); hours == 1 { + return "About an hour" + } else if hours < 48 { + return fmt.Sprintf("%d hours", hours) + } else if hours < 24*7*2 { + return fmt.Sprintf("%d days", hours/24) + } else if hours < 24*30*3 { + return fmt.Sprintf("%d weeks", hours/24/7) + } else if hours < 24*365*2 { + return fmt.Sprintf("%d months", hours/24/30) + } + return fmt.Sprintf("%d years", int(d.Hours())/24/365) +} diff --git a/pkg/units/duration_test.go b/pkg/units/duration_test.go new file mode 100644 index 00000000..fcfb6b7b --- /dev/null +++ b/pkg/units/duration_test.go @@ -0,0 +1,46 @@ +package units + +import ( + "testing" + "time" +) + +func TestHumanDuration(t *testing.T) { + // Useful duration abstractions + day := 24 * time.Hour + week := 7 * day + month := 30 * day + year := 365 * day + + assertEquals(t, "Less than a second", HumanDuration(450*time.Millisecond)) + assertEquals(t, "47 seconds", HumanDuration(47*time.Second)) + assertEquals(t, "About a minute", HumanDuration(1*time.Minute)) + assertEquals(t, "3 minutes", HumanDuration(3*time.Minute)) + assertEquals(t, "35 minutes", HumanDuration(35*time.Minute)) + assertEquals(t, "35 minutes", HumanDuration(35*time.Minute+40*time.Second)) + assertEquals(t, "About an hour", HumanDuration(1*time.Hour)) + assertEquals(t, "About an hour", HumanDuration(1*time.Hour+45*time.Minute)) + assertEquals(t, "3 hours", HumanDuration(3*time.Hour)) + assertEquals(t, "3 hours", HumanDuration(3*time.Hour+59*time.Minute)) + assertEquals(t, "4 hours", HumanDuration(3*time.Hour+60*time.Minute)) + assertEquals(t, "24 hours", HumanDuration(24*time.Hour)) + assertEquals(t, "36 hours", HumanDuration(1*day+12*time.Hour)) + assertEquals(t, "2 days", HumanDuration(2*day)) + assertEquals(t, "7 days", HumanDuration(7*day)) + assertEquals(t, "13 days", HumanDuration(13*day+5*time.Hour)) + assertEquals(t, "2 weeks", HumanDuration(2*week)) + assertEquals(t, "2 weeks", HumanDuration(2*week+4*day)) + assertEquals(t, "3 weeks", HumanDuration(3*week)) + assertEquals(t, "4 weeks", HumanDuration(4*week)) + assertEquals(t, "4 weeks", HumanDuration(4*week+3*day)) + assertEquals(t, "4 weeks", HumanDuration(1*month)) + assertEquals(t, "6 weeks", HumanDuration(1*month+2*week)) + assertEquals(t, "8 weeks", HumanDuration(2*month)) + assertEquals(t, "3 months", HumanDuration(3*month+1*week)) + assertEquals(t, "5 months", HumanDuration(5*month+2*week)) + assertEquals(t, "13 months", HumanDuration(13*month)) + assertEquals(t, "23 months", HumanDuration(23*month)) + assertEquals(t, "24 months", HumanDuration(24*month)) + assertEquals(t, "2 years", HumanDuration(24*month+2*week)) + assertEquals(t, "3 years", HumanDuration(3*year+2*month)) +} diff --git a/pkg/units/size.go b/pkg/units/size.go new file mode 100644 index 00000000..9e84697c --- /dev/null +++ b/pkg/units/size.go @@ -0,0 +1,93 @@ +package units + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +// See: http://en.wikipedia.org/wiki/Binary_prefix +const ( + // Decimal + + KB = 1000 + MB = 1000 * KB + GB = 1000 * MB + TB = 1000 * GB + PB = 1000 * TB + + // Binary + + KiB = 1024 + MiB = 1024 * KiB + GiB = 1024 * MiB + TiB = 
1024 * GiB + PiB = 1024 * TiB +) + +type unitMap map[string]int64 + +var ( + decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} + binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} + sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`) +) + +var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} +var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} + +// CustomSize returns a human-readable approximation of a size +// using custom format +func CustomSize(format string, size float64, base float64, _map []string) string { + i := 0 + for size >= base { + size = size / base + i++ + } + return fmt.Sprintf(format, size, _map[i]) +} + +// HumanSize returns a human-readable approximation of a size +// using SI standard (eg. "44kB", "17MB") +func HumanSize(size float64) string { + return CustomSize("%.4g %s", size, 1000.0, decimapAbbrs) +} + +func BytesSize(size float64) string { + return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs) +} + +// FromHumanSize returns an integer from a human-readable specification of a +// size using SI standard (eg. "44kB", "17MB") +func FromHumanSize(size string) (int64, error) { + return parseSize(size, decimalMap) +} + +// RAMInBytes parses a human-readable string representing an amount of RAM +// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and +// returns the number of bytes, or -1 if the string is unparseable. +// Units are case-insensitive, and the 'b' suffix is optional. +func RAMInBytes(size string) (int64, error) { + return parseSize(size, binaryMap) +} + +// Parses the human-readable size string into the amount it represents +func parseSize(sizeStr string, uMap unitMap) (int64, error) { + matches := sizeRegex.FindStringSubmatch(sizeStr) + if len(matches) != 3 { + return -1, fmt.Errorf("invalid size: '%s'", sizeStr) + } + + size, err := strconv.ParseInt(matches[1], 10, 0) + if err != nil { + return -1, err + } + + unitPrefix := strings.ToLower(matches[2]) + if mul, ok := uMap[unitPrefix]; ok { + size *= mul + } + + return size, nil +} diff --git a/pkg/units/size_test.go b/pkg/units/size_test.go new file mode 100644 index 00000000..67c3b81e --- /dev/null +++ b/pkg/units/size_test.go @@ -0,0 +1,108 @@ +package units + +import ( + "reflect" + "runtime" + "strings" + "testing" +) + +func TestBytesSize(t *testing.T) { + assertEquals(t, "1 KiB", BytesSize(1024)) + assertEquals(t, "1 MiB", BytesSize(1024*1024)) + assertEquals(t, "1 MiB", BytesSize(1048576)) + assertEquals(t, "2 MiB", BytesSize(2*MiB)) + assertEquals(t, "3.42 GiB", BytesSize(3.42*GiB)) + assertEquals(t, "5.372 TiB", BytesSize(5.372*TiB)) + assertEquals(t, "2.22 PiB", BytesSize(2.22*PiB)) +} + +func TestHumanSize(t *testing.T) { + assertEquals(t, "1 kB", HumanSize(1000)) + assertEquals(t, "1.024 kB", HumanSize(1024)) + assertEquals(t, "1 MB", HumanSize(1000000)) + assertEquals(t, "1.049 MB", HumanSize(1048576)) + assertEquals(t, "2 MB", HumanSize(2*MB)) + assertEquals(t, "3.42 GB", HumanSize(float64(3.42*GB))) + assertEquals(t, "5.372 TB", HumanSize(float64(5.372*TB))) + assertEquals(t, "2.22 PB", HumanSize(float64(2.22*PB))) +} + +func TestFromHumanSize(t *testing.T) { + assertSuccessEquals(t, 32, FromHumanSize, "32") + assertSuccessEquals(t, 32, FromHumanSize, "32b") + assertSuccessEquals(t, 32, FromHumanSize, "32B") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32k") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32K") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32kb") + 
+	assertSuccessEquals(t, 32*KB, FromHumanSize, "32Kb")
+	assertSuccessEquals(t, 32*MB, FromHumanSize, "32Mb")
+	assertSuccessEquals(t, 32*GB, FromHumanSize, "32Gb")
+	assertSuccessEquals(t, 32*TB, FromHumanSize, "32Tb")
+	assertSuccessEquals(t, 32*PB, FromHumanSize, "32Pb")
+
+	assertError(t, FromHumanSize, "")
+	assertError(t, FromHumanSize, "hello")
+	assertError(t, FromHumanSize, "-32")
+	assertError(t, FromHumanSize, "32.3")
+	assertError(t, FromHumanSize, " 32 ")
+	assertError(t, FromHumanSize, "32.3Kb")
+	assertError(t, FromHumanSize, "32 mb")
+	assertError(t, FromHumanSize, "32m b")
+	assertError(t, FromHumanSize, "32bm")
+}
+
+func TestRAMInBytes(t *testing.T) {
+	assertSuccessEquals(t, 32, RAMInBytes, "32")
+	assertSuccessEquals(t, 32, RAMInBytes, "32b")
+	assertSuccessEquals(t, 32, RAMInBytes, "32B")
+	assertSuccessEquals(t, 32*KiB, RAMInBytes, "32k")
+	assertSuccessEquals(t, 32*KiB, RAMInBytes, "32K")
+	assertSuccessEquals(t, 32*KiB, RAMInBytes, "32kb")
+	assertSuccessEquals(t, 32*KiB, RAMInBytes, "32Kb")
+	assertSuccessEquals(t, 32*MiB, RAMInBytes, "32Mb")
+	assertSuccessEquals(t, 32*GiB, RAMInBytes, "32Gb")
+	assertSuccessEquals(t, 32*TiB, RAMInBytes, "32Tb")
+	assertSuccessEquals(t, 32*PiB, RAMInBytes, "32Pb")
+	assertSuccessEquals(t, 32*PiB, RAMInBytes, "32PB")
+	assertSuccessEquals(t, 32*PiB, RAMInBytes, "32P")
+
+	assertError(t, RAMInBytes, "")
+	assertError(t, RAMInBytes, "hello")
+	assertError(t, RAMInBytes, "-32")
+	assertError(t, RAMInBytes, "32.3")
+	assertError(t, RAMInBytes, " 32 ")
+	assertError(t, RAMInBytes, "32.3Kb")
+	assertError(t, RAMInBytes, "32 mb")
+	assertError(t, RAMInBytes, "32m b")
+	assertError(t, RAMInBytes, "32bm")
+}
+
+func assertEquals(t *testing.T, expected, actual interface{}) {
+	if expected != actual {
+		t.Errorf("Expected '%v' but got '%v'", expected, actual)
+	}
+}
+
+// parseFn matches the signature of the size-parsing functions and serves as a
+// testing abstraction over them.
+type parseFn func(string) (int64, error)
+
+// String is defined for pretty-printing a parseFn in test failures.
+func (fn parseFn) String() string {
+	fnName := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name()
+	return fnName[strings.LastIndex(fnName, ".")+1:]
+}
+
+func assertSuccessEquals(t *testing.T, expected int64, fn parseFn, arg string) {
+	res, err := fn(arg)
+	if err != nil || res != expected {
+		t.Errorf("%s(\"%s\") -> expected '%d' but got '%d' with error '%v'", fn, arg, expected, res, err)
+	}
+}
+
+func assertError(t *testing.T, fn parseFn, arg string) {
+	res, err := fn(arg)
+	if err == nil && res != -1 {
+		t.Errorf("%s(\"%s\") -> expected error but got '%d'", fn, arg, res)
+	}
+}
diff --git a/pkg/urlutil/urlutil.go b/pkg/urlutil/urlutil.go
new file mode 100644
index 00000000..7250643d
--- /dev/null
+++ b/pkg/urlutil/urlutil.go
@@ -0,0 +1,48 @@
+package urlutil
+
+import (
+	"regexp"
+	"strings"
+)
+
+var (
+	validPrefixes = map[string][]string{
+		"url":       {"http://", "https://"},
+		"git":       {"git://", "github.com/", "git@"},
+		"transport": {"tcp://", "udp://", "unix://"},
+	}
+	urlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$")
+)
+
+// IsURL returns true if the provided str is an HTTP(S) URL.
+func IsURL(str string) bool {
+	return checkURL(str, "url")
+}
+
+// IsGitURL returns true if the provided str is a git repository URL.
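+//
+// For example, "git://github.com/docker/docker" and
+// "https://github.com/docker/docker.git" are git URLs, and so is the
+// incomplete "github.com/docker/docker", which matches the "git" prefix table
+// (see the test cases in urlutil_test.go below).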
+func IsGitURL(str string) bool { + if IsURL(str) && urlPathWithFragmentSuffix.MatchString(str) { + return true + } + return checkURL(str, "git") +} + +// IsGitTransport returns true if the provided str is a git transport by inspecting +// the prefix of the string for known protocols used in git. +func IsGitTransport(str string) bool { + return IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") +} + +// IsTransportURL returns true if the provided str is a transport (tcp, udp, unix) URL. +func IsTransportURL(str string) bool { + return checkURL(str, "transport") +} + +func checkURL(str, kind string) bool { + for _, prefix := range validPrefixes[kind] { + if strings.HasPrefix(str, prefix) { + return true + } + } + return false +} diff --git a/pkg/urlutil/urlutil_test.go b/pkg/urlutil/urlutil_test.go new file mode 100644 index 00000000..bb89d8b5 --- /dev/null +++ b/pkg/urlutil/urlutil_test.go @@ -0,0 +1,55 @@ +package urlutil + +import "testing" + +var ( + gitUrls = []string{ + "git://github.com/docker/docker", + "git@github.com:docker/docker.git", + "git@bitbucket.org:atlassianlabs/atlassian-docker.git", + "https://github.com/docker/docker.git", + "http://github.com/docker/docker.git", + "http://github.com/docker/docker.git#branch", + "http://github.com/docker/docker.git#:dir", + } + incompleteGitUrls = []string{ + "github.com/docker/docker", + } + invalidGitUrls = []string{ + "http://github.com/docker/docker.git:#branch", + } +) + +func TestValidGitTransport(t *testing.T) { + for _, url := range gitUrls { + if IsGitTransport(url) == false { + t.Fatalf("%q should be detected as valid Git prefix", url) + } + } + + for _, url := range incompleteGitUrls { + if IsGitTransport(url) == true { + t.Fatalf("%q should not be detected as valid Git prefix", url) + } + } +} + +func TestIsGIT(t *testing.T) { + for _, url := range gitUrls { + if IsGitURL(url) == false { + t.Fatalf("%q should be detected as valid Git url", url) + } + } + + for _, url := range incompleteGitUrls { + if IsGitURL(url) == false { + t.Fatalf("%q should be detected as valid Git url", url) + } + } + + for _, url := range invalidGitUrls { + if IsGitURL(url) == true { + t.Fatalf("%q should not be detected as valid Git prefix", url) + } + } +} diff --git a/pkg/useragent/README.md b/pkg/useragent/README.md new file mode 100644 index 00000000..d9cb367d --- /dev/null +++ b/pkg/useragent/README.md @@ -0,0 +1 @@ +This package provides helper functions to pack version information into a single User-Agent header. diff --git a/pkg/useragent/useragent.go b/pkg/useragent/useragent.go new file mode 100644 index 00000000..9e35d1c7 --- /dev/null +++ b/pkg/useragent/useragent.go @@ -0,0 +1,60 @@ +// Package useragent provides helper functions to pack +// version information into a single User-Agent header. +package useragent + +import ( + "errors" + "strings" +) + +var ( + ErrNilRequest = errors.New("request cannot be nil") +) + +// VersionInfo is used to model UserAgent versions. +type VersionInfo struct { + Name string + Version string +} + +func (vi *VersionInfo) isValid() bool { + const stopChars = " \t\r\n/" + name := vi.Name + vers := vi.Version + if len(name) == 0 || strings.ContainsAny(name, stopChars) { + return false + } + if len(vers) == 0 || strings.ContainsAny(vers, stopChars) { + return false + } + return true +} + +// Convert versions to a string and append the string to the string base. 
+//
+// Each VersionInfo will be converted to a string in the format of
+// "product/version", where the product is taken from the Name field and the
+// version from the Version field. The pieces of version information are
+// concatenated, separated by single spaces.
+//
+// Example:
+// AppendVersions("base", VersionInfo{"foo", "1.0"}, VersionInfo{"bar", "2.0"})
+// results in "base foo/1.0 bar/2.0".
+func AppendVersions(base string, versions ...VersionInfo) string {
+	if len(versions) == 0 {
+		return base
+	}
+
+	verstrs := make([]string, 0, 1+len(versions))
+	if len(base) > 0 {
+		verstrs = append(verstrs, base)
+	}
+
+	for _, v := range versions {
+		if !v.isValid() {
+			continue
+		}
+		verstrs = append(verstrs, v.Name+"/"+v.Version)
+	}
+	return strings.Join(verstrs, " ")
+}
diff --git a/pkg/useragent/useragent_test.go b/pkg/useragent/useragent_test.go
new file mode 100644
index 00000000..0ad7243a
--- /dev/null
+++ b/pkg/useragent/useragent_test.go
@@ -0,0 +1,31 @@
+package useragent
+
+import "testing"
+
+func TestVersionInfo(t *testing.T) {
+	vi := VersionInfo{"foo", "bar"}
+	if !vi.isValid() {
+		t.Fatalf("VersionInfo should be valid")
+	}
+	vi = VersionInfo{"", "bar"}
+	if vi.isValid() {
+		t.Fatalf("Expected VersionInfo to be invalid")
+	}
+	vi = VersionInfo{"foo", ""}
+	if vi.isValid() {
+		t.Fatalf("Expected VersionInfo to be invalid")
+	}
+}
+
+func TestAppendVersions(t *testing.T) {
+	vis := []VersionInfo{
+		{"foo", "1.0"},
+		{"bar", "0.1"},
+		{"pi", "3.1.4"},
+	}
+	v := AppendVersions("base", vis...)
+	expect := "base foo/1.0 bar/0.1 pi/3.1.4"
+	if v != expect {
+		t.Fatalf("expected %q, got %q", expect, v)
+	}
+}
diff --git a/pkg/version/version.go b/pkg/version/version.go
new file mode 100644
index 00000000..bd5ec7a8
--- /dev/null
+++ b/pkg/version/version.go
@@ -0,0 +1,63 @@
+package version
+
+import (
+	"strconv"
+	"strings"
+)
+
+// Version provides utility methods for comparing versions.
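+// A Version is a dotted numeric string such as "1.8.3". Comparisons walk the
+// dot-separated fields from left to right, comparing each pair numerically;
+// missing fields are treated as zero, so "1" and "1.0.0" compare equal.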
+type Version string + +func (v Version) compareTo(other Version) int { + var ( + currTab = strings.Split(string(v), ".") + otherTab = strings.Split(string(other), ".") + ) + + max := len(currTab) + if len(otherTab) > max { + max = len(otherTab) + } + for i := 0; i < max; i++ { + var currInt, otherInt int + + if len(currTab) > i { + currInt, _ = strconv.Atoi(currTab[i]) + } + if len(otherTab) > i { + otherInt, _ = strconv.Atoi(otherTab[i]) + } + if currInt > otherInt { + return 1 + } + if otherInt > currInt { + return -1 + } + } + return 0 +} + +// LessThan checks if a version is less than another +func (v Version) LessThan(other Version) bool { + return v.compareTo(other) == -1 +} + +// LessThanOrEqualTo checks if a version is less than or equal to another +func (v Version) LessThanOrEqualTo(other Version) bool { + return v.compareTo(other) <= 0 +} + +// GreaterThan checks if a version is greater than another +func (v Version) GreaterThan(other Version) bool { + return v.compareTo(other) == 1 +} + +// GreaterThanOrEqualTo checks if a version is greater than or equal to another +func (v Version) GreaterThanOrEqualTo(other Version) bool { + return v.compareTo(other) >= 0 +} + +// Equal checks if a version is equal to another +func (v Version) Equal(other Version) bool { + return v.compareTo(other) == 0 +} diff --git a/pkg/version/version_test.go b/pkg/version/version_test.go new file mode 100644 index 00000000..c02ec40f --- /dev/null +++ b/pkg/version/version_test.go @@ -0,0 +1,27 @@ +package version + +import ( + "testing" +) + +func assertVersion(t *testing.T, a, b string, result int) { + if r := Version(a).compareTo(Version(b)); r != result { + t.Fatalf("Unexpected version comparison result. Found %d, expected %d", r, result) + } +} + +func TestCompareVersion(t *testing.T) { + assertVersion(t, "1.12", "1.12", 0) + assertVersion(t, "1.0.0", "1", 0) + assertVersion(t, "1", "1.0.0", 0) + assertVersion(t, "1.05.00.0156", "1.0.221.9289", 1) + assertVersion(t, "1", "1.0.1", -1) + assertVersion(t, "1.0.1", "1", 1) + assertVersion(t, "1.0.1", "1.0.2", -1) + assertVersion(t, "1.0.2", "1.0.3", -1) + assertVersion(t, "1.0.3", "1.1", -1) + assertVersion(t, "1.1", "1.1.1", -1) + assertVersion(t, "1.1.1", "1.1.2", -1) + assertVersion(t, "1.1.2", "1.2", -1) + +} diff --git a/project/CONTRIBUTORS.md b/project/CONTRIBUTORS.md new file mode 120000 index 00000000..44fcc634 --- /dev/null +++ b/project/CONTRIBUTORS.md @@ -0,0 +1 @@ +../CONTRIBUTING.md \ No newline at end of file diff --git a/project/GOVERNANCE.md b/project/GOVERNANCE.md new file mode 100644 index 00000000..6ae7baf7 --- /dev/null +++ b/project/GOVERNANCE.md @@ -0,0 +1,17 @@ +# Docker Governance Advisory Board Meetings + +In the spirit of openness, Docker created a Governance Advisory Board, and committed to make all materials and notes from the meetings of this group public. +All output from the meetings should be considered proposals only, and are subject to the review and approval of the community and the project leadership. 
+
+The materials from the first Docker Governance Advisory Board meeting, held on October 28, 2014, are available in a
+[Google Docs Folder](https://goo.gl/Alfj8r).
+
+These include:
+
+* First Meeting Notes
+* DGAB Charter
+* Presentation 1: Introductory Presentation, including State of The Project
+* Presentation 2: Overall Contribution Structure/Docker Project Core Proposal
+* Presentation 3: Long Term Roadmap/Statement of Direction
+
+
diff --git a/project/IRC-ADMINISTRATION.md b/project/IRC-ADMINISTRATION.md
new file mode 100644
index 00000000..824a14bd
--- /dev/null
+++ b/project/IRC-ADMINISTRATION.md
@@ -0,0 +1,37 @@
+# Freenode IRC Administration Guidelines and Tips
+
+This is not meant to be a general "Here's how to IRC" document, so if you're
+looking for that, check Google instead. ♥
+
+If you've been charged with helping maintain one of Docker's now many IRC
+channels, this might turn out to be useful. If there's information that you
+wish you'd known about how a particular channel is organized, you should add
+deets here! :)
+
+## `ChanServ`
+
+Most channel maintenance happens by talking to Freenode's `ChanServ` bot. For
+example, `/msg ChanServ ACCESS LIST` will show you a list of everyone
+with "access" privileges for a particular channel.
+
+A similar command is used to give someone a particular access level. For
+example, to add a new maintainer to the `#docker-maintainers` access list so
+that they can contribute to the discussions (after they've been merged
+appropriately in a `MAINTAINERS` file, of course), one would use `/msg ChanServ
+ACCESS #docker-maintainers ADD maintainer`.
+
+To set up a new channel with a similar `maintainer` access template, use a
+command like `/msg ChanServ TEMPLATE maintainer +AV` (`+A` for letting
+them view the `ACCESS LIST`, `+V` for auto-voice; see `/msg ChanServ HELP FLAGS`
+for more details).
+
+## Troubleshooting
+
+The most common cause of not-getting-auto-`+v` woes is people not being
+`IDENTIFY`ed with `NickServ` (or their current nickname not being `GROUP`ed with
+their main nickname) -- often manifested by `ChanServ` responding to an `ACCESS
+ADD` request with something like `xyz is not registered.`.
+
+This is easily fixed by doing `/msg NickServ IDENTIFY OldNick SecretPassword`
+followed by `/msg NickServ GROUP` to group the two nicknames together. See
+`/msg NickServ HELP GROUP` for more information.
diff --git a/project/ISSUE-TRIAGE.md b/project/ISSUE-TRIAGE.md
new file mode 100644
index 00000000..84bbf0fa
--- /dev/null
+++ b/project/ISSUE-TRIAGE.md
@@ -0,0 +1,104 @@
+Triaging of issues
+------------------
+
+Triage provides an important way to contribute to an open source project. Triage helps ensure issues are resolved quickly by:
+
+- Ensuring the issue's intent and purpose are conveyed precisely. This is necessary because it can be difficult for an issue to explain how an end user experiences a problem and what actions they took.
+
+- Giving a contributor the information they need before they commit to resolving an issue.
+
+- Lowering the issue count by preventing duplicate issues.
+
+- Streamlining the development process by preventing duplicate discussions.
+
+If you don't have time to code, consider helping with triage. The community will thank you for saving them time by spending some of yours.
+
+### Step 1: Ensure the issue contains basic information
+
+Before triaging an issue very far, make sure that the issue's author provided the standard issue information.
This will help you make an educated recommendation on how to categorize the issue. Standard information that *must* be included in most issues includes things such as:
+
+- the output of `docker version`
+- the output of `docker info`
+- the output of `uname -a`
+- a reproducible case if this is a bug, Dockerfiles FTW
+- host distribution and version (Ubuntu 14.04, RHEL, Fedora 21)
+- page URL if this is a docs issue or the name of a man page
+
+Depending on the issue, you might not feel all this information is needed. Use your best judgement. If you cannot triage an issue using what its author provided, explain kindly to the author that they must provide the above information to clarify the problem.
+
+If the author provides the standard information but you are still unable to triage the issue, request additional information. Do this kindly and politely because you are asking for more of the author's time.
+
+If the author does not respond to a request for information within a week, close the issue with a kind note stating that the author can ask for the issue to be
+reopened when the necessary information is provided.
+
+### Step 2: Apply the template
+
+When triaging, use the standard template below. You should copy the template and paste it into the issue's description.
+The template helps other reviewers find key information in an issue. For example, using a template saves a
+potential contributor from wading through hundreds of comments to find a proposed solution at the very end. When adding
+the template to the issue's description, also add any required classification and difficulty labels to the issue.
+
+Here is a sample summary for an [issue](https://github.com/docker/docker/issues/10545).
+
+```
+**Summary**: docker rm can return a non-zero exit code if the container does not
+exist and it is not easy to parse the error message.
+
+**Proposed solution**:
+
+docker rm should have consistent exit codes for different types of errors so
+that the user can easily script and know the reason why the command failed.
+
+```
+
+### Step 3: Classify the Issue
+
+Classifications help both to inform readers about an issue's priority and how to resolve it.
+This is also helpful for identifying new, critical issues. "Kind" classifications are
+applied to the issue or pull request using labels. You can apply one or more labels.
+
+
+Kinds of classifications:
+
+| Kind             | Description                                                                                                                       |
+|------------------|-----------------------------------------------------------------------------------------------------------------------------------|
+| kind/enhancement | Enhancements are not bugs or new features but can drastically improve usability or performance of a project component.           |
+| kind/cleanup     | Refactoring code or otherwise clarifying documentation.                                                                           |
+| kind/content     | Content that is not documentation, such as help or error messages.                                                               |
+| kind/graphics    | Work involving graphics skills.                                                                                                   |
+| kind/regression  | Regressions are usually easy fixes as hopefully the action worked previously and git history can be used to propose a solution.  |
+| kind/bug         | Bugs are bugs. The cause may or may not be known at triage time, so debugging should be taken into account in the time estimate. |
+| kind/feature     | Functionality or other elements that the project does not currently support. Features are new and shiny.                         |
+| kind/question    | Contains a user or contributor question requiring a response.                                                                     |
+| kind/usecase     | A description of a user or contributor situation requiring a response, perhaps in code or documentation.                         |
+| kind/writing     | Writing documentation, man pages, articles, blogs, or other significant word-driven tasks.                                       |
+| kind/test        | Tests or test infrastructure that needs adding or updating.                                                                       |
+
+
+Contributors can add labels by using `+kind/bug` in an issue or pull request comment.
+
+### Step 4: Estimate the experience level required
+
+Experience level is a way for a contributor to find an issue based on their
+skill set. Experience types are applied to the issue or pull request using
+labels.
+
+| Level            | Experience level guideline                                                                                                 |
+|------------------|----------------------------------------------------------------------------------------------------------------------------|
+| exp/beginner     | You have made less than 10 contributions in your lifetime to any open source project.                                     |
+| exp/novice       | You have made more than 10 contributions to an open source project or at least 5 contributions to Docker.                 |
+| exp/proficient   | You have made more than 5 contributions to Docker which amount to at least 200 code lines or 1000 documentation lines.    |
+| exp/expert       | You have made less than 20 commits to Docker which amount to 500-1000 code lines or 1000-3000 documentation lines.        |
+| exp/master       | You have made more than 20 commits to Docker and greater than 1000 code lines or 3000 documentation lines.                |
+
+As the table states, these labels are meant as guidelines. You might have
+written a whole plugin for Docker in a personal project and never contributed to
+Docker. With that kind of experience, you could take on an exp/expert or exp/master level task.
+
+Contributors can add labels by using the `+exp/expert` format in an issue comment.
+
+
+And that's it. That should be all the information required for a new or existing contributor to come in and resolve an issue.
+
diff --git a/project/PACKAGERS.md b/project/PACKAGERS.md
new file mode 100644
index 00000000..6a540c28
--- /dev/null
+++ b/project/PACKAGERS.md
@@ -0,0 +1,330 @@
+# Dear Packager,
+
+If you are looking to make Docker available on your favorite software
+distribution, this document is for you. It summarizes the requirements for
+building and running the Docker client and the Docker daemon.
+
+## Getting Started
+
+We want to help you package Docker successfully. Before doing any packaging, a
+good first step is to introduce yourself on the [docker-dev mailing
+list](https://groups.google.com/d/forum/docker-dev), explain what you're trying
+to achieve, and tell us how we can help. Don't worry, we don't bite! There might
+even be someone already working on packaging for the same distro!
+
+You can also join us on IRC - the #docker and #docker-dev channels on Freenode
+are both active and friendly.
+
+We like to refer to Tianon ("@tianon" on GitHub and "tianon" on IRC) as our
+"Packagers Relations", since he's always working to make sure our packagers have
+a good, healthy upstream to work with (both in our communication and in our
+build scripts). If you're having any kind of trouble, feel free to ping him
+directly. He also likes to keep track of what distributions we have packagers
+for, so feel free to reach out to him even just to say "Hi!"
+
+## Package Name
+
+If possible, your package should be called "docker". If that name is already
+taken, a second choice is "docker-engine". Another possible choice is "docker.io".
+
+## Official Build vs Distro Build
+
+The Docker project maintains its own build and release toolchain. It is pretty
+neat and entirely based on Docker (surprise!). This toolchain is the canonical
+way to build Docker.
We encourage you to give it a try, and if the circumstances +allow you to use it, we recommend that you do. + +You might not be able to use the official build toolchain - usually because your +distribution has a toolchain and packaging policy of its own. We get it! Your +house, your rules. The rest of this document should give you the information you +need to package Docker your way, without denaturing it in the process. + +## Build Dependencies + +To build Docker, you will need the following: + +* A recent version of Git and Mercurial +* Go version 1.4 or later +* A clean checkout of the source added to a valid [Go + workspace](https://golang.org/doc/code.html#Workspaces) under the path + *src/github.com/docker/docker* (unless you plan to use `AUTO_GOPATH`, + explained in more detail below) + +To build the Docker daemon, you will additionally need: + +* An amd64/x86_64 machine running Linux +* SQLite version 3.7.9 or later +* libdevmapper version 1.02.68-cvs (2012-01-26) or later from lvm2 version + 2.02.89 or later +* btrfs-progs version 3.16.1 or later (unless using an older version is + absolutely necessary, in which case 3.8 is the minimum) + +Be sure to also check out Docker's Dockerfile for the most up-to-date list of +these build-time dependencies. + +### Go Dependencies + +All Go dependencies are vendored under "./vendor". They are used by the official +build, so the source of truth for the current version of each dependency is +whatever is in "./vendor". + +To use the vendored dependencies, simply make sure the path to "./vendor" is +included in `GOPATH` (or use `AUTO_GOPATH`, as explained below). + +If you would rather (or must, due to distro policy) package these dependencies +yourself, take a look at "./hack/vendor.sh" for an easy-to-parse list of the +exact version for each. + +NOTE: if you're not able to package the exact version (to the exact commit) of a +given dependency, please get in touch so we can remediate! Who knows what +discrepancies can be caused by even the slightest deviation. We promise to do +our best to make everybody happy. + +## Stripping Binaries + +Please, please, please do not strip any compiled binaries. This is really +important. + +In our own testing, stripping the resulting binaries sometimes results in a +binary that appears to work, but more often causes random panics, segfaults, and +other issues. Even if the binary appears to work, please don't strip. + +See the following quotes from Dave Cheney, which explain this position better +from the upstream Golang perspective. + +### [go issue #5855, comment #3](https://code.google.com/p/go/issues/detail?id=5855#c3) + +> Super super important: Do not strip go binaries or archives. It isn't tested, +> often breaks, and doesn't work. + +### [launchpad golang issue #1200255, comment #8](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/8) + +> To quote myself: "Please do not strip Go binaries, it is not supported, not +> tested, is often broken, and doesn't do what you want" +> +> To unpack that a bit +> +> * not supported, as in, we don't support it, and recommend against it when +> asked +> * not tested, we don't test stripped binaries as part of the build CI process +> * is often broken, stripping a go binary will produce anywhere from no, to +> subtle, to outright execution failure, see above + +### [launchpad golang issue #1200255, comment #13](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/13) + +> To clarify my previous statements. 
+> +> * I do not disagree with the debian policy, it is there for a good reason +> * Having said that, it stripping Go binaries doesn't work, and nobody is +> looking at making it work, so there is that. +> +> Thanks for patching the build formula. + +## Building Docker + +Please use our build script ("./hack/make.sh") for all your compilation of +Docker. If there's something you need that it isn't doing, or something it could +be doing to make your life as a packager easier, please get in touch with Tianon +and help us rectify the situation. Chances are good that other packagers have +probably run into the same problems and a fix might already be in the works, but +none of us will know for sure unless you harass Tianon about it. :) + +All the commands listed within this section should be run with the Docker source +checkout as the current working directory. + +### `AUTO_GOPATH` + +If you'd rather not be bothered with the hassles that setting up `GOPATH` +appropriately can be, and prefer to just get a "build that works", you should +add something similar to this to whatever script or process you're using to +build Docker: + +```bash +export AUTO_GOPATH=1 +``` + +This will cause the build scripts to set up a reasonable `GOPATH` that +automatically and properly includes both docker/docker from the local +directory, and the local "./vendor" directory as necessary. + +### `DOCKER_BUILDTAGS` + +If you're building a binary that may need to be used on platforms that include +AppArmor, you will need to set `DOCKER_BUILDTAGS` as follows: +```bash +export DOCKER_BUILDTAGS='apparmor' +``` + +If you're building a binary that may need to be used on platforms that include +SELinux, you will need to use the `selinux` build tag: +```bash +export DOCKER_BUILDTAGS='selinux' +``` + +There are build tags for disabling graphdrivers as well. By default, support +for all graphdrivers are built in. + +To disable btrfs: +```bash +export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs' +``` + +To disable devicemapper: +```bash +export DOCKER_BUILDTAGS='exclude_graphdriver_devicemapper' +``` + +To disable aufs: +```bash +export DOCKER_BUILDTAGS='exclude_graphdriver_aufs' +``` + +NOTE: if you need to set more than one build tag, space separate them: +```bash +export DOCKER_BUILDTAGS='apparmor selinux exclude_graphdriver_aufs' +``` + +### Static Daemon + +If it is feasible within the constraints of your distribution, you should +seriously consider packaging Docker as a single static binary. A good comparison +is Busybox, which is often packaged statically as a feature to enable mass +portability. Because of the unique way Docker operates, being similarly static +is a "feature". + +To build a static Docker daemon binary, run the following command (first +ensuring that all the necessary libraries are available in static form for +linking - see the "Build Dependencies" section above, and the relevant lines +within Docker's own Dockerfile that set up our official build environment): + +```bash +./hack/make.sh binary +``` + +This will create a static binary under +"./bundles/$VERSION/binary/docker-$VERSION", where "$VERSION" is the contents of +the file "./VERSION". This binary is usually installed somewhere like +"/usr/bin/docker". 
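+
+As a quick sanity check that the result really is static (a minimal sketch,
+assuming GNU `ldd` and `file` are available on the build machine):
+
+```bash
+# ldd exits non-zero and prints "not a dynamic executable" for a static binary
+ldd "bundles/$(cat VERSION)/binary/docker-$(cat VERSION)" || true
+# file should report the binary as "statically linked"
+file "bundles/$(cat VERSION)/binary/docker-$(cat VERSION)"
+```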
+ +### Dynamic Daemon / Client-only Binary + +If you are only interested in a Docker client binary, set `DOCKER_CLIENTONLY` to a non-empty value using something similar to the following: (which will prevent the extra step of compiling dockerinit) + +```bash +export DOCKER_CLIENTONLY=1 +``` + +If you need to (due to distro policy, distro library availability, or for other +reasons) create a dynamically compiled daemon binary, or if you are only +interested in creating a client binary for Docker, use something similar to the +following: + +```bash +./hack/make.sh dynbinary +``` + +This will create "./bundles/$VERSION/dynbinary/docker-$VERSION", which for +client-only builds is the important file to grab and install as appropriate. + +For daemon builds, you will also need to grab and install +"./bundles/$VERSION/dynbinary/dockerinit-$VERSION", which is created from the +minimal set of Docker's codebase that _must_ be compiled statically (and is thus +a pure static binary). The acceptable locations Docker will search for this file +are as follows (in order): + +* as "dockerinit" in the same directory as the daemon binary (ie, if docker is + installed at "/usr/bin/docker", then "/usr/bin/dockerinit" will be the first + place this file is searched for) +* "/usr/libexec/docker/dockerinit" or "/usr/local/libexec/docker/dockerinit" + ([FHS 3.0 Draft](https://www.linuxbase.org/betaspecs/fhs/fhs.html#usrlibexec)) +* "/usr/lib/docker/dockerinit" or "/usr/local/lib/docker/dockerinit" ([FHS + 2.3](https://refspecs.linuxfoundation.org/FHS_2.3/fhs-2.3.html#USRLIBLIBRARIESFORPROGRAMMINGANDPA)) + +If (and please, only if) one of the paths above is insufficient due to distro +policy or similar issues, you may use the `DOCKER_INITPATH` environment variable +at compile-time as follows to set a different path for Docker to search: + +```bash +export DOCKER_INITPATH=/usr/lib/docker.io/dockerinit +``` + +If you find yourself needing this, please don't hesitate to reach out to Tianon +to see if it would be reasonable or helpful to add more paths to Docker's list, +especially if there's a relevant standard worth referencing (such as the FHS). + +Also, it goes without saying, but for the purposes of the daemon please consider +these two binaries ("docker" and "dockerinit") as if they were a single unit. +Mixing and matching can cause undesired consequences, and will fail to run +properly. + +## System Dependencies + +### Runtime Dependencies + +To function properly, the Docker daemon needs the following software to be +installed and available at runtime: + +* iptables version 1.4 or later +* procps (or similar provider of a "ps" executable) +* e2fsprogs version 1.4.12 or later (in use: mkfs.ext4, mkfs.xfs, tune2fs) +* XZ Utils version 4.9 or later +* a [properly + mounted](https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount) + cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point + [is](https://github.com/docker/docker/issues/2683) + [not](https://github.com/docker/docker/issues/3485) + [sufficient](https://github.com/docker/docker/issues/4568)) + +Additionally, the Docker client needs the following software to be installed and +available at runtime: + +* Git version 1.7 or later + +### Kernel Requirements + +The Docker daemon has very specific kernel requirements. Most pre-packaged +kernels already include the necessary options enabled. 
If you are building your +own kernel, you will either need to discover the options necessary via trial and +error, or check out the [Gentoo +ebuild](https://github.com/tianon/docker-overlay/blob/master/app-emulation/docker/docker-9999.ebuild), +in which a list is maintained (and if there are any issues or discrepancies in +that list, please contact Tianon so they can be rectified). + +Note that in client mode, there are no specific kernel requirements, and that +the client will even run on alternative platforms such as Mac OS X / Darwin. + +### Optional Dependencies + +Some of Docker's features are activated by using optional command-line flags or +by having support for them in the kernel or userspace. A few examples include: + +* LXC execution driver (requires version 1.0.7 or later of lxc and the lxc-libs) +* AUFS graph driver (requires AUFS patches/support enabled in the kernel, and at + least the "auplink" utility from aufs-tools) +* BTRFS graph driver (requires BTRFS support enabled in the kernel) +* ZFS graph driver (requires userspace zfs-utils and a corresponding kernel module) + +## Daemon Init Script + +Docker expects to run as a daemon at machine startup. Your package will need to +include a script for your distro's process supervisor of choice. Be sure to +check out the "contrib/init" folder in case a suitable init script already +exists (and if one does not, contact Tianon about whether it might be +appropriate for your distro's init script to live there too!). + +In general, Docker should be run as root, similar to the following: + +```bash +docker -d +``` + +Generally, a `DOCKER_OPTS` variable of some kind is available for adding more +flags (such as changing the graph driver to use BTRFS, switching the location of +"/var/lib/docker", etc). + +## Communicate + +As a final note, please do feel free to reach out to Tianon at any time for +pretty much anything. He really does love hearing from our packagers and wants +to make sure we're not being a "hostile upstream". As should be a given, we +appreciate the work our packagers do to make sure we have broad distribution! diff --git a/project/PRINCIPLES.md b/project/PRINCIPLES.md new file mode 100644 index 00000000..53f03018 --- /dev/null +++ b/project/PRINCIPLES.md @@ -0,0 +1,19 @@ +# Docker principles + +In the design and development of Docker we try to follow these principles: + +(Work in progress) + +* Don't try to replace every tool. Instead, be an ingredient to improve them. +* Less code is better. +* Fewer components are better. Do you really need to add one more class? +* 50 lines of straightforward, readable code is better than 10 lines of magic that nobody can understand. +* Don't do later what you can do now. "//FIXME: refactor" is not acceptable in new code. +* When hesitating between 2 options, choose the one that is easier to reverse. +* No is temporary, Yes is forever. If you're not sure about a new feature, say no. You can change your mind later. +* Containers must be portable to the greatest possible number of machines. Be suspicious of any change which makes machines less interchangeable. +* The less moving parts in a container, the better. +* Don't merge it unless you document it. +* Don't document it unless you can keep it up-to-date. +* Don't merge it unless you test it! +* Everyone's problem is slightly different. Focus on the part that is the same for everyone, and solve that. 
diff --git a/project/README.md b/project/README.md new file mode 100644 index 00000000..3ed68cf2 --- /dev/null +++ b/project/README.md @@ -0,0 +1,24 @@ +# Hacking on Docker + +The `project/` directory holds information and tools for everyone involved in the process of creating and +distributing Docker, specifically: + +## Guides + +If you're a *contributor* or aspiring contributor, you should read [CONTRIBUTORS.md](../CONTRIBUTING.md). + +If you're a *maintainer* or aspiring maintainer, you should read [MAINTAINERS](../MAINTAINERS). + +If you're a *packager* or aspiring packager, you should read [PACKAGERS.md](./PACKAGERS.md). + +If you're a maintainer in charge of a *release*, you should read [RELEASE-CHECKLIST.md](./RELEASE-CHECKLIST.md). + +## Roadmap + +A high-level roadmap is available at [ROADMAP.md](../ROADMAP.md). + + +## Build tools + +[hack/make.sh](../hack/make.sh) is the primary build tool for docker. It is used for compiling the official binary, +running the test suite, and pushing releases. diff --git a/project/RELEASE-CHECKLIST.md b/project/RELEASE-CHECKLIST.md new file mode 100644 index 00000000..00409094 --- /dev/null +++ b/project/RELEASE-CHECKLIST.md @@ -0,0 +1,391 @@ +# Release Checklist +## A maintainer's guide to releasing Docker + +So you're in charge of a Docker release? Cool. Here's what to do. + +If your experience deviates from this document, please document the changes +to keep it up-to-date. + +It is important to note that this document assumes that the git remote in your +repository that corresponds to "https://github.com/docker/docker" is named +"origin". If yours is not (for example, if you've chosen to name it "upstream" +or something similar instead), be sure to adjust the listed snippets for your +local environment accordingly. If you are not sure what your upstream remote is +named, use a command like `git remote -v` to find out. + +If you don't have an upstream remote, you can add one easily using something +like: + +```bash +export GITHUBUSER="YOUR_GITHUB_USER" +git remote add origin https://github.com/docker/docker.git +git remote add $GITHUBUSER git@github.com:$GITHUBUSER/docker.git +``` + +### 1. Pull from master and create a release branch + +Note: Even for major releases, all of X, Y and Z in vX.Y.Z must be specified (e.g. v1.0.0). + +```bash +export VERSION=vX.Y.Z +git fetch origin +git branch -D release || true +git checkout --track origin/release +git checkout -b bump_$VERSION +``` + +If it's a regular release, we usually merge master. +```bash +git merge origin/master +``` + +Otherwise, if it is a hotfix release, we cherry-pick only the commits we want. +```bash +# get the commits ids we want to cherry-pick +git log +# cherry-pick the commits starting from the oldest one, without including merge commits +git cherry-pick +git cherry-pick +... +``` + +### 2. Bump the API version on master + +We don't want to stop contributions to master just because we are releasing. At +the same time, now that the release branch exists, we don't want API changes to +go to the now frozen API version. + +Create a new entry in `docs/reference/api/` by copying the latest and +bumping the version number (in both the file's name and content), and submit +this in a PR against master. + +### 3. 
Update CHANGELOG.md + +You can run this command for reference with git 2.0: + +```bash +git fetch --tags +LAST_VERSION=$(git tag -l --sort=-version:refname "v*" | grep -E 'v[0-9\.]+$' | head -1) +git log --stat $LAST_VERSION..bump_$VERSION +``` + +If you don't have git 2.0 but have a sort command that supports `-V`: +```bash +git fetch --tags +LAST_VERSION=$(git tag -l | grep -E 'v[0-9\.]+$' | sort -rV | head -1) +git log --stat $LAST_VERSION..bump_$VERSION +``` + +If releasing a major version (X or Y increased in vX.Y.Z), simply listing notable user-facing features is sufficient. +```markdown +#### Notable features since +* New docker command to do something useful +* Remote API change (deprecating old version) +* Performance improvements in some usecases +* ... +``` + +For minor releases (only Z increases in vX.Y.Z), provide a list of user-facing changes. +Each change should be listed under a category heading formatted as `#### CATEGORY`. + +`CATEGORY` should describe which part of the project is affected. + Valid categories are: + * Builder + * Documentation + * Hack + * Packaging + * Remote API + * Runtime + * Other (please use this category sparingly) + +Each change should be formatted as `BULLET DESCRIPTION`, given: + +* BULLET: either `-`, `+` or `*`, to indicate a bugfix, new feature or + upgrade, respectively. + +* DESCRIPTION: a concise description of the change that is relevant to the + end-user, using the present tense. Changes should be described in terms + of how they affect the user, for example "Add new feature X which allows Y", + "Fix bug which caused X", "Increase performance of Y". + +EXAMPLES: + +```markdown +## 0.3.6 (1995-12-25) + +#### Builder + ++ 'docker build -t FOO .' applies the tag FOO to the newly built image + +#### Remote API + +- Fix a bug in the optional unix socket transport + +#### Runtime + +* Improve detection of kernel version +``` + +If you need a list of contributors between the last major release and the +current bump branch, use something like: +```bash +git log --format='%aN <%aE>' v0.7.0...bump_v0.8.0 | sort -uf +``` +Obviously, you'll need to adjust version numbers as necessary. If you just need +a count, add a simple `| wc -l`. + +### 4. Change the contents of the VERSION file + +Before the big thing, you'll want to make successive release candidates and get +people to test. The release candidate number `N` should be part of the version: + +```bash +export RC_VERSION=${VERSION}-rcN +echo ${RC_VERSION#v} > VERSION +``` + +### 5. Test the docs + +Make sure that your tree includes documentation for any modified or +new features, syntax or semantic changes. + +To test locally: + +```bash +make docs +``` + +To make a shared test at https://beta-docs.docker.io: + +(You will need the `awsconfig` file added to the `docs/` dir) + +```bash +make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release +``` + +### 6. Commit and create a pull request to the "release" branch + +```bash +git add VERSION CHANGELOG.md +git commit -m "Bump version to $VERSION" +git push $GITHUBUSER bump_$VERSION +echo "https://github.com/$GITHUBUSER/docker/compare/docker:release...$GITHUBUSER:bump_$VERSION?expand=1" +``` + +That last command will give you the proper link to visit to ensure that you +open the PR against the "release" branch instead of accidentally against +"master" (like so many brave souls before you already have). + +### 7. Publish release candidate binaries + +To run this you will need access to the release credentials. 
Get them from the +Core maintainers. + +Replace "..." with the respective credentials: + +```bash +docker build -t docker . +docker run \ + -e AWS_S3_BUCKET=test.docker.com \ + -e AWS_ACCESS_KEY="..." \ + -e AWS_SECRET_KEY="..." \ + -e GPG_PASSPHRASE="..." \ + -i -t --privileged \ + docker \ + hack/release.sh +``` + +It will run the test suite, build the binaries and packages, and upload to the +specified bucket, so this is a good time to verify that you're running against +**test**.docker.com. + +After the binaries and packages are uploaded to test.docker.com, make sure +they get tested in both Ubuntu and Debian for any obvious installation +issues or runtime issues. + +If everything looks good, it's time to create a git tag for this candidate: + +```bash +git tag -a $RC_VERSION -m $RC_VERSION bump_$VERSION +git push origin $RC_VERSION +``` + +Announcing on multiple medias is the best way to get some help testing! An easy +way to get some useful links for sharing: + +```bash +echo "Ubuntu/Debian: https://test.docker.com/ubuntu or curl -sSL https://test.docker.com/ | sh" +echo "Linux 64bit binary: https://test.docker.com/builds/Linux/x86_64/docker-${VERSION#v}" +echo "Darwin/OSX 64bit client binary: https://test.docker.com/builds/Darwin/x86_64/docker-${VERSION#v}" +echo "Darwin/OSX 32bit client binary: https://test.docker.com/builds/Darwin/i386/docker-${VERSION#v}" +echo "Linux 64bit tgz: https://test.docker.com/builds/Linux/x86_64/docker-${VERSION#v}.tgz" +``` + +We recommend announcing the release candidate on: + +- IRC on #docker, #docker-dev, #docker-maintainers +- In a comment on the pull request to notify subscribed people on GitHub +- The [docker-dev](https://groups.google.com/forum/#!forum/docker-dev) group +- The [docker-maintainers](https://groups.google.com/a/dockerproject.org/forum/#!forum/maintainers) group +- Any social media that can bring some attention to the release candidate + +### 8. Iterate on successive release candidates + +Spend several days along with the community explicitly investing time and +resources to try and break Docker in every possible way, documenting any +findings pertinent to the release. This time should be spent testing and +finding ways in which the release might have caused various features or upgrade +environments to have issues, not coding. During this time, the release is in +code freeze, and any additional code changes will be pushed out to the next +release. + +It should include various levels of breaking Docker, beyond just using Docker +by the book. + +Any issues found may still remain issues for this release, but they should be +documented and give appropriate warnings. + +During this phase, the `bump_$VERSION` branch will keep evolving as you will +produce new release candidates. The frequency of new candidates is up to the +release manager: use your best judgement taking into account the severity of +reported issues, testers availability, and time to scheduled release date. + +Each time you'll want to produce a new release candidate, you will start by +adding commits to the branch, usually by cherry-picking from master: + +```bash +git cherry-pick -x -m0 +``` + +You want your "bump commit" (the one that updates the CHANGELOG and VERSION +files) to remain on top, so you'll have to `git rebase -i` to bring it back up. 
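+
+As an illustration (a sketch only: the `<commit-id>` placeholders and the
+rebase depth depend on how many commits you cherry-picked):
+
+```bash
+git cherry-pick -x -m0 <commit-id>   # once per commit you want in the new RC
+git rebase -i HEAD~2                 # reorder so the bump commit is on top again
+```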
+ +Now that your bump commit is back on top, you will need to update the CHANGELOG +file (if appropriate for this particular release candidate), and update the +VERSION file to increment the RC number: + +```bash +export RC_VERSION=$VERSION-rcN +echo $RC_VERSION > VERSION +``` + +You can now amend your last commit and update the bump branch: + +```bash +git commit --amend +git push -f $GITHUBUSER bump_$VERSION +``` + +Repeat step 6 to tag the code, publish new binaries, announce availability, and +get help testing. + +### 9. Finalize the bump branch + +When you're happy with the quality of a release candidate, you can move on and +create the real thing. + +You will first have to amend the "bump commit" to drop the release candidate +suffix in the VERSION file: + +```bash +echo $VERSION > VERSION +git add VERSION +git commit --amend +``` + +You will then repeat step 6 to publish the binaries to test + +### 10. Get 2 other maintainers to validate the pull request + +### 11. Publish final binaries + +Once they're tested and reasonably believed to be working, run against +get.docker.com: + +```bash +docker run \ + -e AWS_S3_BUCKET=get.docker.com \ + -e AWS_ACCESS_KEY="..." \ + -e AWS_SECRET_KEY="..." \ + -e GPG_PASSPHRASE="..." \ + -i -t --privileged \ + docker \ + hack/release.sh +``` + +### 12. Apply tag + +It's very important that we don't make the tag until after the official +release is uploaded to get.docker.com! + +```bash +git tag -a $VERSION -m $VERSION bump_$VERSION +git push origin $VERSION +``` + +### 13. Go to github to merge the `bump_$VERSION` branch into release + +Don't forget to push that pretty blue button to delete the leftover +branch afterwards! + +### 14. Update the docs branch + +If this is a MAJOR.MINOR.0 release, you need to make an branch for the previous release's +documentation: + +```bash +git checkout -b docs-$PREVIOUS_MAJOR_MINOR +git fetch +git reset --hard origin/docs +git push -f origin docs-$PREVIOUS_MAJOR_MINOR +``` + +You will need the `awsconfig` file added to the `docs/` directory to contain the +s3 credentials for the bucket you are deploying to. + +```bash +git checkout -b docs release || git checkout docs +git fetch +git reset --hard origin/release +git push -f origin docs +make AWS_S3_BUCKET=docs.docker.com BUILD_ROOT=yes DISTRIBUTION_ID=C2K6......FL2F docs-release +``` + +The docs will appear on https://docs.docker.com/ (though there may be cached +versions, so its worth checking http://docs.docker.com.s3-website-us-east-1.amazonaws.com/). +For more information about documentation releases, see `docs/README.md`. + +Note that the new docs will not appear live on the site until the cache (a complex, +distributed CDN system) is flushed. The `make docs-release` command will do this +_if_ the `DISTRIBUTION_ID` is set correctly - this will take at least 15 minutes to run +and you can check its progress with the CDN Cloudfront Chrome addin. + +### 15. Create a new pull request to merge your bump commit back into master + +```bash +git checkout master +git fetch +git reset --hard origin/master +git cherry-pick $VERSION +git push $GITHUBUSER merge_release_$VERSION +echo "https://github.com/$GITHUBUSER/docker/compare/docker:master...$GITHUBUSER:merge_release_$VERSION?expand=1" +``` + +Again, get two maintainers to validate, then merge, then push that pretty +blue button to delete your branch. + +### 16. Update the VERSION files + +Now that version X.Y.Z is out, time to start working on the next! 
Update the +content of the `VERSION` file to be the next minor (incrementing Y) and add the +`-dev` suffix. For example, after 1.5.0 release, the `VERSION` file gets +updated to `1.6.0-dev` (as in "1.6.0 in the making"). + +### 17. Rejoice and Evangelize! + +Congratulations! You're done. + +Go forth and announce the glad tidings of the new release in `#docker`, +`#docker-dev`, on the [dev mailing list](https://groups.google.com/forum/#!forum/docker-dev), +the [announce mailing list](https://groups.google.com/forum/#!forum/docker-announce), +and on Twitter! diff --git a/project/REVIEWING.md b/project/REVIEWING.md new file mode 100644 index 00000000..63d8173a --- /dev/null +++ b/project/REVIEWING.md @@ -0,0 +1,193 @@ +Pull request reviewing process +============================== + +# Labels + +Labels are carefully picked to optimize for: + + - Readability: maintainers must immediately know the state of a PR + - Filtering simplicity: different labels represent many different aspects of + the reviewing work, and can even be targetted at different maintainers groups. + +A pull request should only be attributed labels documented in this section: other labels that may +exist on the repository should apply to issues. + +## DCO labels + + * `dco/no`: automatically set by a bot when one of the commits lacks proper signature + +## Status labels + + * `status/0-triage` + * `status/1-design-review` + * `status/2-code-review` + * `status/3-docs-review` + * `status/4-ready-to-merge` + +Special status labels: + + * `status/needs-attention`: calls for a collective discussion during a review session + +## Specialty group labels + +Those labels are used to raise awareness of a particular specialty group, either because we need +help in reviewing the PR, or because of the potential impact of the PR on their work: + + * `group/distribution` + * `group/networking` + * `group/security` + * `group/windows` + +## Impact labels (apply to merged pull requests) + + * `impact/api` + * `impact/changelog` + * `impact/cli` + * `impact/dockerfile` + * `impact/deprecation` + +# Workflow + +An opened pull request can be in 1 of 5 distinct states, for each of which there is a corresponding +label that needs to be applied. + +## Triage - `status/0-triage` + +Maintainers are expected to triage new incoming pull requests by removing the `status/0-triage` +label and adding the correct labels (e.g. `status/1-design-review`) before any other interaction +with the PR. The starting label may potentially skip some steps depending on the kind of pull +request: use your best judgement. + +Maintainers should perform an initial, high-level, overview of the pull request before moving it to +the next appropriate stage: + + - Has DCO + - Contains sufficient justification (e.g., usecases) for the proposed change + - References the Github issue it fixes (if any) in the commit or the first Github comment + +Possible transitions from this state: + + * Close: e.g., unresponsive contributor without DCO + * `status/1-design-review`: general case + * `status/2-code-review`: e.g. trivial bugfix + * `status/3-docs-review`: non-proposal documentation-only change + +## Design review - `status/1-design-review` + +Maintainers are expected to comment on the design of the pull request. Review of documentation is +expected only in the context of design validation, not for stylistic changes. + +Ideally, documentation should reflect the expected behavior of the code. No code review should +take place in this step. 
+ +There are no strict rules on the way a design is validated: we usually aim for a consensus, +although a single maintainer approval is often sufficient for obviously reasonable changes. In +general, strong disagreement expressed by any of the maintainers should not be taken lightly. + +Once design is approved, a maintainer should make sure to remove this label and add the next one. + +Possible transitions from this state: + + * Close: design rejected + * `status/2-code-review`: general case + * `status/3-docs-review`: proposals with only documentation changes + +## Code review - `status/2-code-review` + +Maintainers are expected to review the code and ensure that it is good quality and in accordance +with the documentation in the PR. + +New testcases are expected to be added. Ideally, those testcases should fail when the new code is +absent, and pass when present. The testcases should strive to test as many variants, code paths, as +possible to ensure maximum coverage. + +Changes to code must be reviewed and approved (LGTM'd) by a minimum of two code maintainers. When +the author of a PR is a maintainer, he still needs the approval of two other maintainers. + +Once code is approved according to the rules of the subsystem, a maintainer should make sure to +remove this label and add the next one. If documentation is absent but expected, maintainers should +ask for documentation and move to status `status/3-docs-review` for docs maintainer to follow. + +Possible transitions from this state: + + * Close + * `status/1-design-review`: new design concerns are raised + * `status/3-docs-review`: general case + * `status/4-ready-to-merge`: change not impacting documentation + +## Docs review - `status/3-docs-review` + +Maintainers are expected to review the documentation in its bigger context, ensuring consistency, +completeness, validity, and breadth of coverage across all existing and new documentation. + +They should ask for any editorial change that makes the documentation more consistent and easier to +understand. + +Changes and additions to docs must be reviewed and approved (LGTM'd) by a minimum of two docs +sub-project maintainers. If the docs change originates with a docs maintainer, only one additional +LGTM is required (since we assume a docs maintainer approves of their own PR). + +Once documentation is approved (see below), a maintainer should make sure to remove this label and +add the next one. + +Possible transitions from this state: + + * Close + * `status/1-design-review`: new design concerns are raised + * `status/2-code-review`: requires more code changes + * `status/4-ready-to-merge`: general case + +## Merge - `status/4-ready-to-merge` + +Maintainers are expected to merge this pull request as soon as possible. They can ask for a rebase +or carry the pull request themselves. + +Possible transitions from this state: + + * Merge: general case + * Close: carry PR + +After merging a pull request, the maintainer should consider applying one or multiple impact labels +to ease future classification: + + * `impact/api` signifies the patch impacted the remote API + * `impact/changelog` signifies the change is significant enough to make it in the changelog + * `impact/cli` signifies the patch impacted a CLI command + * `impact/dockerfile` signifies the patch impacted the Dockerfile syntax + * `impact/deprecation` signifies the patch participates in deprecating an existing feature + +## Close + +If a pull request is closed it is expected that sufficient justification will be provided. 
In particular, if there are alternative ways of achieving the same net result, those need to be
+spelled out. If the pull request is trying to solve a use case that is not one that we (as a
+community) want to support, a justification for the decision should be provided.
+
+The number of maintainers it takes to decide and close a PR is deliberately left unspecified. We
+assume that the group of maintainers is bound by mutual trust and respect, and that opposition from
+any single maintainer should be taken into consideration. Similarly, we expect maintainers to
+justify their reasoning and to accept debate.
+
+# Escalation process
+
+Despite the previously described reviewing process, some PRs might not show any progress for various
+reasons:
+
+ - No strong opinion for or against the proposed patch
+ - Debates about the proper way to solve the problem at hand
+ - Lack of consensus
+ - ...
+
+All these will eventually lead to stalled PRs, where no apparent progress is made across several
+weeks, or even months.
+
+Maintainers should use their best judgement and apply the `status/needs-attention` label. It must
+be used sparingly, as each PR with such a label will be discussed by a group of maintainers during a
+review session. The goal of that session is to agree on one of the following outcomes for the PR:
+
+ * Close, explaining the rationale for not pursuing further
+ * Continue, either by pushing the PR further in the workflow, or by deciding to carry the patch
+   (ideally, a maintainer should be immediately assigned to make sure that the PR keeps continued
+   attention)
+ * Escalate to Solomon by formulating a few specific questions whose answers will allow
+   maintainers to decide.
diff --git a/project/TOOLS.md b/project/TOOLS.md
new file mode 100644
index 00000000..6e19606e
--- /dev/null
+++ b/project/TOOLS.md
@@ -0,0 +1,74 @@
+# Tools
+
+This page describes the tools we use and the infrastructure that is in place for
+the Docker project.
+
+### CI
+
+The Docker project uses [Jenkins](https://jenkins.dockerproject.org/) as our
+continuous integration server. Each Pull Request to Docker is tested by running the
+equivalent of `make all`. We chose Jenkins because we can host it ourselves and
+we run Docker in Docker for testing.
+
+#### Leeroy
+
+Leeroy is a Go application which integrates Jenkins with
+GitHub pull requests. Leeroy uses
+[GitHub hooks](https://developer.github.com/v3/repos/hooks/)
+to listen for pull request notifications and starts jobs on your Jenkins
+server. Using the Jenkins [notification plugin][jnp], Leeroy updates the
+pull request using GitHub's
+[status API](https://developer.github.com/v3/repos/statuses/)
+with pending, success, failure, or error statuses.
+
+The leeroy repository is maintained at
+[github.com/docker/leeroy](https://github.com/docker/leeroy).
+
+#### GordonTheTurtle IRC Bot
+
+The GordonTheTurtle IRC Bot lives in the
+[#docker-maintainers](https://botbot.me/freenode/docker-maintainers/) channel
+on Freenode. He is built in Go and is based on the project at
+[github.com/fabioxgn/go-bot](https://github.com/fabioxgn/go-bot).
+
+His main command is `!rebuild`, which rebuilds a given Pull Request for a repository.
+This command works by integrating with Leeroy. He has a few other commands too, such
+as `!gif` or `!godoc`, but we are always looking for more fun commands to add.
+ +The gordon-bot repository is maintained at +[github.com/jfrazelle/gordon-bot](https://github.com/jfrazelle/gordon-bot) + +### NSQ + +We use [NSQ](https://github.com/bitly/nsq) for various aspects of the project +infrastucture. + +#### Hooks + +The hooks project, +[github.com/crosbymichael/hooks](https://github.com/crosbymichael/hooks), +is a small Go application that manages web hooks from github, hub.docker.com, or +other third party services. + +It can be used for listening to github webhooks & pushing them to a queue, +archiving hooks to rethinkdb for processing, and broadcasting hooks to various +jobs. + +#### Docker Master Binaries + +One of the things queued from the Hooks are the building of the Master +Binaries. This happens on every push to the master branch of Docker. The +repository for this is maintained at +[github.com/jfrazelle/docker-bb](https://github.com/jfrazelle/docker-bb). + +#### Docker Master Docs + +The master build of the docs gets queued from the Hooks as well. They are built +using [github.com/jfrazelle/nsqexec](https://github.com/jfrazelle/nsqexec). + +#### Patch Parser Bot + +The bot, also named GordonTheTurtle, that labels and comments on Pull Requests +listens on Hooks as well. He is capable of knowing if a Pull Request needs to +be signed, or gofmt'd, as well as rebased. The repository for this is maintained at +[github.com/jfrazelle/gh-patch-parser](https://github.com/jfrazelle/gh-patch-parser). diff --git a/registry/auth.go b/registry/auth.go new file mode 100644 index 00000000..57560935 --- /dev/null +++ b/registry/auth.go @@ -0,0 +1,254 @@ +package registry + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/cliconfig" +) + +// Login tries to register/login to the registry server. +func Login(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint) (string, error) { + // Separates the v2 registry login logic from the v1 logic. + if registryEndpoint.Version == APIVersion2 { + return loginV2(authConfig, registryEndpoint, "" /* scope */) + } + return loginV1(authConfig, registryEndpoint) +} + +// loginV1 tries to register/login to the v1 registry server. +func loginV1(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint) (string, error) { + var ( + status string + reqBody []byte + err error + reqStatusCode = 0 + serverAddress = authConfig.ServerAddress + ) + + logrus.Debugf("attempting v1 login to registry endpoint %s", registryEndpoint) + + if serverAddress == "" { + return "", fmt.Errorf("Server Error: Server Address not set.") + } + + loginAgainstOfficialIndex := serverAddress == IndexServer + + // to avoid sending the server address to the server it should be removed before being marshalled + authCopy := *authConfig + authCopy.ServerAddress = "" + + jsonBody, err := json.Marshal(authCopy) + if err != nil { + return "", fmt.Errorf("Config Error: %s", err) + } + + // using `bytes.NewReader(jsonBody)` here causes the server to respond with a 411 status. + b := strings.NewReader(string(jsonBody)) + req1, err := registryEndpoint.client.Post(serverAddress+"users/", "application/json; charset=utf-8", b) + if err != nil { + return "", fmt.Errorf("Server Error: %s", err) + } + reqStatusCode = req1.StatusCode + defer req1.Body.Close() + reqBody, err = ioutil.ReadAll(req1.Body) + if err != nil { + return "", fmt.Errorf("Server Error: [%#v] %s", reqStatusCode, err) + } + + if reqStatusCode == 201 { + if loginAgainstOfficialIndex { + status = "Account created. 
+		if loginAgainstOfficialIndex {
+			status = "Account created. Please use the confirmation link we sent" +
+				" to your e-mail to activate it."
+		} else {
+			// *TODO: Use registry configuration to determine what this says, if anything?
+			status = "Account created. Please see the documentation of the registry " + serverAddress + " for instructions on how to activate it."
+		}
+	} else if reqStatusCode == 400 {
+		if string(reqBody) == "\"Username or email already exists\"" {
+			req, err := http.NewRequest("GET", serverAddress+"users/", nil)
+			if err != nil {
+				return "", err
+			}
+			req.SetBasicAuth(authConfig.Username, authConfig.Password)
+			resp, err := registryEndpoint.client.Do(req)
+			if err != nil {
+				return "", err
+			}
+			defer resp.Body.Close()
+			body, err := ioutil.ReadAll(resp.Body)
+			if err != nil {
+				return "", err
+			}
+			if resp.StatusCode == 200 {
+				return "Login Succeeded", nil
+			} else if resp.StatusCode == 401 {
+				return "", fmt.Errorf("Wrong login/password, please try again")
+			} else if resp.StatusCode == 403 {
+				if loginAgainstOfficialIndex {
+					return "", fmt.Errorf("Login: Account is not Active. Please check your e-mail for a confirmation link.")
+				}
+				// *TODO: Use registry configuration to determine what this says, if anything?
+				return "", fmt.Errorf("Login: Account is not Active. Please see the documentation of the registry %s for instructions on how to activate it.", serverAddress)
+			} else if resp.StatusCode == 500 { // Issue #14326
+				logrus.Errorf("%s returned status code %d. Response Body:\n%s", req.URL.String(), resp.StatusCode, body)
+				return "", fmt.Errorf("Internal Server Error")
+			}
+			return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, resp.StatusCode, resp.Header)
+		}
+		return "", fmt.Errorf("Registration: %s", reqBody)
+
+	} else if reqStatusCode == 401 {
+		// This case would happen with private registries where /v1/users is
+		// protected, so people can use `docker login` as an auth check.
+		req, err := http.NewRequest("GET", serverAddress+"users/", nil)
+		if err != nil {
+			return "", err
+		}
+		req.SetBasicAuth(authConfig.Username, authConfig.Password)
+		resp, err := registryEndpoint.client.Do(req)
+		if err != nil {
+			return "", err
+		}
+		defer resp.Body.Close()
+		body, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return "", err
+		}
+		if resp.StatusCode == 200 {
+			return "Login Succeeded", nil
+		} else if resp.StatusCode == 401 {
+			return "", fmt.Errorf("Wrong login/password, please try again")
+		} else {
+			return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body,
+				resp.StatusCode, resp.Header)
+		}
+	} else {
+		return "", fmt.Errorf("Unexpected status code [%d] : %s", reqStatusCode, reqBody)
+	}
+	return status, nil
+}
+
+// loginV2 tries to login to the v2 registry server. The given registry endpoint has been
+// pinged or set up with a list of authorization challenges. Each of these challenges is
+// tried until one of them succeeds. Currently supported challenge schemes are:
+//   HTTP Basic Authorization
+//   Token Authorization with a separate token issuing server
+// NOTE: the v2 logic does not attempt to create a user account if one doesn't exist. For
+// now, users should create their account through other means, such as directly from a web
+// page served by the v2 registry service provider. Whether this will be supported in the
+// future is to be determined.
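+// The challenges themselves are gathered by Endpoint.Ping: a 401 response to a v2
+// ping carries WWW-Authenticate headers, which pingV2 parses via parseAuthHeader
+// and stores on the endpoint (see AuthChallenges).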
+func loginV2(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint, scope string) (string, error) {
+	logrus.Debugf("attempting v2 login to registry endpoint %s", registryEndpoint)
+	var (
+		err       error
+		allErrors []error
+	)
+
+	for _, challenge := range registryEndpoint.AuthChallenges {
+		params := make(map[string]string, len(challenge.Parameters)+1)
+		for k, v := range challenge.Parameters {
+			params[k] = v
+		}
+		params["scope"] = scope
+		logrus.Debugf("trying %q auth challenge with params %v", challenge.Scheme, params)
+
+		switch strings.ToLower(challenge.Scheme) {
+		case "basic":
+			err = tryV2BasicAuthLogin(authConfig, params, registryEndpoint)
+		case "bearer":
+			err = tryV2TokenAuthLogin(authConfig, params, registryEndpoint)
+		default:
+			// Unsupported challenge types are recorded as errors and skipped.
+			err = fmt.Errorf("unsupported auth scheme: %q", challenge.Scheme)
+		}
+
+		if err == nil {
+			return "Login Succeeded", nil
+		}
+
+		logrus.Debugf("error trying auth challenge %q: %s", challenge.Scheme, err)
+
+		allErrors = append(allErrors, err)
+	}
+
+	return "", fmt.Errorf("no successful auth challenge for %s - errors: %s", registryEndpoint, allErrors)
+}
+
+func tryV2BasicAuthLogin(authConfig *cliconfig.AuthConfig, params map[string]string, registryEndpoint *Endpoint) error {
+	req, err := http.NewRequest("GET", registryEndpoint.Path(""), nil)
+	if err != nil {
+		return err
+	}
+
+	req.SetBasicAuth(authConfig.Username, authConfig.Password)
+
+	resp, err := registryEndpoint.client.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		return fmt.Errorf("basic auth attempt to %s realm %q failed with status: %d %s", registryEndpoint, params["realm"], resp.StatusCode, http.StatusText(resp.StatusCode))
+	}
+
+	return nil
+}
+
+func tryV2TokenAuthLogin(authConfig *cliconfig.AuthConfig, params map[string]string, registryEndpoint *Endpoint) error {
+	token, err := getToken(authConfig.Username, authConfig.Password, params, registryEndpoint)
+	if err != nil {
+		return err
+	}
+
+	req, err := http.NewRequest("GET", registryEndpoint.Path(""), nil)
+	if err != nil {
+		return err
+	}
+
+	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
+
+	resp, err := registryEndpoint.client.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		return fmt.Errorf("token auth attempt to %s realm %q failed with status: %d %s", registryEndpoint, params["realm"], resp.StatusCode, http.StatusText(resp.StatusCode))
+	}
+
+	return nil
+}
+
+// ResolveAuthConfig matches an auth configuration to a server address or a URL
+func ResolveAuthConfig(config *cliconfig.ConfigFile, index *IndexInfo) cliconfig.AuthConfig {
+	configKey := index.GetAuthConfigKey()
+	// First try the happy case
+	if c, found := config.AuthConfigs[configKey]; found || index.Official {
+		return c
+	}
+
+	convertToHostname := func(url string) string {
+		stripped := url
+		if strings.HasPrefix(url, "http://") {
+			stripped = strings.Replace(url, "http://", "", 1)
+		} else if strings.HasPrefix(url, "https://") {
+			stripped = strings.Replace(url, "https://", "", 1)
+		}
+
+		nameParts := strings.SplitN(stripped, "/", 2)
+
+		return nameParts[0]
+	}
+
+	// Maybe they have a legacy config file; we will iterate the keys, converting
+	// them to the new format and testing them.
+	for registry, ac := range config.AuthConfigs {
+		if configKey == convertToHostname(registry) {
+			return ac
+		}
+	}
+
+	// When all else fails, return an empty auth
config + return cliconfig.AuthConfig{} +} diff --git a/registry/auth_test.go b/registry/auth_test.go new file mode 100644 index 00000000..a8e3da01 --- /dev/null +++ b/registry/auth_test.go @@ -0,0 +1,173 @@ +package registry + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/docker/docker/cliconfig" +) + +func TestEncodeAuth(t *testing.T) { + newAuthConfig := &cliconfig.AuthConfig{Username: "ken", Password: "test", Email: "test@example.com"} + authStr := cliconfig.EncodeAuth(newAuthConfig) + decAuthConfig := &cliconfig.AuthConfig{} + var err error + decAuthConfig.Username, decAuthConfig.Password, err = cliconfig.DecodeAuth(authStr) + if err != nil { + t.Fatal(err) + } + if newAuthConfig.Username != decAuthConfig.Username { + t.Fatal("Encode Username doesn't match decoded Username") + } + if newAuthConfig.Password != decAuthConfig.Password { + t.Fatal("Encode Password doesn't match decoded Password") + } + if authStr != "a2VuOnRlc3Q=" { + t.Fatal("AuthString encoding isn't correct.") + } +} + +func setupTempConfigFile() (*cliconfig.ConfigFile, error) { + root, err := ioutil.TempDir("", "docker-test-auth") + if err != nil { + return nil, err + } + root = filepath.Join(root, cliconfig.ConfigFileName) + configFile := cliconfig.NewConfigFile(root) + + for _, registry := range []string{"testIndex", IndexServer} { + configFile.AuthConfigs[registry] = cliconfig.AuthConfig{ + Username: "docker-user", + Password: "docker-pass", + Email: "docker@docker.io", + } + } + + return configFile, nil +} + +func TestSameAuthDataPostSave(t *testing.T) { + configFile, err := setupTempConfigFile() + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(configFile.Filename()) + + err = configFile.Save() + if err != nil { + t.Fatal(err) + } + + authConfig := configFile.AuthConfigs["testIndex"] + if authConfig.Username != "docker-user" { + t.Fail() + } + if authConfig.Password != "docker-pass" { + t.Fail() + } + if authConfig.Email != "docker@docker.io" { + t.Fail() + } + if authConfig.Auth != "" { + t.Fail() + } +} + +func TestResolveAuthConfigIndexServer(t *testing.T) { + configFile, err := setupTempConfigFile() + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(configFile.Filename()) + + indexConfig := configFile.AuthConfigs[IndexServer] + + officialIndex := &IndexInfo{ + Official: true, + } + privateIndex := &IndexInfo{ + Official: false, + } + + resolved := ResolveAuthConfig(configFile, officialIndex) + assertEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to return IndexServer") + + resolved = ResolveAuthConfig(configFile, privateIndex) + assertNotEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to not return IndexServer") +} + +func TestResolveAuthConfigFullURL(t *testing.T) { + configFile, err := setupTempConfigFile() + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(configFile.Filename()) + + registryAuth := cliconfig.AuthConfig{ + Username: "foo-user", + Password: "foo-pass", + Email: "foo@example.com", + } + localAuth := cliconfig.AuthConfig{ + Username: "bar-user", + Password: "bar-pass", + Email: "bar@example.com", + } + officialAuth := cliconfig.AuthConfig{ + Username: "baz-user", + Password: "baz-pass", + Email: "baz@example.com", + } + configFile.AuthConfigs[IndexServer] = officialAuth + + expectedAuths := map[string]cliconfig.AuthConfig{ + "registry.example.com": registryAuth, + "localhost:8000": localAuth, + "registry.com": localAuth, + } + + validRegistries := map[string][]string{ + "registry.example.com": { + 
"https://registry.example.com/v1/", + "http://registry.example.com/v1/", + "registry.example.com", + "registry.example.com/v1/", + }, + "localhost:8000": { + "https://localhost:8000/v1/", + "http://localhost:8000/v1/", + "localhost:8000", + "localhost:8000/v1/", + }, + "registry.com": { + "https://registry.com/v1/", + "http://registry.com/v1/", + "registry.com", + "registry.com/v1/", + }, + } + + for configKey, registries := range validRegistries { + configured, ok := expectedAuths[configKey] + if !ok || configured.Email == "" { + t.Fail() + } + index := &IndexInfo{ + Name: configKey, + } + for _, registry := range registries { + configFile.AuthConfigs[registry] = configured + resolved := ResolveAuthConfig(configFile, index) + if resolved.Email != configured.Email { + t.Errorf("%s -> %q != %q\n", registry, resolved.Email, configured.Email) + } + delete(configFile.AuthConfigs, registry) + resolved = ResolveAuthConfig(configFile, index) + if resolved.Email == configured.Email { + t.Errorf("%s -> %q == %q\n", registry, resolved.Email, configured.Email) + } + } + } +} diff --git a/registry/authchallenge.go b/registry/authchallenge.go new file mode 100644 index 00000000..e300d82a --- /dev/null +++ b/registry/authchallenge.go @@ -0,0 +1,150 @@ +package registry + +import ( + "net/http" + "strings" +) + +// Octet types from RFC 2616. +type octetType byte + +// AuthorizationChallenge carries information +// from a WWW-Authenticate response header. +type AuthorizationChallenge struct { + Scheme string + Parameters map[string]string +} + +var octetTypes [256]octetType + +const ( + isToken octetType = 1 << iota + isSpace +) + +func init() { + // OCTET = + // CHAR = + // CTL = + // CR = + // LF = + // SP = + // HT = + // <"> = + // CRLF = CR LF + // LWS = [CRLF] 1*( SP | HT ) + // TEXT = + // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> + // | "/" | "[" | "]" | "?" 
| "=" | "{" | "}" | SP | HT + // token = 1* + // qdtext = > + + for c := 0; c < 256; c++ { + var t octetType + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 + if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { + t |= isSpace + } + if isChar && !isCtl && !isSeparator { + t |= isToken + } + octetTypes[c] = t + } +} + +func parseAuthHeader(header http.Header) []*AuthorizationChallenge { + var challenges []*AuthorizationChallenge + for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { + v, p := parseValueAndParams(h) + if v != "" { + challenges = append(challenges, &AuthorizationChallenge{Scheme: v, Parameters: p}) + } + } + return challenges +} + +func parseValueAndParams(header string) (value string, params map[string]string) { + params = make(map[string]string) + value, s := expectToken(header) + if value == "" { + return + } + value = strings.ToLower(value) + s = "," + skipSpace(s) + for strings.HasPrefix(s, ",") { + var pkey string + pkey, s = expectToken(skipSpace(s[1:])) + if pkey == "" { + return + } + if !strings.HasPrefix(s, "=") { + return + } + var pvalue string + pvalue, s = expectTokenOrQuoted(s[1:]) + if pvalue == "" { + return + } + pkey = strings.ToLower(pkey) + params[pkey] = pvalue + s = skipSpace(s) + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isToken == 0 { + break + } + } + return s[:i], s[i:] +} + +func expectTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return expectToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + i; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} diff --git a/registry/config.go b/registry/config.go new file mode 100644 index 00000000..8ab3d9a7 --- /dev/null +++ b/registry/config.go @@ -0,0 +1,377 @@ +package registry + +import ( + "encoding/json" + "errors" + "fmt" + "net" + "net/url" + "strings" + + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/docker/image" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" +) + +// Options holds command line options. 
+type Options struct { + Mirrors opts.ListOpts + InsecureRegistries opts.ListOpts +} + +const ( + // DefaultNamespace is the default namespace + DefaultNamespace = "docker.io" + // DefaultV2Registry is the URI of the default v2 registry + DefaultV2Registry = "https://registry-1.docker.io" + // DefaultRegistryVersionHeader is the name of the default HTTP header + // that carries Registry version info + DefaultRegistryVersionHeader = "Docker-Distribution-Api-Version" + // DefaultV1Registry is the URI of the default v1 registry + DefaultV1Registry = "https://index.docker.io" + + // CertsDir is the directory where certificates are stored + CertsDir = "/etc/docker/certs.d" + + // IndexServer is the v1 registry server used for user auth + account creation + IndexServer = DefaultV1Registry + "/v1/" + // IndexName is the name of the index + IndexName = "docker.io" + // NotaryServer is the endpoint serving the Notary trust server + NotaryServer = "https://notary.docker.io" +) + +var ( + // ErrInvalidRepositoryName is an error returned if the repository name did + // not have the correct form + ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")") + + emptyServiceConfig = NewServiceConfig(nil) + + // V2Only controls access to legacy registries. If it is set to true via the + // command line flag the daemon will not attempt to contact v1 legacy registries + V2Only = false +) + +// InstallFlags adds command-line options to the top-level flag parser for +// the current process. +func (options *Options) InstallFlags(cmd *flag.FlagSet, usageFn func(string) string) { + options.Mirrors = opts.NewListOpts(ValidateMirror) + cmd.Var(&options.Mirrors, []string{"-registry-mirror"}, usageFn("Preferred Docker registry mirror")) + options.InsecureRegistries = opts.NewListOpts(ValidateIndexName) + cmd.Var(&options.InsecureRegistries, []string{"-insecure-registry"}, usageFn("Enable insecure registry communication")) + cmd.BoolVar(&V2Only, []string{"-disable-legacy-registry"}, false, "Do not contact legacy registries") +} + +type netIPNet net.IPNet + +func (ipnet *netIPNet) MarshalJSON() ([]byte, error) { + return json.Marshal((*net.IPNet)(ipnet).String()) +} + +func (ipnet *netIPNet) UnmarshalJSON(b []byte) (err error) { + var ipnetStr string + if err = json.Unmarshal(b, &ipnetStr); err == nil { + var cidr *net.IPNet + if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { + *ipnet = netIPNet(*cidr) + } + } + return +} + +// ServiceConfig stores daemon registry services configuration. +type ServiceConfig struct { + InsecureRegistryCIDRs []*netIPNet `json:"InsecureRegistryCIDRs"` + IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` + Mirrors []string +} + +// NewServiceConfig returns a new instance of ServiceConfig +func NewServiceConfig(options *Options) *ServiceConfig { + if options == nil { + options = &Options{ + Mirrors: opts.NewListOpts(nil), + InsecureRegistries: opts.NewListOpts(nil), + } + } + + // Localhost is by default considered as an insecure registry + // This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker). + // + // TODO: should we deprecate this once it is easier for people to set up a TLS registry or change + // daemon flags on boot2docker? 
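+	// The error returned by Set is ignored here: "127.0.0.0/8" is a constant
+	// that always passes the ListOpts validator.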
+ options.InsecureRegistries.Set("127.0.0.0/8") + + config := &ServiceConfig{ + InsecureRegistryCIDRs: make([]*netIPNet, 0), + IndexConfigs: make(map[string]*IndexInfo, 0), + // Hack: Bypass setting the mirrors to IndexConfigs since they are going away + // and Mirrors are only for the official registry anyways. + Mirrors: options.Mirrors.GetAll(), + } + // Split --insecure-registry into CIDR and registry-specific settings. + for _, r := range options.InsecureRegistries.GetAll() { + // Check if CIDR was passed to --insecure-registry + _, ipnet, err := net.ParseCIDR(r) + if err == nil { + // Valid CIDR. + config.InsecureRegistryCIDRs = append(config.InsecureRegistryCIDRs, (*netIPNet)(ipnet)) + } else { + // Assume `host:port` if not CIDR. + config.IndexConfigs[r] = &IndexInfo{ + Name: r, + Mirrors: make([]string, 0), + Secure: false, + Official: false, + } + } + } + + // Configure public registry. + config.IndexConfigs[IndexName] = &IndexInfo{ + Name: IndexName, + Mirrors: config.Mirrors, + Secure: true, + Official: true, + } + + return config +} + +// isSecureIndex returns false if the provided indexName is part of the list of insecure registries +// Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. +// +// The list of insecure registries can contain an element with CIDR notation to specify a whole subnet. +// If the subnet contains one of the IPs of the registry specified by indexName, the latter is considered +// insecure. +// +// indexName should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name +// or an IP address. If it is a domain name, then it will be resolved in order to check if the IP is contained +// in a subnet. If the resolving is not successful, isSecureIndex will only try to match hostname to any element +// of insecureRegistries. +func (config *ServiceConfig) isSecureIndex(indexName string) bool { + // Check for configured index, first. This is needed in case isSecureIndex + // is called from anything besides NewIndexInfo, in order to honor per-index configurations. + if index, ok := config.IndexConfigs[indexName]; ok { + return index.Secure + } + + host, _, err := net.SplitHostPort(indexName) + if err != nil { + // assume indexName is of the form `host` without the port and go on. + host = indexName + } + + addrs, err := lookupIP(host) + if err != nil { + ip := net.ParseIP(host) + if ip != nil { + addrs = []net.IP{ip} + } + + // if ip == nil, then `host` is neither an IP nor it could be looked up, + // either because the index is unreachable, or because the index is behind an HTTP proxy. + // So, len(addrs) == 0 and we're not aborting. + } + + // Try CIDR notation only if addrs has any elements, i.e. if `host`'s IP could be determined. 
+ for _, addr := range addrs { + for _, ipnet := range config.InsecureRegistryCIDRs { + // check if the addr falls in the subnet + if (*net.IPNet)(ipnet).Contains(addr) { + return false + } + } + } + + return true +} + +// ValidateMirror validates an HTTP(S) registry mirror +func ValidateMirror(val string) (string, error) { + uri, err := url.Parse(val) + if err != nil { + return "", fmt.Errorf("%s is not a valid URI", val) + } + + if uri.Scheme != "http" && uri.Scheme != "https" { + return "", fmt.Errorf("Unsupported scheme %s", uri.Scheme) + } + + if uri.Path != "" || uri.RawQuery != "" || uri.Fragment != "" { + return "", fmt.Errorf("Unsupported path/query/fragment at end of the URI") + } + + return fmt.Sprintf("%s://%s/", uri.Scheme, uri.Host), nil +} + +// ValidateIndexName validates an index name. +func ValidateIndexName(val string) (string, error) { + // 'index.docker.io' => 'docker.io' + if val == "index."+IndexName { + val = IndexName + } + if strings.HasPrefix(val, "-") || strings.HasSuffix(val, "-") { + return "", fmt.Errorf("Invalid index name (%s). Cannot begin or end with a hyphen.", val) + } + // *TODO: Check if valid hostname[:port]/ip[:port]? + return val, nil +} + +func validateRemoteName(remoteName string) error { + + if !strings.Contains(remoteName, "/") { + + // the repository name must not be a valid image ID + if err := image.ValidateID(remoteName); err == nil { + return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", remoteName) + } + } + + return v2.ValidateRepositoryName(remoteName) +} + +func validateNoSchema(reposName string) error { + if strings.Contains(reposName, "://") { + // It cannot contain a scheme! + return ErrInvalidRepositoryName + } + return nil +} + +// ValidateRepositoryName validates a repository name +func ValidateRepositoryName(reposName string) error { + var err error + if err = validateNoSchema(reposName); err != nil { + return err + } + indexName, remoteName := splitReposName(reposName) + if _, err = ValidateIndexName(indexName); err != nil { + return err + } + return validateRemoteName(remoteName) +} + +// NewIndexInfo returns IndexInfo configuration from indexName +func (config *ServiceConfig) NewIndexInfo(indexName string) (*IndexInfo, error) { + var err error + indexName, err = ValidateIndexName(indexName) + if err != nil { + return nil, err + } + + // Return any configured index info, first. + if index, ok := config.IndexConfigs[indexName]; ok { + return index, nil + } + + // Construct a non-configured index info. + index := &IndexInfo{ + Name: indexName, + Mirrors: make([]string, 0), + Official: false, + } + index.Secure = config.isSecureIndex(indexName) + return index, nil +} + +// GetAuthConfigKey special-cases using the full index address of the official +// index as the AuthConfig key, and uses the (host)name[:port] for private indexes. 
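+// For example, the official index resolves to IndexServer
+// ("https://index.docker.io/v1/"), while a private index such as
+// "myregistry.example.com:5000" (an illustrative name) resolves to itself.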
+func (index *IndexInfo) GetAuthConfigKey() string { + if index.Official { + return IndexServer + } + return index.Name +} + +// splitReposName breaks a reposName into an index name and remote name +func splitReposName(reposName string) (string, string) { + nameParts := strings.SplitN(reposName, "/", 2) + var indexName, remoteName string + if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") && + !strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") { + // This is a Docker Index repos (ex: samalba/hipache or ubuntu) + // 'docker.io' + indexName = IndexName + remoteName = reposName + } else { + indexName = nameParts[0] + remoteName = nameParts[1] + } + return indexName, remoteName +} + +// NewRepositoryInfo validates and breaks down a repository name into a RepositoryInfo +func (config *ServiceConfig) NewRepositoryInfo(reposName string) (*RepositoryInfo, error) { + if err := validateNoSchema(reposName); err != nil { + return nil, err + } + + indexName, remoteName := splitReposName(reposName) + if err := validateRemoteName(remoteName); err != nil { + return nil, err + } + + repoInfo := &RepositoryInfo{ + RemoteName: remoteName, + } + + var err error + repoInfo.Index, err = config.NewIndexInfo(indexName) + if err != nil { + return nil, err + } + + if repoInfo.Index.Official { + normalizedName := repoInfo.RemoteName + if strings.HasPrefix(normalizedName, "library/") { + // If pull "library/foo", it's stored locally under "foo" + normalizedName = strings.SplitN(normalizedName, "/", 2)[1] + } + + repoInfo.LocalName = normalizedName + repoInfo.RemoteName = normalizedName + // If the normalized name does not contain a '/' (e.g. "foo") + // then it is an official repo. + if strings.IndexRune(normalizedName, '/') == -1 { + repoInfo.Official = true + // Fix up remote name for official repos. + repoInfo.RemoteName = "library/" + normalizedName + } + + repoInfo.CanonicalName = "docker.io/" + repoInfo.RemoteName + } else { + repoInfo.LocalName = repoInfo.Index.Name + "/" + repoInfo.RemoteName + repoInfo.CanonicalName = repoInfo.LocalName + + } + + return repoInfo, nil +} + +// GetSearchTerm special-cases using local name for official index, and +// remote name for private indexes. +func (repoInfo *RepositoryInfo) GetSearchTerm() string { + if repoInfo.Index.Official { + return repoInfo.LocalName + } + return repoInfo.RemoteName +} + +// ParseRepositoryInfo performs the breakdown of a repository name into a RepositoryInfo, but +// lacks registry configuration. 
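+// It resolves the name against an empty ServiceConfig, so any --insecure-registry
+// or --registry-mirror flags given to the daemon are not reflected in the result.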
+func ParseRepositoryInfo(reposName string) (*RepositoryInfo, error) {
+	return emptyServiceConfig.NewRepositoryInfo(reposName)
+}
+
+// NormalizeLocalName transforms a repository name into a normalized LocalName.
+// It passes the name through without transformation on error (image id, etc).
+func NormalizeLocalName(name string) string {
+	repoInfo, err := ParseRepositoryInfo(name)
+	if err != nil {
+		return name
+	}
+	return repoInfo.LocalName
+}
diff --git a/registry/config_test.go b/registry/config_test.go
new file mode 100644
index 00000000..25578a7f
--- /dev/null
+++ b/registry/config_test.go
@@ -0,0 +1,49 @@
+package registry
+
+import (
+	"testing"
+)
+
+func TestValidateMirror(t *testing.T) {
+	valid := []string{
+		"http://mirror-1.com",
+		"https://mirror-1.com",
+		"http://localhost",
+		"https://localhost",
+		"http://localhost:5000",
+		"https://localhost:5000",
+		"http://127.0.0.1",
+		"https://127.0.0.1",
+		"http://127.0.0.1:5000",
+		"https://127.0.0.1:5000",
+	}
+
+	invalid := []string{
+		"!invalid!://%as%",
+		"ftp://mirror-1.com",
+		"http://mirror-1.com/",
+		"http://mirror-1.com/?q=foo",
+		"http://mirror-1.com/v1/",
+		"http://mirror-1.com/v1/?q=foo",
+		"http://mirror-1.com/v1/?q=foo#frag",
+		"http://mirror-1.com?q=foo",
+		"https://mirror-1.com#frag",
+		"https://mirror-1.com/",
+		"https://mirror-1.com/#frag",
+		"https://mirror-1.com/v1/",
+		"https://mirror-1.com/v1/#",
+		"https://mirror-1.com?q",
+	}
+
+	for _, address := range valid {
+		if ret, err := ValidateMirror(address); err != nil || ret == "" {
+			t.Errorf("ValidateMirror(`"+address+"`) got %s %s", ret, err)
+		}
+	}
+
+	for _, address := range invalid {
+		if ret, err := ValidateMirror(address); err == nil || ret != "" {
+			t.Errorf("ValidateMirror(`"+address+"`) got %s %s", ret, err)
+		}
+	}
+}
diff --git a/registry/endpoint.go b/registry/endpoint.go
new file mode 100644
index 00000000..20805767
--- /dev/null
+++ b/registry/endpoint.go
@@ -0,0 +1,276 @@
+package registry
+
+import (
+	"crypto/tls"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/registry/api/v2"
+	"github.com/docker/distribution/registry/client/transport"
+)
+
+// for mocking in unit tests
+var lookupIP = net.LookupIP
+
+// scanForAPIVersion scans a string for an API version in the URL path. It
+// returns the address with any version suffix trimmed, along with the API
+// version that was found (or APIVersionUnknown).
+func scanForAPIVersion(address string) (string, APIVersion) {
+	var (
+		chunks        []string
+		apiVersionStr string
+	)
+
+	if strings.HasSuffix(address, "/") {
+		address = address[:len(address)-1]
+	}
+
+	chunks = strings.Split(address, "/")
+	apiVersionStr = chunks[len(chunks)-1]
+
+	for k, v := range apiVersions {
+		if apiVersionStr == v {
+			address = strings.Join(chunks[:len(chunks)-1], "/")
+			return address, k
+		}
+	}
+
+	return address, APIVersionUnknown
+}
+
+// NewEndpoint parses the given address to return a registry endpoint. v can be used to
+// specify a specific endpoint version
+func NewEndpoint(index *IndexInfo, metaHeaders http.Header, v APIVersion) (*Endpoint, error) {
+	tlsConfig, err := newTLSConfig(index.Name, index.Secure)
+	if err != nil {
+		return nil, err
+	}
+	endpoint, err := newEndpoint(index.GetAuthConfigKey(), tlsConfig, metaHeaders)
+	if err != nil {
+		return nil, err
+	}
+	if v != APIVersionUnknown {
+		endpoint.Version = v
+	}
+	if err := validateEndpoint(endpoint); err != nil {
+		return nil, err
+	}
+
+	return endpoint, nil
+}
+
+func validateEndpoint(endpoint *Endpoint) error {
+	logrus.Debugf("pinging registry endpoint %s", endpoint)
+
+	// Try HTTPS ping to registry
+	endpoint.URL.Scheme = "https"
+	if _, err := endpoint.Ping(); err != nil {
+		if endpoint.IsSecure {
+			// If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry`
+			// in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fall back to HTTP.
+			return fmt.Errorf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host)
+		}
+
+		// If registry is insecure and HTTPS failed, fall back to HTTP.
+		logrus.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err)
+		endpoint.URL.Scheme = "http"
+
+		var err2 error
+		if _, err2 = endpoint.Ping(); err2 == nil {
+			return nil
+		}
+
+		return fmt.Errorf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2)
+	}
+
+	return nil
+}
+
+func newEndpoint(address string, tlsConfig *tls.Config, metaHeaders http.Header) (*Endpoint, error) {
+	var (
+		endpoint       = new(Endpoint)
+		trimmedAddress string
+		err            error
+	)
+
+	if !strings.HasPrefix(address, "http") {
+		address = "https://" + address
+	}
+
+	endpoint.IsSecure = (tlsConfig == nil || !tlsConfig.InsecureSkipVerify)
+
+	trimmedAddress, endpoint.Version = scanForAPIVersion(address)
+
+	if endpoint.URL, err = url.Parse(trimmedAddress); err != nil {
+		return nil, err
+	}
+
+	// TODO(tiborvass): make sure a ConnectTimeout transport is used
+	tr := NewTransport(tlsConfig)
+	endpoint.client = HTTPClient(transport.NewTransport(tr, DockerHeaders(metaHeaders)...))
+	return endpoint, nil
+}
+
+// Endpoint stores basic information about a registry endpoint.
+type Endpoint struct {
+	client         *http.Client
+	URL            *url.URL
+	Version        APIVersion
+	IsSecure       bool
+	AuthChallenges []*AuthorizationChallenge
+	URLBuilder     *v2.URLBuilder
+}
+
+// String returns the formatted URL for the root of this registry Endpoint.
+func (e *Endpoint) String() string {
+	return fmt.Sprintf("%s/v%d/", e.URL, e.Version)
+}
+
+// VersionString returns a formatted string of this
+// endpoint address using the given API Version.
+func (e *Endpoint) VersionString(version APIVersion) string {
+	return fmt.Sprintf("%s/v%d/", e.URL, version)
+}
+
+// Path returns a formatted string for the URL
+// of this endpoint with the given path appended.
+func (e *Endpoint) Path(path string) string {
+	return fmt.Sprintf("%s/v%d/%s", e.URL, e.Version, path)
+}
+
+// Ping pings the remote endpoint with v2 and v1 pings to determine the API
+// version. It returns a PingResult containing the discovered version. The
+// PingResult also indicates whether the registry is standalone or not.
+func (e *Endpoint) Ping() (PingResult, error) {
+	// The ping logic to use is determined by the registry endpoint version.
+	switch e.Version {
+	case APIVersion1:
+		return e.pingV1()
+	case APIVersion2:
+		return e.pingV2()
+	}
+
+	// APIVersionUnknown
+	// We should try v2 first...
+	e.Version = APIVersion2
+	regInfo, errV2 := e.pingV2()
+	if errV2 == nil {
+		return regInfo, nil
+	}
+
+	// ... then fallback to v1.
+	e.Version = APIVersion1
+	regInfo, errV1 := e.pingV1()
+	if errV1 == nil {
+		return regInfo, nil
+	}
+
+	e.Version = APIVersionUnknown
+	return PingResult{}, fmt.Errorf("unable to ping registry endpoint %s\nv2 ping attempt failed with error: %s\nv1 ping attempt failed with error: %s", e, errV2, errV1)
+}
+
+func (e *Endpoint) pingV1() (PingResult, error) {
+	logrus.Debugf("attempting v1 ping for registry endpoint %s", e)
+
+	if e.String() == IndexServer {
+		// Skip the check, we know this one is valid
+		// (and we never want to fallback to http in case of error)
+		return PingResult{Standalone: false}, nil
+	}
+
+	req, err := http.NewRequest("GET", e.Path("_ping"), nil)
+	if err != nil {
+		return PingResult{Standalone: false}, err
+	}
+
+	resp, err := e.client.Do(req)
+	if err != nil {
+		return PingResult{Standalone: false}, err
+	}
+
+	defer resp.Body.Close()
+
+	jsonString, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return PingResult{Standalone: false}, fmt.Errorf("error while reading the http response: %s", err)
+	}
+
+	// If the header is absent, we assume Standalone = true for compatibility
+	// with earlier versions of the registry.
+	info := PingResult{
+		Standalone: true,
+	}
+	if err := json.Unmarshal(jsonString, &info); err != nil {
+		logrus.Debugf("Error unmarshalling the _ping PingResult: %s", err)
+		// don't stop here. Just assume sane defaults
+	}
+	if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" {
+		logrus.Debugf("Registry version header: '%s'", hdr)
+		info.Version = hdr
+	}
+	logrus.Debugf("PingResult.Version: %q", info.Version)
+
+	standalone := resp.Header.Get("X-Docker-Registry-Standalone")
+	logrus.Debugf("Registry standalone header: '%s'", standalone)
+	// Accepted values are "true" (case-insensitive) and "1".
+	if strings.EqualFold(standalone, "true") || standalone == "1" {
+		info.Standalone = true
+	} else if len(standalone) > 0 {
+		// there is a header set, and it is not "true" or "1", so assume false
+		info.Standalone = false
+	}
+	logrus.Debugf("PingResult.Standalone: %t", info.Standalone)
+	return info, nil
+}
+
+func (e *Endpoint) pingV2() (PingResult, error) {
+	logrus.Debugf("attempting v2 ping for registry endpoint %s", e)
+
+	req, err := http.NewRequest("GET", e.Path(""), nil)
+	if err != nil {
+		return PingResult{}, err
+	}
+
+	resp, err := e.client.Do(req)
+	if err != nil {
+		return PingResult{}, err
+	}
+	defer resp.Body.Close()
+
+	// The endpoint may have multiple supported versions.
+	// Ensure it supports the v2 Registry API.
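+	// A Docker-Distribution-API-Version header may be repeated, and each value may
+	// list several space-separated version names, so both dimensions are scanned.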
+ var supportsV2 bool + +HeaderLoop: + for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey("Docker-Distribution-API-Version")] { + for _, versionName := range strings.Fields(supportedVersions) { + if versionName == "registry/2.0" { + supportsV2 = true + break HeaderLoop + } + } + } + + if !supportsV2 { + return PingResult{}, fmt.Errorf("%s does not appear to be a v2 registry endpoint", e) + } + + if resp.StatusCode == http.StatusOK { + // It would seem that no authentication/authorization is required. + // So we don't need to parse/add any authorization schemes. + return PingResult{Standalone: true}, nil + } + + if resp.StatusCode == http.StatusUnauthorized { + // Parse the WWW-Authenticate Header and store the challenges + // on this endpoint object. + e.AuthChallenges = parseAuthHeader(resp.Header) + return PingResult{}, nil + } + + return PingResult{}, fmt.Errorf("v2 registry endpoint returned status %d: %q", resp.StatusCode, http.StatusText(resp.StatusCode)) +} diff --git a/registry/endpoint_test.go b/registry/endpoint_test.go new file mode 100644 index 00000000..ee301dbd --- /dev/null +++ b/registry/endpoint_test.go @@ -0,0 +1,93 @@ +package registry + +import ( + "net/http" + "net/http/httptest" + "net/url" + "testing" +) + +func TestEndpointParse(t *testing.T) { + testData := []struct { + str string + expected string + }{ + {IndexServer, IndexServer}, + {"http://0.0.0.0:5000/v1/", "http://0.0.0.0:5000/v1/"}, + {"http://0.0.0.0:5000/v2/", "http://0.0.0.0:5000/v2/"}, + {"http://0.0.0.0:5000", "http://0.0.0.0:5000/v0/"}, + {"0.0.0.0:5000", "https://0.0.0.0:5000/v0/"}, + } + for _, td := range testData { + e, err := newEndpoint(td.str, nil, nil) + if err != nil { + t.Errorf("%q: %s", td.str, err) + } + if e == nil { + t.Logf("something's fishy, endpoint for %q is nil", td.str) + continue + } + if e.String() != td.expected { + t.Errorf("expected %q, got %q", td.expected, e.String()) + } + } +} + +// Ensure that a registry endpoint that responds with a 401 only is determined +// to be a v1 registry unless it includes a valid v2 API header. +func TestValidateEndpointAmbiguousAPIVersion(t *testing.T) { + requireBasicAuthHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("WWW-Authenticate", `Basic realm="localhost"`) + w.WriteHeader(http.StatusUnauthorized) + }) + + requireBasicAuthHandlerV2 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // This mock server supports v2.0, v2.1, v42.0, and v100.0 + w.Header().Add("Docker-Distribution-API-Version", "registry/100.0 registry/42.0") + w.Header().Add("Docker-Distribution-API-Version", "registry/2.0 registry/2.1") + requireBasicAuthHandler.ServeHTTP(w, r) + }) + + // Make a test server which should validate as a v1 server. + testServer := httptest.NewServer(requireBasicAuthHandler) + defer testServer.Close() + + testServerURL, err := url.Parse(testServer.URL) + if err != nil { + t.Fatal(err) + } + + testEndpoint := Endpoint{ + URL: testServerURL, + Version: APIVersionUnknown, + client: HTTPClient(NewTransport(nil)), + } + + if err = validateEndpoint(&testEndpoint); err != nil { + t.Fatal(err) + } + + if testEndpoint.Version != APIVersion1 { + t.Fatalf("expected endpoint to validate to %d, got %d", APIVersion1, testEndpoint.Version) + } + + // Make a test server which should validate as a v2 server. 
+ testServer = httptest.NewServer(requireBasicAuthHandlerV2) + defer testServer.Close() + + testServerURL, err = url.Parse(testServer.URL) + if err != nil { + t.Fatal(err) + } + + testEndpoint.URL = testServerURL + testEndpoint.Version = APIVersionUnknown + + if err = validateEndpoint(&testEndpoint); err != nil { + t.Fatal(err) + } + + if testEndpoint.Version != APIVersion2 { + t.Fatalf("expected endpoint to validate to %d, got %d", APIVersion2, testEndpoint.Version) + } +} diff --git a/registry/reference.go b/registry/reference.go new file mode 100644 index 00000000..e15f83ee --- /dev/null +++ b/registry/reference.go @@ -0,0 +1,68 @@ +package registry + +import ( + "strings" + + "github.com/docker/distribution/digest" +) + +// Reference represents a tag or digest within a repository +type Reference interface { + // HasDigest returns whether the reference has a verifiable + // content addressable reference which may be considered secure. + HasDigest() bool + + // ImageName returns an image name for the given repository + ImageName(string) string + + // Returns a string representation of the reference + String() string +} + +type tagReference struct { + tag string +} + +func (tr tagReference) HasDigest() bool { + return false +} + +func (tr tagReference) ImageName(repo string) string { + return repo + ":" + tr.tag +} + +func (tr tagReference) String() string { + return tr.tag +} + +type digestReference struct { + digest digest.Digest +} + +func (dr digestReference) HasDigest() bool { + return true +} + +func (dr digestReference) ImageName(repo string) string { + return repo + "@" + dr.String() +} + +func (dr digestReference) String() string { + return dr.digest.String() +} + +// ParseReference parses a reference into either a digest or tag reference +func ParseReference(ref string) Reference { + if strings.Contains(ref, ":") { + dgst, err := digest.ParseDigest(ref) + if err == nil { + return digestReference{digest: dgst} + } + } + return tagReference{tag: ref} +} + +// DigestReference creates a digest reference using a digest +func DigestReference(dgst digest.Digest) Reference { + return digestReference{digest: dgst} +} diff --git a/registry/registry.go b/registry/registry.go new file mode 100644 index 00000000..f6907994 --- /dev/null +++ b/registry/registry.go @@ -0,0 +1,248 @@ +package registry + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "path/filepath" + "runtime" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/autogen/dockerversion" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/tlsconfig" + "github.com/docker/docker/pkg/useragent" +) + +var ( + // ErrAlreadyExists is an error returned if an image being pushed + // already exists on the remote side + ErrAlreadyExists = errors.New("Image already exists") + errLoginRequired = errors.New("Authentication is required.") +) + +// dockerUserAgent is the User-Agent the Docker client uses to identify itself. +// It is populated on init(), comprising version information of different components. 
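+// The result is a space-separated list of name/version pairs along the lines of
+// "docker/1.8.3 go/go1.4.2 git-commit/<commit> kernel/<version> os/linux arch/amd64".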
+var dockerUserAgent string + +func init() { + httpVersion := make([]useragent.VersionInfo, 0, 6) + httpVersion = append(httpVersion, useragent.VersionInfo{"docker", dockerversion.VERSION}) + httpVersion = append(httpVersion, useragent.VersionInfo{"go", runtime.Version()}) + httpVersion = append(httpVersion, useragent.VersionInfo{"git-commit", dockerversion.GITCOMMIT}) + if kernelVersion, err := kernel.GetKernelVersion(); err == nil { + httpVersion = append(httpVersion, useragent.VersionInfo{"kernel", kernelVersion.String()}) + } + httpVersion = append(httpVersion, useragent.VersionInfo{"os", runtime.GOOS}) + httpVersion = append(httpVersion, useragent.VersionInfo{"arch", runtime.GOARCH}) + + dockerUserAgent = useragent.AppendVersions("", httpVersion...) + + if runtime.GOOS != "linux" { + V2Only = true + } +} + +func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) { + // PreferredServerCipherSuites should have no effect + tlsConfig := tlsconfig.ServerDefault + + tlsConfig.InsecureSkipVerify = !isSecure + + if isSecure { + hostDir := filepath.Join(CertsDir, hostname) + logrus.Debugf("hostDir: %s", hostDir) + if err := ReadCertsDirectory(&tlsConfig, hostDir); err != nil { + return nil, err + } + } + + return &tlsConfig, nil +} + +func hasFile(files []os.FileInfo, name string) bool { + for _, f := range files { + if f.Name() == name { + return true + } + } + return false +} + +// ReadCertsDirectory reads the directory for TLS certificates +// including roots and certificate pairs and updates the +// provided TLS configuration. +func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { + fs, err := ioutil.ReadDir(directory) + if err != nil && !os.IsNotExist(err) { + return err + } + + for _, f := range fs { + if strings.HasSuffix(f.Name(), ".crt") { + if tlsConfig.RootCAs == nil { + // TODO(dmcgowan): Copy system pool + tlsConfig.RootCAs = x509.NewCertPool() + } + logrus.Debugf("crt: %s", filepath.Join(directory, f.Name())) + data, err := ioutil.ReadFile(filepath.Join(directory, f.Name())) + if err != nil { + return err + } + tlsConfig.RootCAs.AppendCertsFromPEM(data) + } + if strings.HasSuffix(f.Name(), ".cert") { + certName := f.Name() + keyName := certName[:len(certName)-5] + ".key" + logrus.Debugf("cert: %s", filepath.Join(directory, f.Name())) + if !hasFile(fs, keyName) { + return fmt.Errorf("Missing key %s for certificate %s", keyName, certName) + } + cert, err := tls.LoadX509KeyPair(filepath.Join(directory, certName), filepath.Join(directory, keyName)) + if err != nil { + return err + } + tlsConfig.Certificates = append(tlsConfig.Certificates, cert) + } + if strings.HasSuffix(f.Name(), ".key") { + keyName := f.Name() + certName := keyName[:len(keyName)-4] + ".cert" + logrus.Debugf("key: %s", filepath.Join(directory, f.Name())) + if !hasFile(fs, certName) { + return fmt.Errorf("Missing certificate %s for key %s", certName, keyName) + } + } + } + + return nil +} + +// DockerHeaders returns request modifiers that ensure requests have +// the User-Agent header set to dockerUserAgent and that metaHeaders +// are added. 
+func DockerHeaders(metaHeaders http.Header) []transport.RequestModifier { + modifiers := []transport.RequestModifier{ + transport.NewHeaderRequestModifier(http.Header{"User-Agent": []string{dockerUserAgent}}), + } + if metaHeaders != nil { + modifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders)) + } + return modifiers +} + +// HTTPClient returns a HTTP client structure which uses the given transport +// and contains the necessary headers for redirected requests +func HTTPClient(transport http.RoundTripper) *http.Client { + return &http.Client{ + Transport: transport, + CheckRedirect: addRequiredHeadersToRedirectedRequests, + } +} + +func trustedLocation(req *http.Request) bool { + var ( + trusteds = []string{"docker.com", "docker.io"} + hostname = strings.SplitN(req.Host, ":", 2)[0] + ) + if req.URL.Scheme != "https" { + return false + } + + for _, trusted := range trusteds { + if hostname == trusted || strings.HasSuffix(hostname, "."+trusted) { + return true + } + } + return false +} + +// addRequiredHeadersToRedirectedRequests adds the necessary redirection headers +// for redirected requests +func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error { + if via != nil && via[0] != nil { + if trustedLocation(req) && trustedLocation(via[0]) { + req.Header = via[0].Header + return nil + } + for k, v := range via[0].Header { + if k != "Authorization" { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + } + } + return nil +} + +func shouldV2Fallback(err errcode.Error) bool { + logrus.Debugf("v2 error: %T %v", err, err) + switch err.Code { + case v2.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown: + return true + } + return false +} + +// ErrNoSupport is an error type used for errors indicating that an operation +// is not supported. It encapsulates a more specific error. +type ErrNoSupport struct{ Err error } + +func (e ErrNoSupport) Error() string { + if e.Err == nil { + return "not supported" + } + return e.Err.Error() +} + +// ContinueOnError returns true if we should fallback to the next endpoint +// as a result of this error. +func ContinueOnError(err error) bool { + switch v := err.(type) { + case errcode.Errors: + return ContinueOnError(v[0]) + case ErrNoSupport: + return ContinueOnError(v.Err) + case errcode.Error: + return shouldV2Fallback(v) + case *client.UnexpectedHTTPResponseError: + return true + } + // let's be nice and fallback if the error is a completely + // unexpected one. + // If new errors have to be handled in some way, please + // add them to the switch above. + return true +} + +// NewTransport returns a new HTTP transport. If tlsConfig is nil, it uses the +// default TLS configuration. 
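+// Keep-alives are deliberately disabled until idle connections are closed
+// properly (see the TODO in the function body).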
+func NewTransport(tlsConfig *tls.Config) *http.Transport { + if tlsConfig == nil { + var cfg = tlsconfig.ServerDefault + tlsConfig = &cfg + } + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: tlsConfig, + // TODO(dmcgowan): Call close idle connections when complete and use keep alive + DisableKeepAlives: true, + } +} diff --git a/registry/registry_mock_test.go b/registry/registry_mock_test.go new file mode 100644 index 00000000..fb19e577 --- /dev/null +++ b/registry/registry_mock_test.go @@ -0,0 +1,476 @@ +package registry + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "net/url" + "strconv" + "strings" + "testing" + "time" + + "github.com/docker/docker/opts" + "github.com/gorilla/mux" + + "github.com/Sirupsen/logrus" +) + +var ( + testHTTPServer *httptest.Server + testHTTPSServer *httptest.Server + testLayers = map[string]map[string]string{ + "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20": { + "json": `{"id":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + "comment":"test base image","created":"2013-03-23T12:53:11.10432-07:00", + "container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0, + "CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false, + "Tty":false,"OpenStdin":false,"StdinOnce":false, + "Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null, + "VolumesFrom":"","Entrypoint":null},"Size":424242}`, + "checksum_simple": "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", + "checksum_tarsum": "tarsum+sha256:4409a0685741ca86d38df878ed6f8cbba4c99de5dc73cd71aef04be3bb70be7c", + "ancestry": `["77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`, + "layer": string([]byte{ + 0x1f, 0x8b, 0x08, 0x08, 0x0e, 0xb0, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65, + 0x72, 0x2e, 0x74, 0x61, 0x72, 0x00, 0xed, 0xd2, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05, + 0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 0xed, 0x38, 0x4e, 0xce, 0x13, 0x44, 0x2b, 0x66, + 0x62, 0x24, 0x8e, 0x4f, 0xa0, 0x15, 0x63, 0xb6, 0x20, 0x21, 0xfc, 0x96, 0xbf, 0x78, + 0xb0, 0xf5, 0x1d, 0x16, 0x98, 0x8e, 0x88, 0x8a, 0x2a, 0xbe, 0x33, 0xef, 0x49, 0x31, + 0xed, 0x79, 0x40, 0x8e, 0x5c, 0x44, 0x85, 0x88, 0x33, 0x12, 0x73, 0x2c, 0x02, 0xa8, + 0xf0, 0x05, 0xf7, 0x66, 0xf5, 0xd6, 0x57, 0x69, 0xd7, 0x7a, 0x19, 0xcd, 0xf5, 0xb1, + 0x6d, 0x1b, 0x1f, 0xf9, 0xba, 0xe3, 0x93, 0x3f, 0x22, 0x2c, 0xb6, 0x36, 0x0b, 0xf6, + 0xb0, 0xa9, 0xfd, 0xe7, 0x94, 0x46, 0xfd, 0xeb, 0xd1, 0x7f, 0x2c, 0xc4, 0xd2, 0xfb, + 0x97, 0xfe, 0x02, 0x80, 0xe4, 0xfd, 0x4f, 0x77, 0xae, 0x6d, 0x3d, 0x81, 0x73, 0xce, + 0xb9, 0x7f, 0xf3, 0x04, 0x41, 0xc1, 0xab, 0xc6, 0x00, 0x0a, 0x00, 0x00, + }), + }, + "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d": { + "json": `{"id":"42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + "parent":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + "comment":"test base image","created":"2013-03-23T12:55:11.10432-07:00", + "container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0, + "CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false, + "Tty":false,"OpenStdin":false,"StdinOnce":false, + "Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null, + "VolumesFrom":"","Entrypoint":null},"Size":424242}`, + 
"checksum_simple": "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", + "checksum_tarsum": "tarsum+sha256:68fdb56fb364f074eec2c9b3f85ca175329c4dcabc4a6a452b7272aa613a07a2", + "ancestry": `["42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`, + "layer": string([]byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xbd, 0xb3, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65, + 0x72, 0x2e, 0x74, 0x61, 0x72, 0x00, 0xed, 0xd1, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05, + 0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 0x9d, 0x38, 0x8e, 0xcf, 0x53, 0x51, 0xaa, 0x56, + 0xea, 0x44, 0x82, 0xc4, 0xf1, 0x09, 0xb4, 0xea, 0x98, 0x2d, 0x48, 0x08, 0xbf, 0xe5, + 0x2f, 0x1e, 0xfc, 0xf5, 0xdd, 0x00, 0xdd, 0x11, 0x91, 0x8a, 0xe0, 0x27, 0xd3, 0x9e, + 0x14, 0xe2, 0x9e, 0x07, 0xf4, 0xc1, 0x2b, 0x0b, 0xfb, 0xa4, 0x82, 0xe4, 0x3d, 0x93, + 0x02, 0x0a, 0x7c, 0xc1, 0x23, 0x97, 0xf1, 0x5e, 0x5f, 0xc9, 0xcb, 0x38, 0xb5, 0xee, + 0xea, 0xd9, 0x3c, 0xb7, 0x4b, 0xbe, 0x7b, 0x9c, 0xf9, 0x23, 0xdc, 0x50, 0x6e, 0xb9, + 0xb8, 0xf2, 0x2c, 0x5d, 0xf7, 0x4f, 0x31, 0xb6, 0xf6, 0x4f, 0xc7, 0xfe, 0x41, 0x55, + 0x63, 0xdd, 0x9f, 0x89, 0x09, 0x90, 0x6c, 0xff, 0xee, 0xae, 0xcb, 0xba, 0x4d, 0x17, + 0x30, 0xc6, 0x18, 0xf3, 0x67, 0x5e, 0xc1, 0xed, 0x21, 0x5d, 0x00, 0x0a, 0x00, 0x00, + }), + }, + } + testRepositories = map[string]map[string]string{ + "foo42/bar": { + "latest": "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + "test": "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + }, + } + mockHosts = map[string][]net.IP{ + "": {net.ParseIP("0.0.0.0")}, + "localhost": {net.ParseIP("127.0.0.1"), net.ParseIP("::1")}, + "example.com": {net.ParseIP("42.42.42.42")}, + "other.com": {net.ParseIP("43.43.43.43")}, + } +) + +func init() { + r := mux.NewRouter() + + // /v1/ + r.HandleFunc("/v1/_ping", handlerGetPing).Methods("GET") + r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|ancestry}", handlerGetImage).Methods("GET") + r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|checksum}", handlerPutImage).Methods("PUT") + r.HandleFunc("/v1/repositories/{repository:.+}/tags", handlerGetDeleteTags).Methods("GET", "DELETE") + r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerGetTag).Methods("GET") + r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerPutTag).Methods("PUT") + r.HandleFunc("/v1/users{null:.*}", handlerUsers).Methods("GET", "POST", "PUT") + r.HandleFunc("/v1/repositories/{repository:.+}{action:/images|/}", handlerImages).Methods("GET", "PUT", "DELETE") + r.HandleFunc("/v1/repositories/{repository:.+}/auth", handlerAuth).Methods("PUT") + r.HandleFunc("/v1/search", handlerSearch).Methods("GET") + + // /v2/ + r.HandleFunc("/v2/version", handlerGetPing).Methods("GET") + + testHTTPServer = httptest.NewServer(handlerAccessLog(r)) + testHTTPSServer = httptest.NewTLSServer(handlerAccessLog(r)) + + // override net.LookupIP + lookupIP = func(host string) ([]net.IP, error) { + if host == "127.0.0.1" { + // I believe in future Go versions this will fail, so let's fix it later + return net.LookupIP(host) + } + for h, addrs := range mockHosts { + if host == h { + return addrs, nil + } + for _, addr := range addrs { + if addr.String() == host { + return []net.IP{addr}, nil + } + } + } + return nil, errors.New("lookup: no such host") + } +} + +func handlerAccessLog(handler http.Handler) http.Handler { + logHandler := func(w http.ResponseWriter, r *http.Request) { + logrus.Debugf("%s 
\"%s %s\"", r.RemoteAddr, r.Method, r.URL) + handler.ServeHTTP(w, r) + } + return http.HandlerFunc(logHandler) +} + +func makeURL(req string) string { + return testHTTPServer.URL + req +} + +func makeHTTPSURL(req string) string { + return testHTTPSServer.URL + req +} + +func makeIndex(req string) *IndexInfo { + index := &IndexInfo{ + Name: makeURL(req), + } + return index +} + +func makeHTTPSIndex(req string) *IndexInfo { + index := &IndexInfo{ + Name: makeHTTPSURL(req), + } + return index +} + +func makePublicIndex() *IndexInfo { + index := &IndexInfo{ + Name: IndexServer, + Secure: true, + Official: true, + } + return index +} + +func makeServiceConfig(mirrors []string, insecureRegistries []string) *ServiceConfig { + options := &Options{ + Mirrors: opts.NewListOpts(nil), + InsecureRegistries: opts.NewListOpts(nil), + } + if mirrors != nil { + for _, mirror := range mirrors { + options.Mirrors.Set(mirror) + } + } + if insecureRegistries != nil { + for _, insecureRegistries := range insecureRegistries { + options.InsecureRegistries.Set(insecureRegistries) + } + } + + return NewServiceConfig(options) +} + +func writeHeaders(w http.ResponseWriter) { + h := w.Header() + h.Add("Server", "docker-tests/mock") + h.Add("Expires", "-1") + h.Add("Content-Type", "application/json") + h.Add("Pragma", "no-cache") + h.Add("Cache-Control", "no-cache") + h.Add("X-Docker-Registry-Version", "0.0.0") + h.Add("X-Docker-Registry-Config", "mock") +} + +func writeResponse(w http.ResponseWriter, message interface{}, code int) { + writeHeaders(w) + w.WriteHeader(code) + body, err := json.Marshal(message) + if err != nil { + io.WriteString(w, err.Error()) + return + } + w.Write(body) +} + +func readJSON(r *http.Request, dest interface{}) error { + body, err := ioutil.ReadAll(r.Body) + if err != nil { + return err + } + return json.Unmarshal(body, dest) +} + +func apiError(w http.ResponseWriter, message string, code int) { + body := map[string]string{ + "error": message, + } + writeResponse(w, body, code) +} + +func assertEqual(t *testing.T, a interface{}, b interface{}, message string) { + if a == b { + return + } + if len(message) == 0 { + message = fmt.Sprintf("%v != %v", a, b) + } + t.Fatal(message) +} + +func assertNotEqual(t *testing.T, a interface{}, b interface{}, message string) { + if a != b { + return + } + if len(message) == 0 { + message = fmt.Sprintf("%v == %v", a, b) + } + t.Fatal(message) +} + +// Similar to assertEqual, but does not stop test +func checkEqual(t *testing.T, a interface{}, b interface{}, messagePrefix string) { + if a == b { + return + } + message := fmt.Sprintf("%v != %v", a, b) + if len(messagePrefix) != 0 { + message = messagePrefix + ": " + message + } + t.Error(message) +} + +// Similar to assertNotEqual, but does not stop test +func checkNotEqual(t *testing.T, a interface{}, b interface{}, messagePrefix string) { + if a != b { + return + } + message := fmt.Sprintf("%v == %v", a, b) + if len(messagePrefix) != 0 { + message = messagePrefix + ": " + message + } + t.Error(message) +} + +func requiresAuth(w http.ResponseWriter, r *http.Request) bool { + writeCookie := func() { + value := fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano()) + cookie := &http.Cookie{Name: "session", Value: value, MaxAge: 3600} + http.SetCookie(w, cookie) + //FIXME(sam): this should be sent only on Index routes + value = fmt.Sprintf("FAKE-TOKEN-%d", time.Now().UnixNano()) + w.Header().Add("X-Docker-Token", value) + } + if len(r.Cookies()) > 0 { + writeCookie() + return true + } + if 
+	if len(r.Header.Get("Authorization")) > 0 {
+		writeCookie()
+		return true
+	}
+	w.Header().Add("WWW-Authenticate", "token")
+	apiError(w, "Wrong auth", 401)
+	return false
+}
+
+func handlerGetPing(w http.ResponseWriter, r *http.Request) {
+	writeResponse(w, true, 200)
+}
+
+func handlerGetImage(w http.ResponseWriter, r *http.Request) {
+	if !requiresAuth(w, r) {
+		return
+	}
+	vars := mux.Vars(r)
+	layer, exists := testLayers[vars["image_id"]]
+	if !exists {
+		http.NotFound(w, r)
+		return
+	}
+	writeHeaders(w)
+	layerSize := len(layer["layer"])
+	w.Header().Add("X-Docker-Size", strconv.Itoa(layerSize))
+	io.WriteString(w, layer[vars["action"]])
+}
+
+func handlerPutImage(w http.ResponseWriter, r *http.Request) {
+	if !requiresAuth(w, r) {
+		return
+	}
+	vars := mux.Vars(r)
+	imageID := vars["image_id"]
+	action := vars["action"]
+	layer, exists := testLayers[imageID]
+	if !exists {
+		if action != "json" {
+			http.NotFound(w, r)
+			return
+		}
+		layer = make(map[string]string)
+		testLayers[imageID] = layer
+	}
+	if checksum := r.Header.Get("X-Docker-Checksum"); checksum != "" {
+		if checksum != layer["checksum_simple"] && checksum != layer["checksum_tarsum"] {
+			apiError(w, "Wrong checksum", 400)
+			return
+		}
+	}
+	body, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		apiError(w, fmt.Sprintf("Error: %s", err), 500)
+		return
+	}
+	layer[action] = string(body)
+	writeResponse(w, true, 200)
+}
+
+func handlerGetDeleteTags(w http.ResponseWriter, r *http.Request) {
+	if !requiresAuth(w, r) {
+		return
+	}
+	repositoryName := mux.Vars(r)["repository"]
+	repositoryName = NormalizeLocalName(repositoryName)
+	tags, exists := testRepositories[repositoryName]
+	if !exists {
+		apiError(w, "Repository not found", 404)
+		return
+	}
+	if r.Method == "DELETE" {
+		delete(testRepositories, repositoryName)
+		writeResponse(w, true, 200)
+		return
+	}
+	writeResponse(w, tags, 200)
+}
+
+func handlerGetTag(w http.ResponseWriter, r *http.Request) {
+	if !requiresAuth(w, r) {
+		return
+	}
+	vars := mux.Vars(r)
+	repositoryName := vars["repository"]
+	repositoryName = NormalizeLocalName(repositoryName)
+	tagName := vars["tag"]
+	tags, exists := testRepositories[repositoryName]
+	if !exists {
+		apiError(w, "Repository not found", 404)
+		return
+	}
+	tag, exists := tags[tagName]
+	if !exists {
+		apiError(w, "Tag not found", 404)
+		return
+	}
+	writeResponse(w, tag, 200)
+}
+
+func handlerPutTag(w http.ResponseWriter, r *http.Request) {
+	if !requiresAuth(w, r) {
+		return
+	}
+	vars := mux.Vars(r)
+	repositoryName := vars["repository"]
+	repositoryName = NormalizeLocalName(repositoryName)
+	tagName := vars["tag"]
+	tags, exists := testRepositories[repositoryName]
+	if !exists {
+		// assign to the outer tags; a fresh := here would shadow it and
+		// leave the write below panicking on a nil map
+		tags = make(map[string]string)
+		testRepositories[repositoryName] = tags
+	}
+	tagValue := ""
+	readJSON(r, &tagValue)
+	tags[tagName] = tagValue
+	writeResponse(w, true, 200)
+}
+
+func handlerUsers(w http.ResponseWriter, r *http.Request) {
+	code := 200
+	if r.Method == "POST" {
+		code = 201
+	} else if r.Method == "PUT" {
+		code = 204
+	}
+	writeResponse(w, "", code)
+}
+
+func handlerImages(w http.ResponseWriter, r *http.Request) {
+	u, _ := url.Parse(testHTTPServer.URL)
+	w.Header().Add("X-Docker-Endpoints", fmt.Sprintf("%s , %s ", u.Host, "test.example.com"))
+	w.Header().Add("X-Docker-Token", fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano()))
+	if r.Method == "PUT" {
+		if strings.HasSuffix(r.URL.Path, "images") {
+			writeResponse(w, "", 204)
+			return
+		}
+		writeResponse(w, "", 200)
+		return
+	}
+	if r.Method == "DELETE" {
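+		// Acknowledge DELETE with an empty 204 response.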
writeResponse(w, "", 204) + return + } + images := []map[string]string{} + for imageID, layer := range testLayers { + image := make(map[string]string) + image["id"] = imageID + image["checksum"] = layer["checksum_tarsum"] + image["Tag"] = "latest" + images = append(images, image) + } + writeResponse(w, images, 200) +} + +func handlerAuth(w http.ResponseWriter, r *http.Request) { + writeResponse(w, "OK", 200) +} + +func handlerSearch(w http.ResponseWriter, r *http.Request) { + result := &SearchResults{ + Query: "fakequery", + NumResults: 1, + Results: []SearchResult{{Name: "fakeimage", StarCount: 42}}, + } + writeResponse(w, result, 200) +} + +func TestPing(t *testing.T) { + res, err := http.Get(makeURL("/v1/_ping")) + if err != nil { + t.Fatal(err) + } + assertEqual(t, res.StatusCode, 200, "") + assertEqual(t, res.Header.Get("X-Docker-Registry-Config"), "mock", + "This is not a Mocked Registry") +} + +/* Uncomment this to test Mocked Registry locally with curl + * WARNING: Don't push on the repos uncommented, it'll block the tests + * +func TestWait(t *testing.T) { + logrus.Println("Test HTTP server ready and waiting:", testHTTPServer.URL) + c := make(chan int) + <-c +} + +//*/ diff --git a/registry/registry_test.go b/registry/registry_test.go new file mode 100644 index 00000000..03751862 --- /dev/null +++ b/registry/registry_test.go @@ -0,0 +1,918 @@ +package registry + +import ( + "fmt" + "net/http" + "net/http/httputil" + "net/url" + "strings" + "testing" + + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/cliconfig" +) + +var ( + token = []string{"fake-token"} +) + +const ( + imageID = "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d" + REPO = "foo42/bar" +) + +func spawnTestRegistrySession(t *testing.T) *Session { + authConfig := &cliconfig.AuthConfig{} + endpoint, err := NewEndpoint(makeIndex("/v1/"), nil, APIVersionUnknown) + if err != nil { + t.Fatal(err) + } + var tr http.RoundTripper = debugTransport{NewTransport(nil), t.Log} + tr = transport.NewTransport(AuthTransport(tr, authConfig, false), DockerHeaders(nil)...) + client := HTTPClient(tr) + r, err := NewSession(client, authConfig, endpoint) + if err != nil { + t.Fatal(err) + } + // In a normal scenario for the v1 registry, the client should send a `X-Docker-Token: true` + // header while authenticating, in order to retrieve a token that can be later used to + // perform authenticated actions. + // + // The mock v1 registry does not support that, (TODO(tiborvass): support it), instead, + // it will consider authenticated any request with the header `X-Docker-Token: fake-token`. + // + // Because we know that the client's transport is an `*authTransport` we simply cast it, + // in order to set the internal cached token to the fake token, and thus send that fake token + // upon every subsequent requests. 
+ r.client.Transport.(*authTransport).token = token + return r +} + +func TestPingRegistryEndpoint(t *testing.T) { + testPing := func(index *IndexInfo, expectedStandalone bool, assertMessage string) { + ep, err := NewEndpoint(index, nil, APIVersionUnknown) + if err != nil { + t.Fatal(err) + } + regInfo, err := ep.Ping() + if err != nil { + t.Fatal(err) + } + + assertEqual(t, regInfo.Standalone, expectedStandalone, assertMessage) + } + + testPing(makeIndex("/v1/"), true, "Expected standalone to be true (default)") + testPing(makeHTTPSIndex("/v1/"), true, "Expected standalone to be true (default)") + testPing(makePublicIndex(), false, "Expected standalone to be false for public index") +} + +func TestEndpoint(t *testing.T) { + // Simple wrapper to fail test if err != nil + expandEndpoint := func(index *IndexInfo) *Endpoint { + endpoint, err := NewEndpoint(index, nil, APIVersionUnknown) + if err != nil { + t.Fatal(err) + } + return endpoint + } + + assertInsecureIndex := func(index *IndexInfo) { + index.Secure = true + _, err := NewEndpoint(index, nil, APIVersionUnknown) + assertNotEqual(t, err, nil, index.Name+": Expected error for insecure index") + assertEqual(t, strings.Contains(err.Error(), "insecure-registry"), true, index.Name+": Expected insecure-registry error for insecure index") + index.Secure = false + } + + assertSecureIndex := func(index *IndexInfo) { + index.Secure = true + _, err := NewEndpoint(index, nil, APIVersionUnknown) + assertNotEqual(t, err, nil, index.Name+": Expected cert error for secure index") + assertEqual(t, strings.Contains(err.Error(), "certificate signed by unknown authority"), true, index.Name+": Expected cert error for secure index") + index.Secure = false + } + + index := &IndexInfo{} + index.Name = makeURL("/v1/") + endpoint := expandEndpoint(index) + assertEqual(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name) + if endpoint.Version != APIVersion1 { + t.Fatal("Expected endpoint to be v1") + } + assertInsecureIndex(index) + + index.Name = makeURL("") + endpoint = expandEndpoint(index) + assertEqual(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/") + if endpoint.Version != APIVersion1 { + t.Fatal("Expected endpoint to be v1") + } + assertInsecureIndex(index) + + httpURL := makeURL("") + index.Name = strings.SplitN(httpURL, "://", 2)[1] + endpoint = expandEndpoint(index) + assertEqual(t, endpoint.String(), httpURL+"/v1/", index.Name+": Expected endpoint to be "+httpURL+"/v1/") + if endpoint.Version != APIVersion1 { + t.Fatal("Expected endpoint to be v1") + } + assertInsecureIndex(index) + + index.Name = makeHTTPSURL("/v1/") + endpoint = expandEndpoint(index) + assertEqual(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name) + if endpoint.Version != APIVersion1 { + t.Fatal("Expected endpoint to be v1") + } + assertSecureIndex(index) + + index.Name = makeHTTPSURL("") + endpoint = expandEndpoint(index) + assertEqual(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/") + if endpoint.Version != APIVersion1 { + t.Fatal("Expected endpoint to be v1") + } + assertSecureIndex(index) + + httpsURL := makeHTTPSURL("") + index.Name = strings.SplitN(httpsURL, "://", 2)[1] + endpoint = expandEndpoint(index) + assertEqual(t, endpoint.String(), httpsURL+"/v1/", index.Name+": Expected endpoint to be "+httpsURL+"/v1/") + if endpoint.Version != APIVersion1 { + t.Fatal("Expected endpoint to be v1") + } + assertSecureIndex(index) + + 
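+	// Nothing listens at these addresses in the test setup, so expanding
+	// them must fail: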
badEndpoints := []string{
+		"http://127.0.0.1/v1/",
+		"https://127.0.0.1/v1/",
+		"http://127.0.0.1",
+		"https://127.0.0.1",
+		"127.0.0.1",
+	}
+	for _, address := range badEndpoints {
+		index.Name = address
+		_, err := NewEndpoint(index, nil, APIVersionUnknown)
+		checkNotEqual(t, err, nil, "Expected error while expanding bad endpoint")
+	}
+}
+
+func TestGetRemoteHistory(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	hist, err := r.GetRemoteHistory(imageID, makeURL("/v1/"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertEqual(t, len(hist), 2, "Expected 2 images in history")
+	assertEqual(t, hist[0], imageID, "Expected "+imageID+" as first ancestry")
+	assertEqual(t, hist[1], "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20",
+		"Unexpected second ancestry")
+}
+
+func TestLookupRemoteImage(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	err := r.LookupRemoteImage(imageID, makeURL("/v1/"))
+	assertEqual(t, err, nil, "Expected error of remote lookup to be nil")
+	if err := r.LookupRemoteImage("abcdef", makeURL("/v1/")); err == nil {
+		t.Fatal("Expected error of remote lookup to be non-nil")
+	}
+}
+
+func TestGetRemoteImageJSON(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	json, size, err := r.GetRemoteImageJSON(imageID, makeURL("/v1/"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertEqual(t, size, 154, "Expected size 154")
+	if len(json) <= 0 {
+		t.Fatal("Expected non-empty json")
+	}
+
+	_, _, err = r.GetRemoteImageJSON("abcdef", makeURL("/v1/"))
+	if err == nil {
+		t.Fatal("Expected image not found error")
+	}
+}
+
+func TestGetRemoteImageLayer(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	data, err := r.GetRemoteImageLayer(imageID, makeURL("/v1/"), 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if data == nil {
+		t.Fatal("Expected non-nil data result")
+	}
+
+	_, err = r.GetRemoteImageLayer("abcdef", makeURL("/v1/"), 0)
+	if err == nil {
+		t.Fatal("Expected image not found error")
+	}
+}
+
+func TestGetRemoteTag(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	tag, err := r.GetRemoteTag([]string{makeURL("/v1/")}, REPO, "test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertEqual(t, tag, imageID, "Expected tag test to map to "+imageID)
+
+	_, err = r.GetRemoteTag([]string{makeURL("/v1/")}, "foo42/baz", "foo")
+	if err != ErrRepoNotFound {
+		t.Fatal("Expected ErrRepoNotFound error when fetching tag for bogus repo")
+	}
+}
+
+func TestGetRemoteTags(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	tags, err := r.GetRemoteTags([]string{makeURL("/v1/")}, REPO)
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertEqual(t, len(tags), 2, "Expected two tags")
+	assertEqual(t, tags["latest"], imageID, "Expected tag latest to map to "+imageID)
+	assertEqual(t, tags["test"], imageID, "Expected tag test to map to "+imageID)
+
+	_, err = r.GetRemoteTags([]string{makeURL("/v1/")}, "foo42/baz")
+	if err != ErrRepoNotFound {
+		t.Fatal("Expected ErrRepoNotFound error when fetching tags for bogus repo")
+	}
+}
+
+func TestGetRepositoryData(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	parsedURL, err := url.Parse(makeURL("/v1/"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	host := "http://" + parsedURL.Host + "/v1/"
+	data, err := r.GetRepositoryData("foo42/bar")
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertEqual(t, len(data.ImgList), 2, "Expected 2 images in ImgList")
+	assertEqual(t, len(data.Endpoints), 2,
+		fmt.Sprintf("Expected 2 endpoints in Endpoints, found %d instead", len(data.Endpoints)))
+	assertEqual(t, data.Endpoints[0], host,
+		fmt.Sprintf("Expected first endpoint to be %s but found %s instead", host, data.Endpoints[0]))
+	assertEqual(t, data.Endpoints[1], "http://test.example.com/v1/",
+		fmt.Sprintf("Expected second endpoint to be http://test.example.com/v1/ but found %s instead", data.Endpoints[1]))
+
+}
+
+func TestPushImageJSONRegistry(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	imgData := &ImgData{
+		ID:       "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20",
+		Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37",
+	}
+
+	err := r.PushImageJSONRegistry(imgData, []byte{0x42, 0xdf, 0x0}, makeURL("/v1/"))
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestPushImageLayerRegistry(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	layer := strings.NewReader("")
+	_, _, err := r.PushImageLayerRegistry(imageID, layer, makeURL("/v1/"), []byte{})
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestValidateRepositoryName(t *testing.T) {
+	validRepoNames := []string{
+		"docker/docker",
+		"library/debian",
+		"debian",
+		"docker.io/docker/docker",
+		"docker.io/library/debian",
+		"docker.io/debian",
+		"index.docker.io/docker/docker",
+		"index.docker.io/library/debian",
+		"index.docker.io/debian",
+		"127.0.0.1:5000/docker/docker",
+		"127.0.0.1:5000/library/debian",
+		"127.0.0.1:5000/debian",
+		"thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev",
+	}
+	invalidRepoNames := []string{
+		"https://github.com/docker/docker",
+		"docker/Docker",
+		"-docker",
+		"-docker/docker",
+		"-docker.io/docker/docker",
+		"docker///docker",
+		"docker.io/docker/Docker",
+		"docker.io/docker///docker",
+		"1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a",
+		"docker.io/1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a",
+	}
+
+	for _, name := range invalidRepoNames {
+		err := ValidateRepositoryName(name)
+		assertNotEqual(t, err, nil, "Expected invalid repo name: "+name)
+	}
+
+	for _, name := range validRepoNames {
+		err := ValidateRepositoryName(name)
+		assertEqual(t, err, nil, "Expected valid repo name: "+name)
+	}
+
+	err := ValidateRepositoryName(invalidRepoNames[0])
+	assertEqual(t, err, ErrInvalidRepositoryName, "Expected ErrInvalidRepositoryName: "+invalidRepoNames[0])
+}
+
+func TestParseRepositoryInfo(t *testing.T) {
+	expectedRepoInfos := map[string]RepositoryInfo{
+		"fooo/bar": {
+			Index: &IndexInfo{
+				Name:     IndexName,
+				Official: true,
+			},
+			RemoteName:    "fooo/bar",
+			LocalName:     "fooo/bar",
+			CanonicalName: "docker.io/fooo/bar",
+			Official:      false,
+		},
+		"library/ubuntu": {
+			Index: &IndexInfo{
+				Name:     IndexName,
+				Official: true,
+			},
+			RemoteName:    "library/ubuntu",
+			LocalName:     "ubuntu",
+			CanonicalName: "docker.io/library/ubuntu",
+			Official:      true,
+		},
+		"nonlibrary/ubuntu": {
+			Index: &IndexInfo{
+				Name:     IndexName,
+				Official: true,
+			},
+			RemoteName:    "nonlibrary/ubuntu",
+			LocalName:     "nonlibrary/ubuntu",
+			CanonicalName: "docker.io/nonlibrary/ubuntu",
+			Official:      false,
+		},
+		"ubuntu": {
+			Index: &IndexInfo{
+				Name:     IndexName,
+				Official: true,
+			},
+			RemoteName:    "library/ubuntu",
+			LocalName:     "ubuntu",
+			CanonicalName: "docker.io/library/ubuntu",
+			Official:      true,
+		},
+		"other/library": {
+			Index: &IndexInfo{
+				Name:     IndexName,
+				Official: true,
+			},
+			RemoteName:    "other/library",
+			LocalName:     "other/library",
+			CanonicalName: "docker.io/other/library",
+			Official:      false,
+		},
+		"127.0.0.1:8000/private/moonbase": {
+			Index: &IndexInfo{
+				Name:     "127.0.0.1:8000",
+				Official: false,
+			},
+			RemoteName:    "private/moonbase",
+			LocalName:     "127.0.0.1:8000/private/moonbase",
+
CanonicalName: "127.0.0.1:8000/private/moonbase", + Official: false, + }, + "127.0.0.1:8000/privatebase": { + Index: &IndexInfo{ + Name: "127.0.0.1:8000", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "127.0.0.1:8000/privatebase", + CanonicalName: "127.0.0.1:8000/privatebase", + Official: false, + }, + "localhost:8000/private/moonbase": { + Index: &IndexInfo{ + Name: "localhost:8000", + Official: false, + }, + RemoteName: "private/moonbase", + LocalName: "localhost:8000/private/moonbase", + CanonicalName: "localhost:8000/private/moonbase", + Official: false, + }, + "localhost:8000/privatebase": { + Index: &IndexInfo{ + Name: "localhost:8000", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "localhost:8000/privatebase", + CanonicalName: "localhost:8000/privatebase", + Official: false, + }, + "example.com/private/moonbase": { + Index: &IndexInfo{ + Name: "example.com", + Official: false, + }, + RemoteName: "private/moonbase", + LocalName: "example.com/private/moonbase", + CanonicalName: "example.com/private/moonbase", + Official: false, + }, + "example.com/privatebase": { + Index: &IndexInfo{ + Name: "example.com", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "example.com/privatebase", + CanonicalName: "example.com/privatebase", + Official: false, + }, + "example.com:8000/private/moonbase": { + Index: &IndexInfo{ + Name: "example.com:8000", + Official: false, + }, + RemoteName: "private/moonbase", + LocalName: "example.com:8000/private/moonbase", + CanonicalName: "example.com:8000/private/moonbase", + Official: false, + }, + "example.com:8000/privatebase": { + Index: &IndexInfo{ + Name: "example.com:8000", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "example.com:8000/privatebase", + CanonicalName: "example.com:8000/privatebase", + Official: false, + }, + "localhost/private/moonbase": { + Index: &IndexInfo{ + Name: "localhost", + Official: false, + }, + RemoteName: "private/moonbase", + LocalName: "localhost/private/moonbase", + CanonicalName: "localhost/private/moonbase", + Official: false, + }, + "localhost/privatebase": { + Index: &IndexInfo{ + Name: "localhost", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "localhost/privatebase", + CanonicalName: "localhost/privatebase", + Official: false, + }, + IndexName + "/public/moonbase": { + Index: &IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "public/moonbase", + LocalName: "public/moonbase", + CanonicalName: "docker.io/public/moonbase", + Official: false, + }, + "index." + IndexName + "/public/moonbase": { + Index: &IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "public/moonbase", + LocalName: "public/moonbase", + CanonicalName: "docker.io/public/moonbase", + Official: false, + }, + "ubuntu-12.04-base": { + Index: &IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "library/ubuntu-12.04-base", + LocalName: "ubuntu-12.04-base", + CanonicalName: "docker.io/library/ubuntu-12.04-base", + Official: true, + }, + IndexName + "/ubuntu-12.04-base": { + Index: &IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "library/ubuntu-12.04-base", + LocalName: "ubuntu-12.04-base", + CanonicalName: "docker.io/library/ubuntu-12.04-base", + Official: true, + }, + "index." 
+ IndexName + "/ubuntu-12.04-base": { + Index: &IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "library/ubuntu-12.04-base", + LocalName: "ubuntu-12.04-base", + CanonicalName: "docker.io/library/ubuntu-12.04-base", + Official: true, + }, + } + + for reposName, expectedRepoInfo := range expectedRepoInfos { + repoInfo, err := ParseRepositoryInfo(reposName) + if err != nil { + t.Error(err) + } else { + checkEqual(t, repoInfo.Index.Name, expectedRepoInfo.Index.Name, reposName) + checkEqual(t, repoInfo.RemoteName, expectedRepoInfo.RemoteName, reposName) + checkEqual(t, repoInfo.LocalName, expectedRepoInfo.LocalName, reposName) + checkEqual(t, repoInfo.CanonicalName, expectedRepoInfo.CanonicalName, reposName) + checkEqual(t, repoInfo.Index.Official, expectedRepoInfo.Index.Official, reposName) + checkEqual(t, repoInfo.Official, expectedRepoInfo.Official, reposName) + } + } +} + +func TestNewIndexInfo(t *testing.T) { + testIndexInfo := func(config *ServiceConfig, expectedIndexInfos map[string]*IndexInfo) { + for indexName, expectedIndexInfo := range expectedIndexInfos { + index, err := config.NewIndexInfo(indexName) + if err != nil { + t.Fatal(err) + } else { + checkEqual(t, index.Name, expectedIndexInfo.Name, indexName+" name") + checkEqual(t, index.Official, expectedIndexInfo.Official, indexName+" is official") + checkEqual(t, index.Secure, expectedIndexInfo.Secure, indexName+" is secure") + checkEqual(t, len(index.Mirrors), len(expectedIndexInfo.Mirrors), indexName+" mirrors") + } + } + } + + config := NewServiceConfig(nil) + noMirrors := []string{} + expectedIndexInfos := map[string]*IndexInfo{ + IndexName: { + Name: IndexName, + Official: true, + Secure: true, + Mirrors: noMirrors, + }, + "index." + IndexName: { + Name: IndexName, + Official: true, + Secure: true, + Mirrors: noMirrors, + }, + "example.com": { + Name: "example.com", + Official: false, + Secure: true, + Mirrors: noMirrors, + }, + "127.0.0.1:5000": { + Name: "127.0.0.1:5000", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + } + testIndexInfo(config, expectedIndexInfos) + + publicMirrors := []string{"http://mirror1.local", "http://mirror2.local"} + config = makeServiceConfig(publicMirrors, []string{"example.com"}) + + expectedIndexInfos = map[string]*IndexInfo{ + IndexName: { + Name: IndexName, + Official: true, + Secure: true, + Mirrors: publicMirrors, + }, + "index." 
+ IndexName: {
+			Name:     IndexName,
+			Official: true,
+			Secure:   true,
+			Mirrors:  publicMirrors,
+		},
+		"example.com": {
+			Name:     "example.com",
+			Official: false,
+			Secure:   false,
+			Mirrors:  noMirrors,
+		},
+		"example.com:5000": {
+			Name:     "example.com:5000",
+			Official: false,
+			Secure:   true,
+			Mirrors:  noMirrors,
+		},
+		"127.0.0.1": {
+			Name:     "127.0.0.1",
+			Official: false,
+			Secure:   false,
+			Mirrors:  noMirrors,
+		},
+		"127.0.0.1:5000": {
+			Name:     "127.0.0.1:5000",
+			Official: false,
+			Secure:   false,
+			Mirrors:  noMirrors,
+		},
+		"other.com": {
+			Name:     "other.com",
+			Official: false,
+			Secure:   true,
+			Mirrors:  noMirrors,
+		},
+	}
+	testIndexInfo(config, expectedIndexInfos)
+
+	config = makeServiceConfig(nil, []string{"42.42.0.0/16"})
+	expectedIndexInfos = map[string]*IndexInfo{
+		"example.com": {
+			Name:     "example.com",
+			Official: false,
+			Secure:   false,
+			Mirrors:  noMirrors,
+		},
+		"example.com:5000": {
+			Name:     "example.com:5000",
+			Official: false,
+			Secure:   false,
+			Mirrors:  noMirrors,
+		},
+		"127.0.0.1": {
+			Name:     "127.0.0.1",
+			Official: false,
+			Secure:   false,
+			Mirrors:  noMirrors,
+		},
+		"127.0.0.1:5000": {
+			Name:     "127.0.0.1:5000",
+			Official: false,
+			Secure:   false,
+			Mirrors:  noMirrors,
+		},
+		"other.com": {
+			Name:     "other.com",
+			Official: false,
+			Secure:   true,
+			Mirrors:  noMirrors,
+		},
+	}
+	testIndexInfo(config, expectedIndexInfos)
+}
+
+func TestPushRegistryTag(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	err := r.PushRegistryTag("foo42/bar", imageID, "stable", makeURL("/v1/"))
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestPushImageJSONIndex(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	imgData := []*ImgData{
+		{
+			ID:       "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20",
+			Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37",
+		},
+		{
+			ID:       "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d",
+			Checksum: "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2",
+		},
+	}
+	repoData, err := r.PushImageJSONIndex("foo42/bar", imgData, false, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if repoData == nil {
+		t.Fatal("Expected RepositoryData object")
+	}
+	repoData, err = r.PushImageJSONIndex("foo42/bar", imgData, true, []string{r.indexEndpoint.String()})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if repoData == nil {
+		t.Fatal("Expected RepositoryData object")
+	}
+}
+
+func TestSearchRepositories(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	results, err := r.SearchRepositories("fakequery")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if results == nil {
+		t.Fatal("Expected non-nil SearchResults object")
+	}
+	assertEqual(t, results.NumResults, 1, "Expected 1 search result")
+	assertEqual(t, results.Query, "fakequery", "Expected 'fakequery' as query")
+	assertEqual(t, results.Results[0].StarCount, 42, "Expected 'fakeimage' to have 42 stars")
+}
+
+func TestValidRemoteName(t *testing.T) {
+	validRepositoryNames := []string{
+		// Sanity check.
+		"docker/docker",
+
+		// Allow 64-character non-hexadecimal names (hexadecimal names are forbidden).
+		"thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev",
+
+		// Allow embedded hyphens.
+		"docker-rules/docker",
+
+		// Username "doc" and image name "docker" being tested.
+		"doc/docker",
+
+		// Single-character names are now allowed.
+		"d/docker",
+		"jess/t",
+	}
+	for _, repositoryName := range validRepositoryNames {
+		if err := validateRemoteName(repositoryName); err != nil {
+			t.Errorf("Repository name should be valid: %v. Error: %v", repositoryName, err)
Error: %v", repositoryName, err) + } + } + + invalidRepositoryNames := []string{ + // Disallow capital letters. + "docker/Docker", + + // Only allow one slash. + "docker///docker", + + // Disallow 64-character hexadecimal. + "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", + + // Disallow leading and trailing hyphens in namespace. + "-docker/docker", + "docker-/docker", + "-docker-/docker", + + // Don't allow underscores everywhere (as opposed to hyphens). + "____/____", + + "_docker/_docker", + + // Disallow consecutive hyphens. + "dock--er/docker", + + // No repository. + "docker/", + + //namespace too long + "this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255/docker", + } + for _, repositoryName := range invalidRepositoryNames { + if err := validateRemoteName(repositoryName); err == nil { + t.Errorf("Repository name should be invalid: %v", repositoryName) + } + } +} + +func TestTrustedLocation(t *testing.T) { + for _, url := range []string{"http://example.com", "https://example.com:7777", "http://docker.io", "http://test.docker.com", "https://fakedocker.com"} { + req, _ := http.NewRequest("GET", url, nil) + if trustedLocation(req) == true { + t.Fatalf("'%s' shouldn't be detected as a trusted location", url) + } + } + + for _, url := range []string{"https://docker.io", "https://test.docker.com:80"} { + req, _ := http.NewRequest("GET", url, nil) + if trustedLocation(req) == false { + t.Fatalf("'%s' should be detected as a trusted location", url) + } + } +} + +func TestAddRequiredHeadersToRedirectedRequests(t *testing.T) { + for _, urls := range [][]string{ + {"http://docker.io", "https://docker.com"}, + {"https://foo.docker.io:7777", "http://bar.docker.com"}, + {"https://foo.docker.io", "https://example.com"}, + } { + reqFrom, _ := http.NewRequest("GET", urls[0], nil) + reqFrom.Header.Add("Content-Type", "application/json") + reqFrom.Header.Add("Authorization", "super_secret") + reqTo, _ := http.NewRequest("GET", urls[1], nil) + + addRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom}) + + if len(reqTo.Header) != 1 { + t.Fatalf("Expected 1 headers, got %d", len(reqTo.Header)) + } + + if reqTo.Header.Get("Content-Type") != "application/json" { + t.Fatal("'Content-Type' should be 'application/json'") + } + + if reqTo.Header.Get("Authorization") != "" { + t.Fatal("'Authorization' should be empty") + } + } + + for _, urls := range [][]string{ + {"https://docker.io", "https://docker.com"}, + {"https://foo.docker.io:7777", "https://bar.docker.com"}, + } { + reqFrom, _ := http.NewRequest("GET", urls[0], nil) + reqFrom.Header.Add("Content-Type", "application/json") + reqFrom.Header.Add("Authorization", "super_secret") + reqTo, _ := http.NewRequest("GET", urls[1], nil) + + addRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom}) + + if len(reqTo.Header) != 2 { + t.Fatalf("Expected 2 headers, got %d", len(reqTo.Header)) + } + + if reqTo.Header.Get("Content-Type") != "application/json" { + t.Fatal("'Content-Type' should be 'application/json'") + } + + if reqTo.Header.Get("Authorization") != "super_secret" { + t.Fatal("'Authorization' should be 'super_secret'") + } + } +} + +func TestIsSecureIndex(t *testing.T) { + tests := []struct { + addr string + insecureRegistries []string + expected bool + }{ + {IndexName, nil, true}, + 
{"example.com", []string{}, true}, + {"example.com", []string{"example.com"}, false}, + {"localhost", []string{"localhost:5000"}, false}, + {"localhost:5000", []string{"localhost:5000"}, false}, + {"localhost", []string{"example.com"}, false}, + {"127.0.0.1:5000", []string{"127.0.0.1:5000"}, false}, + {"localhost", nil, false}, + {"localhost:5000", nil, false}, + {"127.0.0.1", nil, false}, + {"localhost", []string{"example.com"}, false}, + {"127.0.0.1", []string{"example.com"}, false}, + {"example.com", nil, true}, + {"example.com", []string{"example.com"}, false}, + {"127.0.0.1", []string{"example.com"}, false}, + {"127.0.0.1:5000", []string{"example.com"}, false}, + {"example.com:5000", []string{"42.42.0.0/16"}, false}, + {"example.com", []string{"42.42.0.0/16"}, false}, + {"example.com:5000", []string{"42.42.42.42/8"}, false}, + {"127.0.0.1:5000", []string{"127.0.0.0/8"}, false}, + {"42.42.42.42:5000", []string{"42.1.1.1/8"}, false}, + {"invalid.domain.com", []string{"42.42.0.0/16"}, true}, + {"invalid.domain.com", []string{"invalid.domain.com"}, false}, + {"invalid.domain.com:5000", []string{"invalid.domain.com"}, true}, + {"invalid.domain.com:5000", []string{"invalid.domain.com:5000"}, false}, + } + for _, tt := range tests { + config := makeServiceConfig(nil, tt.insecureRegistries) + if sec := config.isSecureIndex(tt.addr); sec != tt.expected { + t.Errorf("isSecureIndex failed for %q %v, expected %v got %v", tt.addr, tt.insecureRegistries, tt.expected, sec) + } + } +} + +type debugTransport struct { + http.RoundTripper + log func(...interface{}) +} + +func (tr debugTransport) RoundTrip(req *http.Request) (*http.Response, error) { + dump, err := httputil.DumpRequestOut(req, false) + if err != nil { + tr.log("could not dump request") + } + tr.log(string(dump)) + resp, err := tr.RoundTripper.RoundTrip(req) + if err != nil { + return nil, err + } + dump, err = httputil.DumpResponse(resp, false) + if err != nil { + tr.log("could not dump response") + } + tr.log(string(dump)) + return resp, err +} diff --git a/registry/service.go b/registry/service.go new file mode 100644 index 00000000..7237fb04 --- /dev/null +++ b/registry/service.go @@ -0,0 +1,148 @@ +package registry + +import ( + "crypto/tls" + "net/http" + "net/url" + + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/docker/cliconfig" +) + +// Service is a registry service. It tracks configuration data such as a list +// of mirrors. +type Service struct { + Config *ServiceConfig +} + +// NewService returns a new instance of Service ready to be +// installed into an engine. +func NewService(options *Options) *Service { + return &Service{ + Config: NewServiceConfig(options), + } +} + +// Auth contacts the public registry with the provided credentials, +// and returns OK if authentication was sucessful. +// It can be used to verify the validity of a client's credentials. +func (s *Service) Auth(authConfig *cliconfig.AuthConfig) (string, error) { + addr := authConfig.ServerAddress + if addr == "" { + // Use the official registry address if not specified. 
+		addr = IndexServer
+	}
+	index, err := s.ResolveIndex(addr)
+	if err != nil {
+		return "", err
+	}
+
+	endpointVersion := APIVersion(APIVersionUnknown)
+	if V2Only {
+		// Override the endpoint to only attempt a v2 ping
+		endpointVersion = APIVersion2
+	}
+
+	endpoint, err := NewEndpoint(index, nil, endpointVersion)
+	if err != nil {
+		return "", err
+	}
+	authConfig.ServerAddress = endpoint.String()
+	return Login(authConfig, endpoint)
+}
+
+// Search queries the public registry for images matching the specified
+// search terms, and returns the results.
+func (s *Service) Search(term string, authConfig *cliconfig.AuthConfig, headers map[string][]string) (*SearchResults, error) {
+	repoInfo, err := s.ResolveRepository(term)
+	if err != nil {
+		return nil, err
+	}
+
+	// TODO: Search multiple indexes.
+	endpoint, err := NewEndpoint(repoInfo.Index, http.Header(headers), APIVersionUnknown)
+	if err != nil {
+		return nil, err
+	}
+
+	r, err := NewSession(endpoint.client, authConfig, endpoint)
+	if err != nil {
+		return nil, err
+	}
+	return r.SearchRepositories(repoInfo.GetSearchTerm())
+}
+
+// ResolveRepository splits a repository name into its components
+// and configuration of the associated registry.
+func (s *Service) ResolveRepository(name string) (*RepositoryInfo, error) {
+	return s.Config.NewRepositoryInfo(name)
+}
+
+// ResolveIndex takes indexName and returns index info
+func (s *Service) ResolveIndex(name string) (*IndexInfo, error) {
+	return s.Config.NewIndexInfo(name)
+}
+
+// APIEndpoint represents a remote API endpoint
+type APIEndpoint struct {
+	Mirror        bool
+	URL           string
+	Version       APIVersion
+	Official      bool
+	TrimHostname  bool
+	TLSConfig     *tls.Config
+	VersionHeader string
+	Versions      []auth.APIVersion
+}
+
+// ToV1Endpoint returns a V1 API endpoint based on the APIEndpoint
+func (e APIEndpoint) ToV1Endpoint(metaHeaders http.Header) (*Endpoint, error) {
+	return newEndpoint(e.URL, e.TLSConfig, metaHeaders)
+}
+
+// TLSConfig constructs a client TLS configuration based on server defaults
+func (s *Service) TLSConfig(hostname string) (*tls.Config, error) {
+	return newTLSConfig(hostname, s.Config.isSecureIndex(hostname))
+}
+
+func (s *Service) tlsConfigForMirror(mirror string) (*tls.Config, error) {
+	mirrorURL, err := url.Parse(mirror)
+	if err != nil {
+		return nil, err
+	}
+	return s.TLSConfig(mirrorURL.Host)
+}
+
+// LookupPullEndpoints creates a list of endpoints to try to pull from, in order of preference.
+// It gives preference to v2 endpoints over v1, mirrors over the actual
+// registry, and HTTPS over plain HTTP.
+func (s *Service) LookupPullEndpoints(repoName string) (endpoints []APIEndpoint, err error) {
+	return s.lookupEndpoints(repoName)
+}
+
+// LookupPushEndpoints creates a list of endpoints to try to push to, in order of preference.
+// It gives preference to v2 endpoints over v1, and HTTPS over plain HTTP.
+// Mirrors are not included.
+func (s *Service) LookupPushEndpoints(repoName string) (endpoints []APIEndpoint, err error) {
+	return s.lookupEndpoints(repoName)
+}
+
+func (s *Service) lookupEndpoints(repoName string) (endpoints []APIEndpoint, err error) {
+	endpoints, err = s.lookupV2Endpoints(repoName)
+
+	if err != nil {
+		return nil, err
+	}
+
+	if V2Only {
+		return endpoints, nil
+	}
+
+	legacyEndpoints, err := s.lookupV1Endpoints(repoName)
+	if err != nil {
+		return nil, err
+	}
+	endpoints = append(endpoints, legacyEndpoints...)
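+	// v1 endpoints are appended after the v2 ones, so callers walking the
+	// returned slice in order try v2 first and only then fall back to v1.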
+
+	return endpoints, nil
+}
diff --git a/registry/service_v1.go b/registry/service_v1.go
new file mode 100644
index 00000000..ddb78ee6
--- /dev/null
+++ b/registry/service_v1.go
@@ -0,0 +1,54 @@
+package registry
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/docker/docker/pkg/tlsconfig"
+)
+
+func (s *Service) lookupV1Endpoints(repoName string) (endpoints []APIEndpoint, err error) {
+	var cfg = tlsconfig.ServerDefault
+	tlsConfig := &cfg
+	if strings.HasPrefix(repoName, DefaultNamespace+"/") {
+		endpoints = append(endpoints, APIEndpoint{
+			URL:          DefaultV1Registry,
+			Version:      APIVersion1,
+			Official:     true,
+			TrimHostname: true,
+			TLSConfig:    tlsConfig,
+		})
+		return endpoints, nil
+	}
+
+	slashIndex := strings.IndexRune(repoName, '/')
+	if slashIndex <= 0 {
+		return nil, fmt.Errorf("invalid repo name: missing '/': %s", repoName)
+	}
+	hostname := repoName[:slashIndex]
+
+	tlsConfig, err = s.TLSConfig(hostname)
+	if err != nil {
+		return nil, err
+	}
+
+	endpoints = []APIEndpoint{
+		{
+			URL:          "https://" + hostname,
+			Version:      APIVersion1,
+			TrimHostname: true,
+			TLSConfig:    tlsConfig,
+		},
+	}
+
+	if tlsConfig.InsecureSkipVerify {
+		// plain-HTTP fallback for registries configured as insecure
+		endpoints = append(endpoints, APIEndpoint{
+			URL:          "http://" + hostname,
+			Version:      APIVersion1,
+			TrimHostname: true,
+			// used to check if supposed to be secure via InsecureSkipVerify
+			TLSConfig: tlsConfig,
+		})
+	}
+	return endpoints, nil
+}
diff --git a/registry/service_v2.go b/registry/service_v2.go
new file mode 100644
index 00000000..70d5fd71
--- /dev/null
+++ b/registry/service_v2.go
@@ -0,0 +1,83 @@
+package registry
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/docker/distribution/registry/client/auth"
+	"github.com/docker/docker/pkg/tlsconfig"
+)
+
+func (s *Service) lookupV2Endpoints(repoName string) (endpoints []APIEndpoint, err error) {
+	var cfg = tlsconfig.ServerDefault
+	tlsConfig := &cfg
+	if strings.HasPrefix(repoName, DefaultNamespace+"/") {
+		// v2 mirrors
+		for _, mirror := range s.Config.Mirrors {
+			mirrorTLSConfig, err := s.tlsConfigForMirror(mirror)
+			if err != nil {
+				return nil, err
+			}
+			endpoints = append(endpoints, APIEndpoint{
+				URL: mirror,
+				// guess mirrors are v2
+				Version:      APIVersion2,
+				Mirror:       true,
+				TrimHostname: true,
+				TLSConfig:    mirrorTLSConfig,
+			})
+		}
+		// v2 registry
+		endpoints = append(endpoints, APIEndpoint{
+			URL:          DefaultV2Registry,
+			Version:      APIVersion2,
+			Official:     true,
+			TrimHostname: true,
+			TLSConfig:    tlsConfig,
+		})
+
+		return endpoints, nil
+	}
+
+	slashIndex := strings.IndexRune(repoName, '/')
+	if slashIndex <= 0 {
+		return nil, fmt.Errorf("invalid repo name: missing '/': %s", repoName)
+	}
+	hostname := repoName[:slashIndex]
+
+	tlsConfig, err = s.TLSConfig(hostname)
+	if err != nil {
+		return nil, err
+	}
+
+	v2Versions := []auth.APIVersion{
+		{
+			Type:    "registry",
+			Version: "2.0",
+		},
+	}
+	endpoints = []APIEndpoint{
+		{
+			URL:           "https://" + hostname,
+			Version:       APIVersion2,
+			TrimHostname:  true,
+			TLSConfig:     tlsConfig,
+			VersionHeader: DefaultRegistryVersionHeader,
+			Versions:      v2Versions,
+		},
+	}
+
+	if tlsConfig.InsecureSkipVerify {
+		endpoints = append(endpoints, APIEndpoint{
+			URL:          "http://" + hostname,
+			Version:      APIVersion2,
+			TrimHostname: true,
+			// used to check if supposed to be secure via InsecureSkipVerify
+			TLSConfig:     tlsConfig,
+			VersionHeader: DefaultRegistryVersionHeader,
+			Versions:      v2Versions,
+		})
+	}
+
+	return endpoints, nil
+}
diff --git a/registry/session.go b/registry/session.go
new file mode 100644
index 00000000..9bec7c1b
--- /dev/null
+++ b/registry/session.go
@@ -0,0 +1,760 @@
+package registry
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"errors"
+	"sync"
+	// this is required for some certificates
+	_ "crypto/sha512"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/http/cookiejar"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/cliconfig"
+	"github.com/docker/docker/pkg/httputils"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/docker/pkg/tarsum"
+)
+
+var (
+	// ErrRepoNotFound is returned if the repository didn't exist on the
+	// remote side
+	ErrRepoNotFound = errors.New("Repository not found")
+)
+
+// A Session is used to communicate with a V1 registry
+type Session struct {
+	indexEndpoint *Endpoint
+	client        *http.Client
+	// TODO(tiborvass): remove authConfig
+	authConfig *cliconfig.AuthConfig
+	id         string
+}
+
+type authTransport struct {
+	http.RoundTripper
+	*cliconfig.AuthConfig
+
+	alwaysSetBasicAuth bool
+	token              []string
+
+	mu     sync.Mutex                      // guards modReq
+	modReq map[*http.Request]*http.Request // original -> modified
+}
+
+// AuthTransport handles the auth layer when communicating with a v1 registry (private or official)
+//
+// For private v1 registries, set alwaysSetBasicAuth to true.
+//
+// For the official v1 registry, if there isn't already an Authorization header in the request,
+// but there is an X-Docker-Token header set to true, then Basic Auth will be used to set the Authorization header.
+// After sending the request with the provided base http.RoundTripper, if an X-Docker-Token header, representing
+// a token, is present in the response, then it gets cached and sent in the Authorization header of all subsequent
+// requests.
+//
+// If the server sends a token without the client having requested it, it is ignored.
+//
+// This RoundTripper also has a CancelRequest method important for correct timeout handling.
+func AuthTransport(base http.RoundTripper, authConfig *cliconfig.AuthConfig, alwaysSetBasicAuth bool) http.RoundTripper {
+	if base == nil {
+		base = http.DefaultTransport
+	}
+	return &authTransport{
+		RoundTripper:       base,
+		AuthConfig:         authConfig,
+		alwaysSetBasicAuth: alwaysSetBasicAuth,
+		modReq:             make(map[*http.Request]*http.Request),
+	}
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+	// shallow copy of the struct
+	r2 := new(http.Request)
+	*r2 = *r
+	// deep copy of the Header
+	r2.Header = make(http.Header, len(r.Header))
+	for k, s := range r.Header {
+		r2.Header[k] = append([]string(nil), s...)
+	}
+
+	return r2
+}
+
+// RoundTrip changes an HTTP request's headers to add the necessary
+// authentication-related headers
+func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) {
+	// Authorization should not be set on 302 redirects to untrusted locations.
+	// This logic mirrors the behavior in addRequiredHeadersToRedirectedRequests.
+	// As the authorization logic is currently implemented in RoundTrip,
+	// a 302 redirect is detected by looking at the Referer header, since the Go http package adds that header.
+	// This is safe as Docker doesn't set Referer in other scenarios.
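+	// A typical example is a registry redirecting a layer download to a blob
+	// store such as S3, which must not receive the client's credentials.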
+ if orig.Header.Get("Referer") != "" && !trustedLocation(orig) { + return tr.RoundTripper.RoundTrip(orig) + } + + req := cloneRequest(orig) + tr.mu.Lock() + tr.modReq[orig] = req + tr.mu.Unlock() + + if tr.alwaysSetBasicAuth { + if tr.AuthConfig == nil { + return nil, errors.New("unexpected error: empty auth config") + } + req.SetBasicAuth(tr.Username, tr.Password) + return tr.RoundTripper.RoundTrip(req) + } + + // Don't override + if req.Header.Get("Authorization") == "" { + if req.Header.Get("X-Docker-Token") == "true" && tr.AuthConfig != nil && len(tr.Username) > 0 { + req.SetBasicAuth(tr.Username, tr.Password) + } else if len(tr.token) > 0 { + req.Header.Set("Authorization", "Token "+strings.Join(tr.token, ",")) + } + } + resp, err := tr.RoundTripper.RoundTrip(req) + if err != nil { + delete(tr.modReq, orig) + return nil, err + } + if len(resp.Header["X-Docker-Token"]) > 0 { + tr.token = resp.Header["X-Docker-Token"] + } + resp.Body = &ioutils.OnEOFReader{ + Rc: resp.Body, + Fn: func() { + tr.mu.Lock() + delete(tr.modReq, orig) + tr.mu.Unlock() + }, + } + return resp, nil +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (tr *authTransport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := tr.RoundTripper.(canceler); ok { + tr.mu.Lock() + modReq := tr.modReq[req] + delete(tr.modReq, req) + tr.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +// NewSession creates a new session +// TODO(tiborvass): remove authConfig param once registry client v2 is vendored +func NewSession(client *http.Client, authConfig *cliconfig.AuthConfig, endpoint *Endpoint) (r *Session, err error) { + r = &Session{ + authConfig: authConfig, + client: client, + indexEndpoint: endpoint, + id: stringid.GenerateRandomID(), + } + + var alwaysSetBasicAuth bool + + // If we're working with a standalone private registry over HTTPS, send Basic Auth headers + // alongside all our requests. + if endpoint.VersionString(1) != IndexServer && endpoint.URL.Scheme == "https" { + info, err := endpoint.Ping() + if err != nil { + return nil, err + } + if info.Standalone && authConfig != nil { + logrus.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", endpoint.String()) + alwaysSetBasicAuth = true + } + } + + // Annotate the transport unconditionally so that v2 can + // properly fallback on v1 when an image is not found. + client.Transport = AuthTransport(client.Transport, authConfig, alwaysSetBasicAuth) + + jar, err := cookiejar.New(nil) + if err != nil { + return nil, errors.New("cookiejar.New is not supposed to return an error") + } + client.Jar = jar + + return r, nil +} + +// ID returns this registry session's ID. +func (r *Session) ID() string { + return r.id +} + +// GetRemoteHistory retrieves the history of a given image from the registry. +// It returns a list of the parent's JSON files (including the requested image). 
+func (r *Session) GetRemoteHistory(imgID, registry string) ([]string, error) { + res, err := r.client.Get(registry + "images/" + imgID + "/ancestry") + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != 200 { + if res.StatusCode == 401 { + return nil, errLoginRequired + } + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) + } + + var history []string + if err := json.NewDecoder(res.Body).Decode(&history); err != nil { + return nil, fmt.Errorf("Error while reading the http response: %v", err) + } + + logrus.Debugf("Ancestry: %v", history) + return history, nil +} + +// LookupRemoteImage checks if an image exists in the registry +func (r *Session) LookupRemoteImage(imgID, registry string) error { + res, err := r.client.Get(registry + "images/" + imgID + "/json") + if err != nil { + return err + } + res.Body.Close() + if res.StatusCode != 200 { + return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) + } + return nil +} + +// GetRemoteImageJSON retrieves an image's JSON metadata from the registry. +func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int, error) { + res, err := r.client.Get(registry + "images/" + imgID + "/json") + if err != nil { + return nil, -1, fmt.Errorf("Failed to download json: %s", err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + return nil, -1, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) + } + // if the size header is not present, then set it to '-1' + imageSize := -1 + if hdr := res.Header.Get("X-Docker-Size"); hdr != "" { + imageSize, err = strconv.Atoi(hdr) + if err != nil { + return nil, -1, err + } + } + + jsonString, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, -1, fmt.Errorf("Failed to parse downloaded json: %v (%s)", err, jsonString) + } + return jsonString, imageSize, nil +} + +// GetRemoteImageLayer retrieves an image layer from the registry +func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io.ReadCloser, error) { + var ( + retries = 5 + statusCode = 0 + res *http.Response + err error + imageURL = fmt.Sprintf("%simages/%s/layer", registry, imgID) + ) + + req, err := http.NewRequest("GET", imageURL, nil) + if err != nil { + return nil, fmt.Errorf("Error while getting from the server: %v", err) + } + // TODO(tiborvass): why are we doing retries at this level? 
+ // These retries should be generic to both v1 and v2 + for i := 1; i <= retries; i++ { + statusCode = 0 + res, err = r.client.Do(req) + if err == nil { + break + } + logrus.Debugf("Error contacting registry %s: %v", registry, err) + if res != nil { + if res.Body != nil { + res.Body.Close() + } + statusCode = res.StatusCode + } + if i == retries { + return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", + statusCode, imgID) + } + time.Sleep(time.Duration(i) * 5 * time.Second) + } + + if res.StatusCode != 200 { + res.Body.Close() + return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", + res.StatusCode, imgID) + } + + if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 { + logrus.Debugf("server supports resume") + return httputils.ResumableRequestReaderWithInitialResponse(r.client, req, 5, imgSize, res), nil + } + logrus.Debugf("server doesn't support resume") + return res.Body, nil +} + +// GetRemoteTag retrieves the tag named in the askedTag argument from the given +// repository. It queries each of the registries supplied in the registries +// argument, and returns data from the first one that answers the query +// successfully. +func (r *Session) GetRemoteTag(registries []string, repository string, askedTag string) (string, error) { + if strings.Count(repository, "/") == 0 { + // This will be removed once the registry supports auto-resolution on + // the "library" namespace + repository = "library/" + repository + } + for _, host := range registries { + endpoint := fmt.Sprintf("%srepositories/%s/tags/%s", host, repository, askedTag) + res, err := r.client.Get(endpoint) + if err != nil { + return "", err + } + + logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) + defer res.Body.Close() + + if res.StatusCode == 404 { + return "", ErrRepoNotFound + } + if res.StatusCode != 200 { + continue + } + + var tagID string + if err := json.NewDecoder(res.Body).Decode(&tagID); err != nil { + return "", err + } + return tagID, nil + } + return "", fmt.Errorf("Could not reach any registry endpoint") +} + +// GetRemoteTags retrieves all tags from the given repository. It queries each +// of the registries supplied in the registries argument, and returns data from +// the first one that answers the query successfully. It returns a map with +// tag names as the keys and image IDs as the values. 
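+// Like GetRemoteTag, it treats a 404 from any registry as "repository does
+// not exist" and reports it as ErrRepoNotFound.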
+func (r *Session) GetRemoteTags(registries []string, repository string) (map[string]string, error) {
+	if strings.Count(repository, "/") == 0 {
+		// This will be removed once the registry supports auto-resolution on
+		// the "library" namespace
+		repository = "library/" + repository
+	}
+	for _, host := range registries {
+		endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository)
+		res, err := r.client.Get(endpoint)
+		if err != nil {
+			return nil, err
+		}
+
+		logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint)
+		defer res.Body.Close()
+
+		if res.StatusCode == 404 {
+			return nil, ErrRepoNotFound
+		}
+		if res.StatusCode != 200 {
+			continue
+		}
+
+		result := make(map[string]string)
+		if err := json.NewDecoder(res.Body).Decode(&result); err != nil {
+			return nil, err
+		}
+		return result, nil
+	}
+	return nil, fmt.Errorf("Could not reach any registry endpoint")
+}
+
+func buildEndpointsList(headers []string, indexEp string) ([]string, error) {
+	var endpoints []string
+	parsedURL, err := url.Parse(indexEp)
+	if err != nil {
+		return nil, err
+	}
+	var urlScheme = parsedURL.Scheme
+	// The registry's URL scheme has to match the index's
+	for _, ep := range headers {
+		epList := strings.Split(ep, ",")
+		for _, epListElement := range epList {
+			endpoints = append(
+				endpoints,
+				fmt.Sprintf("%s://%s/v1/", urlScheme, strings.TrimSpace(epListElement)))
+		}
+	}
+	return endpoints, nil
+}
+
+// GetRepositoryData returns lists of images and endpoints for the repository
+func (r *Session) GetRepositoryData(remote string) (*RepositoryData, error) {
+	repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.VersionString(1), remote)
+
+	logrus.Debugf("[registry] Calling GET %s", repositoryTarget)
+
+	req, err := http.NewRequest("GET", repositoryTarget, nil)
+	if err != nil {
+		return nil, err
+	}
+	// this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests
+	req.Header.Set("X-Docker-Token", "true")
+	res, err := r.client.Do(req)
+	if err != nil {
+		// check if the error is because of an i/o timeout,
+		// and return a non-obtuse error message for users
+		// "Get https://index.docker.io/v1/repositories/library/busybox/images: i/o timeout"
+		// was a top search on the docker user forum
+		if strings.HasSuffix(err.Error(), "i/o timeout") {
+			return nil, fmt.Errorf("Network timed out while trying to connect to %s. You may want to check your internet connection or if you are behind a proxy.", repositoryTarget)
+		}
+		return nil, fmt.Errorf("Error while pulling image: %v", err)
+	}
+	defer res.Body.Close()
+	if res.StatusCode == 401 {
+		return nil, errLoginRequired
+	}
+	// TODO: Right now we're ignoring checksums in the response body.
+	// In the future, we need to use them to check image validity.
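+	// A 404 at this point means the index does not know the repository at all.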
+ if res.StatusCode == 404 { + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res) + } else if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + logrus.Debugf("Error reading response body: %s", err) + } + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, remote, errBody), res) + } + + var endpoints []string + if res.Header.Get("X-Docker-Endpoints") != "" { + endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.VersionString(1)) + if err != nil { + return nil, err + } + } else { + // Assume the endpoint is on the same host + endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", r.indexEndpoint.URL.Scheme, req.URL.Host)) + } + + remoteChecksums := []*ImgData{} + if err := json.NewDecoder(res.Body).Decode(&remoteChecksums); err != nil { + return nil, err + } + + // Forge a better object from the retrieved data + imgsData := make(map[string]*ImgData, len(remoteChecksums)) + for _, elem := range remoteChecksums { + imgsData[elem.ID] = elem + } + + return &RepositoryData{ + ImgList: imgsData, + Endpoints: endpoints, + }, nil +} + +// PushImageChecksumRegistry uploads checksums for an image +func (r *Session) PushImageChecksumRegistry(imgData *ImgData, registry string) error { + u := registry + "images/" + imgData.ID + "/checksum" + + logrus.Debugf("[registry] Calling PUT %s", u) + + req, err := http.NewRequest("PUT", u, nil) + if err != nil { + return err + } + req.Header.Set("X-Docker-Checksum", imgData.Checksum) + req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload) + + res, err := r.client.Do(req) + if err != nil { + return fmt.Errorf("Failed to upload metadata: %v", err) + } + defer res.Body.Close() + if len(res.Cookies()) > 0 { + r.client.Jar.SetCookies(req.URL, res.Cookies()) + } + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err) + } + var jsonBody map[string]string + if err := json.Unmarshal(errBody, &jsonBody); err != nil { + errBody = []byte(err.Error()) + } else if jsonBody["error"] == "Image already exists" { + return ErrAlreadyExists + } + return fmt.Errorf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody) + } + return nil +} + +// PushImageJSONRegistry pushes JSON metadata for a local image to the registry +func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string) error { + + u := registry + "images/" + imgData.ID + "/json" + + logrus.Debugf("[registry] Calling PUT %s", u) + + req, err := http.NewRequest("PUT", u, bytes.NewReader(jsonRaw)) + if err != nil { + return err + } + req.Header.Add("Content-type", "application/json") + + res, err := r.client.Do(req) + if err != nil { + return fmt.Errorf("Failed to upload metadata: %s", err) + } + defer res.Body.Close() + if res.StatusCode == 401 && strings.HasPrefix(registry, "http://") { + return httputils.NewHTTPRequestError("HTTP code 401, Docker will not send auth headers over HTTP.", res) + } + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) + } + var jsonBody map[string]string + if err := json.Unmarshal(errBody, 
&jsonBody); err != nil { + errBody = []byte(err.Error()) + } else if jsonBody["error"] == "Image already exists" { + return ErrAlreadyExists + } + return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody), res) + } + return nil +} + +// PushImageLayerRegistry sends the checksum of an image layer to the registry +func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, jsonRaw []byte) (checksum string, checksumPayload string, err error) { + u := registry + "images/" + imgID + "/layer" + + logrus.Debugf("[registry] Calling PUT %s", u) + + tarsumLayer, err := tarsum.NewTarSum(layer, false, tarsum.Version0) + if err != nil { + return "", "", err + } + h := sha256.New() + h.Write(jsonRaw) + h.Write([]byte{'\n'}) + checksumLayer := io.TeeReader(tarsumLayer, h) + + req, err := http.NewRequest("PUT", u, checksumLayer) + if err != nil { + return "", "", err + } + req.Header.Add("Content-Type", "application/octet-stream") + req.ContentLength = -1 + req.TransferEncoding = []string{"chunked"} + res, err := r.client.Do(req) + if err != nil { + return "", "", fmt.Errorf("Failed to upload layer: %v", err) + } + if rc, ok := layer.(io.Closer); ok { + if err := rc.Close(); err != nil { + return "", "", err + } + } + defer res.Body.Close() + + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) + } + return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("Received HTTP code %d while uploading layer: %q", res.StatusCode, errBody), res) + } + + checksumPayload = "sha256:" + hex.EncodeToString(h.Sum(nil)) + return tarsumLayer.Sum(jsonRaw), checksumPayload, nil +} + +// PushRegistryTag pushes a tag on the registry. 
+// Remote has the format '<user>/<repo>'
+func (r *Session) PushRegistryTag(remote, revision, tag, registry string) error {
+    // "jsonify" the string
+    revision = "\"" + revision + "\""
+    path := fmt.Sprintf("repositories/%s/tags/%s", remote, tag)
+
+    req, err := http.NewRequest("PUT", registry+path, strings.NewReader(revision))
+    if err != nil {
+        return err
+    }
+    req.Header.Add("Content-type", "application/json")
+    req.ContentLength = int64(len(revision))
+    res, err := r.client.Do(req)
+    if err != nil {
+        return err
+    }
+    res.Body.Close()
+    if res.StatusCode != 200 && res.StatusCode != 201 {
+        return httputils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote), res)
+    }
+    return nil
+}
+
+// PushImageJSONIndex uploads an image list to the repository
+func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) {
+    cleanImgList := []*ImgData{}
+    if validate {
+        for _, elem := range imgList {
+            if elem.Checksum != "" {
+                cleanImgList = append(cleanImgList, elem)
+            }
+        }
+    } else {
+        cleanImgList = imgList
+    }
+
+    imgListJSON, err := json.Marshal(cleanImgList)
+    if err != nil {
+        return nil, err
+    }
+    var suffix string
+    if validate {
+        suffix = "images"
+    }
+    u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.VersionString(1), remote, suffix)
+    logrus.Debugf("[registry] PUT %s", u)
+    logrus.Debugf("Image list pushed to index:\n%s", imgListJSON)
+    headers := map[string][]string{
+        "Content-type": {"application/json"},
+        // this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests
+        "X-Docker-Token": {"true"},
+    }
+    if validate {
+        headers["X-Docker-Endpoints"] = regs
+    }
+
+    // Redirect if necessary
+    var res *http.Response
+    for {
+        if res, err = r.putImageRequest(u, headers, imgListJSON); err != nil {
+            return nil, err
+        }
+        if !shouldRedirect(res) {
+            break
+        }
+        res.Body.Close()
+        u = res.Header.Get("Location")
+        logrus.Debugf("Redirected to %s", u)
+    }
+    defer res.Body.Close()
+
+    if res.StatusCode == 401 {
+        return nil, errLoginRequired
+    }
+
+    var tokens, endpoints []string
+    if !validate {
+        if res.StatusCode != 200 && res.StatusCode != 201 {
+            errBody, err := ioutil.ReadAll(res.Body)
+            if err != nil {
+                logrus.Debugf("Error reading response body: %s", err)
+            }
+            return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, remote, errBody), res)
+        }
+        tokens = res.Header["X-Docker-Token"]
+        logrus.Debugf("Auth token: %v", tokens)
+
+        if res.Header.Get("X-Docker-Endpoints") == "" {
+            return nil, fmt.Errorf("Index response didn't contain any endpoints")
+        }
+        endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.VersionString(1))
+        if err != nil {
+            return nil, err
+        }
+    } else {
+        if res.StatusCode != 204 {
+            errBody, err := ioutil.ReadAll(res.Body)
+            if err != nil {
+                logrus.Debugf("Error reading response body: %s", err)
+            }
+            return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, remote, errBody), res)
+        }
+    }
+
+    return &RepositoryData{
+        Endpoints: endpoints,
+    }, nil
+}
+
+func (r *Session) putImageRequest(u string, headers map[string][]string, body []byte) (*http.Response, error) {
+    req, err := http.NewRequest("PUT", u, bytes.NewReader(body))
+    if err != nil {
+        return nil, err
+    }
+    req.ContentLength = int64(len(body))
+    for k, v := range headers {
+        req.Header[k] = v
+    }
+    response, err := r.client.Do(req)
+    if err != nil {
+        return nil, err
+    }
+    return response, nil
+}
+
+func shouldRedirect(response *http.Response) bool {
+    return response.StatusCode >= 300 && response.StatusCode < 400
+}
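The redirect loop in PushImageJSONIndex cannot lean on http.Client's built-in redirect support: the PUT body is drained on every attempt, so it has to be rebuilt from the raw bytes before re-issuing the request. A minimal standalone sketch of the same pattern, assuming a plain net/http client (the function name is illustrative, not part of this package):

    package example

    import (
        "bytes"
        "net/http"
    )

    // putFollowingRedirects re-issues a PUT against each Location header it
    // receives, mirroring the loop above: the body reader is rebuilt from the
    // raw bytes on every attempt because each request consumes it.
    func putFollowingRedirects(client *http.Client, url string, body []byte) (*http.Response, error) {
        for {
            req, err := http.NewRequest("PUT", url, bytes.NewReader(body))
            if err != nil {
                return nil, err
            }
            req.ContentLength = int64(len(body))
            res, err := client.Do(req)
            if err != nil {
                return nil, err
            }
            // Same test as shouldRedirect: any 3xx status code.
            if res.StatusCode < 300 || res.StatusCode >= 400 {
                return res, nil
            }
            res.Body.Close()
            url = res.Header.Get("Location")
        }
    }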
+
+// SearchRepositories performs a search against the remote repository
+func (r *Session) SearchRepositories(term string) (*SearchResults, error) {
+    logrus.Debugf("Index server: %s", r.indexEndpoint)
+    u := r.indexEndpoint.VersionString(1) + "search?q=" + url.QueryEscape(term)
+
+    req, err := http.NewRequest("GET", u, nil)
+    if err != nil {
+        return nil, fmt.Errorf("Error while getting from the server: %v", err)
+    }
+    // Have the AuthTransport send authentication when logged in.
+    req.Header.Set("X-Docker-Token", "true")
+    res, err := r.client.Do(req)
+    if err != nil {
+        return nil, err
+    }
+    defer res.Body.Close()
+    if res.StatusCode != 200 {
+        return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res)
+    }
+    result := new(SearchResults)
+    return result, json.NewDecoder(res.Body).Decode(result)
+}
+
+// GetAuthConfig returns the authentication settings for a session
+// TODO(tiborvass): remove this once registry client v2 is vendored
+func (r *Session) GetAuthConfig(withPasswd bool) *cliconfig.AuthConfig {
+    password := ""
+    if withPasswd {
+        password = r.authConfig.Password
+    }
+    return &cliconfig.AuthConfig{
+        Username: r.authConfig.Username,
+        Password: password,
+        Email:    r.authConfig.Email,
+    }
+}
diff --git a/registry/token.go b/registry/token.go
new file mode 100644
index 00000000..d91bd455
--- /dev/null
+++ b/registry/token.go
@@ -0,0 +1,81 @@
+package registry
+
+import (
+    "encoding/json"
+    "errors"
+    "fmt"
+    "net/http"
+    "net/url"
+    "strings"
+)
+
+type tokenResponse struct {
+    Token string `json:"token"`
+}
+
+func getToken(username, password string, params map[string]string, registryEndpoint *Endpoint) (string, error) {
+    realm, ok := params["realm"]
+    if !ok {
+        return "", errors.New("no realm specified for token auth challenge")
+    }
+
+    realmURL, err := url.Parse(realm)
+    if err != nil {
+        return "", fmt.Errorf("invalid token auth challenge realm: %s", err)
+    }
+
+    if realmURL.Scheme == "" {
+        if registryEndpoint.IsSecure {
+            realmURL.Scheme = "https"
+        } else {
+            realmURL.Scheme = "http"
+        }
+    }
+
+    req, err := http.NewRequest("GET", realmURL.String(), nil)
+    if err != nil {
+        return "", err
+    }
+
+    reqParams := req.URL.Query()
+    service := params["service"]
+    scope := params["scope"]
+
+    if service != "" {
+        reqParams.Add("service", service)
+    }
+
+    for _, scopeField := range strings.Fields(scope) {
+        reqParams.Add("scope", scopeField)
+    }
+
+    if username != "" {
+        reqParams.Add("account", username)
+        req.SetBasicAuth(username, password)
+    }
+
+    req.URL.RawQuery = reqParams.Encode()
+
+    resp, err := registryEndpoint.client.Do(req)
+    if err != nil {
+        return "", err
+    }
+    defer resp.Body.Close()
+
+    if resp.StatusCode != http.StatusOK {
+        return "", fmt.Errorf("token auth attempt for registry %s: %s request failed with status: %d %s", registryEndpoint, req.URL, resp.StatusCode, http.StatusText(resp.StatusCode))
+    }
+
+    decoder := json.NewDecoder(resp.Body)
+
+    tr := new(tokenResponse)
+    if err = decoder.Decode(tr); err != nil {
+        return "", fmt.Errorf("unable to decode token response: %s", err)
+    }
+
+    if tr.Token == "" {
+        return "", errors.New("authorization server did not include a token in the response")
+    }
+
+    return tr.Token, nil
+}
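getToken expects the realm, service, and scope to arrive pre-parsed in the params map; they originate from a WWW-Authenticate: Bearer challenge sent by the registry. A simplified sketch of that parsing step, assuming well-formed input (parseBearerChallenge is a hypothetical helper; the real daemon uses a more careful challenge parser):

    package main

    import (
        "fmt"
        "strings"
    )

    // parseBearerChallenge extracts the key/value pairs from a
    // WWW-Authenticate: Bearer header into the params map consumed by
    // getToken. It ignores quoting edge cases for brevity.
    func parseBearerChallenge(header string) map[string]string {
        params := map[string]string{}
        header = strings.TrimPrefix(header, "Bearer ")
        for _, part := range strings.Split(header, ",") {
            kv := strings.SplitN(strings.TrimSpace(part), "=", 2)
            if len(kv) == 2 {
                params[kv[0]] = strings.Trim(kv[1], `"`)
            }
        }
        return params
    }

    func main() {
        challenge := `Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:library/ubuntu:pull"`
        params := parseBearerChallenge(challenge)
        fmt.Println(params["realm"], params["service"], params["scope"])
    }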
diff --git a/registry/types.go b/registry/types.go
new file mode 100644
index 00000000..09b9d571
--- /dev/null
+++ b/registry/types.go
@@ -0,0 +1,140 @@
+package registry
+
+// SearchResult describes a search result returned from a registry
+type SearchResult struct {
+    // StarCount indicates the number of stars this repository has
+    StarCount int `json:"star_count"`
+    // IsOfficial indicates whether the result is an official repository or not
+    IsOfficial bool `json:"is_official"`
+    // Name is the name of the repository
+    Name string `json:"name"`
+    // IsTrusted indicates whether the result is trusted
+    IsTrusted bool `json:"is_trusted"`
+    // IsAutomated indicates whether the result is automated
+    IsAutomated bool `json:"is_automated"`
+    // Description is a textual description of the repository
+    Description string `json:"description"`
+}
+
+// SearchResults lists a collection of search results returned from a registry
+type SearchResults struct {
+    // Query contains the query string that generated the search results
+    Query string `json:"query"`
+    // NumResults indicates the number of results the query returned
+    NumResults int `json:"num_results"`
+    // Results is a slice containing the actual results for the search
+    Results []SearchResult `json:"results"`
+}
+
+// RepositoryData tracks the image list, list of endpoints, and list of tokens
+// for a repository
+type RepositoryData struct {
+    // ImgList is a list of images in the repository
+    ImgList map[string]*ImgData
+    // Endpoints is a list of endpoints returned in X-Docker-Endpoints
+    Endpoints []string
+    // Tokens is currently unused (remove it?)
+    Tokens []string
+}
+
+// ImgData is used to transfer image checksums to and from the registry
+type ImgData struct {
+    // ID is an opaque string that identifies the image
+    ID              string `json:"id"`
+    Checksum        string `json:"checksum,omitempty"`
+    ChecksumPayload string `json:"-"`
+    Tag             string `json:",omitempty"`
+}
+
+// PingResult contains the information returned when pinging a registry. It
+// indicates the registry's version and whether the registry claims to be a
+// standalone registry.
+type PingResult struct {
+    // Version is the registry version supplied by the registry in a HTTP
+    // header
+    Version string `json:"version"`
+    // Standalone is set to true if the registry indicates it is a
+    // standalone registry in the X-Docker-Registry-Standalone
+    // header
+    Standalone bool `json:"standalone"`
+}
+
+// APIVersion is an integral representation of an API version (presently
+// either 1 or 2)
+type APIVersion int
+
+func (av APIVersion) String() string {
+    return apiVersions[av]
+}
+
+var apiVersions = map[APIVersion]string{
+    1: "v1",
+    2: "v2",
+}
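Because String is backed by the apiVersions map, the zero value stringifies to the empty string instead of failing. A quick self-contained check (the type is redeclared here only so the snippet stands alone):

    package main

    import "fmt"

    // APIVersion mirrors the type above.
    type APIVersion int

    var apiVersions = map[APIVersion]string{1: "v1", 2: "v2"}

    func (av APIVersion) String() string { return apiVersions[av] }

    func main() {
        for _, v := range []APIVersion{0, 1, 2} {
            // Missing map keys yield the zero string, so 0 prints as "".
            fmt.Printf("%d -> %q\n", int(v), v) // 0 -> "", 1 -> "v1", 2 -> "v2"
        }
    }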
+
+// API Version identifiers.
+const (
+    APIVersionUnknown = iota
+    APIVersion1
+    APIVersion2
+)
+
+// IndexInfo contains information about a registry
+//
+// RepositoryInfo Examples:
+// {
+//   "Index" : {
+//     "Name" : "docker.io",
+//     "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"],
+//     "Secure" : true,
+//     "Official" : true,
+//   },
+//   "RemoteName" : "library/debian",
+//   "LocalName" : "debian",
+//   "CanonicalName" : "docker.io/debian",
+//   "Official" : true,
+// }
+//
+// {
+//   "Index" : {
+//     "Name" : "127.0.0.1:5000",
+//     "Mirrors" : [],
+//     "Secure" : false,
+//     "Official" : false,
+//   },
+//   "RemoteName" : "user/repo",
+//   "LocalName" : "127.0.0.1:5000/user/repo",
+//   "CanonicalName" : "127.0.0.1:5000/user/repo",
+//   "Official" : false,
+// }
+type IndexInfo struct {
+    // Name is the name of the registry, such as "docker.io"
+    Name string
+    // Mirrors is a list of mirrors, expressed as URIs
+    Mirrors []string
+    // Secure is set to false if the registry is part of the list of
+    // insecure registries. Insecure registries accept HTTP and/or accept
+    // HTTPS with certificates from unknown CAs.
+    Secure bool
+    // Official indicates whether this is an official registry
+    Official bool
+}
+
+// RepositoryInfo describes a repository
+type RepositoryInfo struct {
+    // Index points to registry information
+    Index *IndexInfo
+    // RemoteName is the remote name of the repository, such as
+    // "library/ubuntu-12.04-base"
+    RemoteName string
+    // LocalName is the local name of the repository, such as
+    // "ubuntu-12.04-base"
+    LocalName string
+    // CanonicalName is the canonical name of the repository, such as
+    // "docker.io/library/ubuntu-12.04-base"
+    CanonicalName string
+    // Official indicates whether the repository is considered official.
+    // If the registry is official, and the normalized name does not
+    // contain a '/' (e.g. "foo"), then it is considered an official repo.
+    Official bool
+}
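Restating the first example from the comment block above as an actual literal may make the shape clearer; this is a hand-built value for illustration, not output from the daemon's name-resolution code:

    package main

    import "fmt"

    // IndexInfo and RepositoryInfo mirror the structs above; they are
    // redeclared here only to keep the example self-contained.
    type IndexInfo struct {
        Name     string
        Mirrors  []string
        Secure   bool
        Official bool
    }

    type RepositoryInfo struct {
        Index         *IndexInfo
        RemoteName    string
        LocalName     string
        CanonicalName string
        Official      bool
    }

    func main() {
        // The official "debian" image resolved against the docker.io index.
        repo := RepositoryInfo{
            Index: &IndexInfo{
                Name:     "docker.io",
                Mirrors:  []string{"https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"},
                Secure:   true,
                Official: true,
            },
            RemoteName:    "library/debian",
            LocalName:     "debian",
            CanonicalName: "docker.io/debian",
            Official:      true,
        }
        fmt.Printf("%s -> %s\n", repo.LocalName, repo.CanonicalName)
    }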
diff --git a/runconfig/compare.go b/runconfig/compare.go
new file mode 100644
index 00000000..ebb8ead6
--- /dev/null
+++ b/runconfig/compare.go
@@ -0,0 +1,63 @@
+package runconfig
+
+// Compare compares two Config structs. It does not compare the "Image" or "Hostname" fields.
+// If OpenStdin is set on either config, the two are considered different.
+func Compare(a, b *Config) bool {
+    if a == nil || b == nil ||
+        a.OpenStdin || b.OpenStdin {
+        return false
+    }
+    if a.AttachStdout != b.AttachStdout ||
+        a.AttachStderr != b.AttachStderr ||
+        a.User != b.User ||
+        a.OpenStdin != b.OpenStdin ||
+        a.Tty != b.Tty {
+        return false
+    }
+
+    if a.Cmd.Len() != b.Cmd.Len() ||
+        len(a.Env) != len(b.Env) ||
+        len(a.Labels) != len(b.Labels) ||
+        len(a.ExposedPorts) != len(b.ExposedPorts) ||
+        a.Entrypoint.Len() != b.Entrypoint.Len() ||
+        len(a.Volumes) != len(b.Volumes) {
+        return false
+    }
+
+    aCmd := a.Cmd.Slice()
+    bCmd := b.Cmd.Slice()
+    for i := 0; i < len(aCmd); i++ {
+        if aCmd[i] != bCmd[i] {
+            return false
+        }
+    }
+    for i := 0; i < len(a.Env); i++ {
+        if a.Env[i] != b.Env[i] {
+            return false
+        }
+    }
+    for k, v := range a.Labels {
+        if v != b.Labels[k] {
+            return false
+        }
+    }
+    for k := range a.ExposedPorts {
+        if _, exists := b.ExposedPorts[k]; !exists {
+            return false
+        }
+    }
+
+    aEntrypoint := a.Entrypoint.Slice()
+    bEntrypoint := b.Entrypoint.Slice()
+    for i := 0; i < len(aEntrypoint); i++ {
+        if aEntrypoint[i] != bEntrypoint[i] {
+            return false
+        }
+    }
+    for key := range a.Volumes {
+        if _, exists := b.Volumes[key]; !exists {
+            return false
+        }
+    }
+    return true
+}
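A short usage sketch of Compare, assuming the Docker tree is importable at its in-tree path:

    package main

    import (
        "fmt"

        "github.com/docker/docker/runconfig"
    )

    func main() {
        // Identical commands and users: Compare ignores Image and Hostname,
        // so these two configs are considered equal.
        a := &runconfig.Config{Image: "image1", Hostname: "h1", User: "web", Cmd: runconfig.NewCommand("sleep", "10")}
        b := &runconfig.Config{Image: "image2", Hostname: "h2", User: "web", Cmd: runconfig.NewCommand("sleep", "10")}
        fmt.Println(runconfig.Compare(a, b)) // true

        // Any config with OpenStdin set always differs, even from itself.
        c := &runconfig.Config{User: "web", OpenStdin: true}
        fmt.Println(runconfig.Compare(c, c)) // false
    }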
diff --git a/runconfig/compare_test.go b/runconfig/compare_test.go
new file mode 100644
index 00000000..e59c3b2f
--- /dev/null
+++ b/runconfig/compare_test.go
@@ -0,0 +1,124 @@
+package runconfig
+
+import (
+    "testing"
+
+    "github.com/docker/docker/pkg/nat"
+)
+
+// Just to make life easier
+func newPortNoError(proto, port string) nat.Port {
+    p, _ := nat.NewPort(proto, port)
+    return p
+}
+
+func TestCompare(t *testing.T) {
+    ports1 := make(nat.PortSet)
+    ports1[newPortNoError("tcp", "1111")] = struct{}{}
+    ports1[newPortNoError("tcp", "2222")] = struct{}{}
+    ports2 := make(nat.PortSet)
+    ports2[newPortNoError("tcp", "3333")] = struct{}{}
+    ports2[newPortNoError("tcp", "4444")] = struct{}{}
+    ports3 := make(nat.PortSet)
+    ports3[newPortNoError("tcp", "1111")] = struct{}{}
+    ports3[newPortNoError("tcp", "2222")] = struct{}{}
+    ports3[newPortNoError("tcp", "5555")] = struct{}{}
+    volumes1 := make(map[string]struct{})
+    volumes1["/test1"] = struct{}{}
+    volumes2 := make(map[string]struct{})
+    volumes2["/test2"] = struct{}{}
+    volumes3 := make(map[string]struct{})
+    volumes3["/test1"] = struct{}{}
+    volumes3["/test3"] = struct{}{}
+    envs1 := []string{"ENV1=value1", "ENV2=value2"}
+    envs2 := []string{"ENV1=value1", "ENV3=value3"}
+    entrypoint1 := &Entrypoint{parts: []string{"/bin/sh", "-c"}}
+    entrypoint2 := &Entrypoint{parts: []string{"/bin/sh", "-d"}}
+    entrypoint3 := &Entrypoint{parts: []string{"/bin/sh", "-c", "echo"}}
+    cmd1 := &Command{parts: []string{"/bin/sh", "-c"}}
+    cmd2 := &Command{parts: []string{"/bin/sh", "-d"}}
+    cmd3 := &Command{parts: []string{"/bin/sh", "-c", "echo"}}
+    labels1 := map[string]string{"LABEL1": "value1", "LABEL2": "value2"}
+    labels2 := map[string]string{"LABEL1": "value1", "LABEL2": "value3"}
+    labels3 := map[string]string{"LABEL1": "value1", "LABEL2": "value2", "LABEL3": "value3"}
+
+    sameConfigs := map[*Config]*Config{
+        // Empty config
+        &Config{}: {},
+        // Does not compare hostname, domainname & image
+        &Config{
+            Hostname:   "host1",
+            Domainname: "domain1",
+            Image:      "image1",
+            User:       "user",
+        }: {
+            Hostname:   "host2",
+            Domainname: "domain2",
+            Image:      "image2",
+            User:       "user",
+        },
+        // only OpenStdin
+        &Config{OpenStdin: false}: {OpenStdin: false},
+        // only env
+        &Config{Env: envs1}: {Env: envs1},
+        // only cmd
+        &Config{Cmd: cmd1}: {Cmd: cmd1},
+        // only labels
+        &Config{Labels: labels1}: {Labels: labels1},
+        // only exposedPorts
+        &Config{ExposedPorts: ports1}: {ExposedPorts: ports1},
+        // only entrypoints
+        &Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint1},
+        // only volumes
+        &Config{Volumes: volumes1}: {Volumes: volumes1},
+    }
+    differentConfigs := map[*Config]*Config{
+        nil: nil,
+        &Config{
+            Hostname:   "host1",
+            Domainname: "domain1",
+            Image:      "image1",
+            User:       "user1",
+        }: {
+            Hostname:   "host1",
+            Domainname: "domain1",
+            Image:      "image1",
+            User:       "user2",
+        },
+        // only OpenStdin
+        &Config{OpenStdin: false}: {OpenStdin: true},
+        &Config{OpenStdin: true}:  {OpenStdin: false},
+        // only env
+        &Config{Env: envs1}: {Env: envs2},
+        // only cmd
+        &Config{Cmd: cmd1}: {Cmd: cmd2},
+        // not the same number of parts
+        &Config{Cmd: cmd1}: {Cmd: cmd3},
+        // only labels
+        &Config{Labels: labels1}: {Labels: labels2},
+        // not the same number of labels
+        &Config{Labels: labels1}: {Labels: labels3},
+        // only exposedPorts
+        &Config{ExposedPorts: ports1}: {ExposedPorts: ports2},
+        // not the same number of ports
+        &Config{ExposedPorts: ports1}: {ExposedPorts: ports3},
+        // only entrypoints
+        &Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint2},
+        // not the same number of parts
+        &Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint3},
+        // only volumes
+        &Config{Volumes: volumes1}: {Volumes: volumes2},
+        // not the same number of volumes
+        &Config{Volumes: volumes1}: {Volumes: volumes3},
+    }
+    for config1, config2 := range sameConfigs {
+        if !Compare(config1, config2) {
+            t.Fatalf("Compare should be true for [%v] and [%v]", config1, config2)
+        }
+    }
+    for config1, config2 := range differentConfigs {
+        if Compare(config1, config2) {
+            t.Fatalf("Compare should be false for [%v] and [%v]", config1, config2)
+        }
+    }
+}
diff --git a/runconfig/config.go b/runconfig/config.go
new file mode 100644
index 00000000..3ab9bac2
--- /dev/null
+++ b/runconfig/config.go
@@ -0,0 +1,191 @@
+package runconfig
+
+import (
+    "encoding/json"
+    "io"
+    "strings"
+
+    "github.com/docker/docker/pkg/nat"
+)
+
+// Entrypoint encapsulates the container entrypoint.
+// It might be represented as a string or an array of strings.
+// We need to override the json decoder to accept both options.
+// The JSON decoder would fail if the api sends a string and
+// we try to decode it into an array of strings.
+type Entrypoint struct {
+    parts []string
+}
+
+func (e *Entrypoint) MarshalJSON() ([]byte, error) {
+    if e == nil {
+        return []byte{}, nil
+    }
+    return json.Marshal(e.Slice())
+}
+
+// UnmarshalJSON decodes the entrypoint whether it's a string or an array of strings.
+func (e *Entrypoint) UnmarshalJSON(b []byte) error {
+    if len(b) == 0 {
+        return nil
+    }
+
+    p := make([]string, 0, 1)
+    if err := json.Unmarshal(b, &p); err != nil {
+        var s string
+        if err := json.Unmarshal(b, &s); err != nil {
+            return err
+        }
+        p = append(p, s)
+    }
+    e.parts = p
+    return nil
+}
+
+func (e *Entrypoint) Len() int {
+    if e == nil {
+        return 0
+    }
+    return len(e.parts)
+}
+
+func (e *Entrypoint) Slice() []string {
+    if e == nil {
+        return nil
+    }
+    return e.parts
+}
+
+func NewEntrypoint(parts ...string) *Entrypoint {
+    return &Entrypoint{parts}
+}
+
+type Command struct {
+    parts []string
+}
+
+func (e *Command) ToString() string {
+    return strings.Join(e.parts, " ")
+}
+
+func (e *Command) MarshalJSON() ([]byte, error) {
+    if e == nil {
+        return []byte{}, nil
+    }
+    return json.Marshal(e.Slice())
+}
+
+// UnmarshalJSON decodes the command whether it's a string or an array of strings.
+func (e *Command) UnmarshalJSON(b []byte) error {
+    if len(b) == 0 {
+        return nil
+    }
+
+    p := make([]string, 0, 1)
+    if err := json.Unmarshal(b, &p); err != nil {
+        var s string
+        if err := json.Unmarshal(b, &s); err != nil {
+            return err
+        }
+        p = append(p, s)
+    }
+    e.parts = p
+    return nil
+}
+
+func (e *Command) Len() int {
+    if e == nil {
+        return 0
+    }
+    return len(e.parts)
+}
+
+func (e *Command) Slice() []string {
+    if e == nil {
+        return nil
+    }
+    return e.parts
+}
+
+func NewCommand(parts ...string) *Command {
+    return &Command{parts}
+}
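The practical effect of the custom decoders is that both the legacy string form and the array form of Entrypoint (and Command) deserialize cleanly; only the element count differs. A small demonstration, assuming the Docker tree is importable at its in-tree path:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/docker/docker/runconfig"
    )

    func main() {
        var fromString, fromArray runconfig.Entrypoint
        // Errors are elided for brevity; both payloads are valid JSON.
        _ = json.Unmarshal([]byte(`"/bin/sh -c date"`), &fromString)
        _ = json.Unmarshal([]byte(`["/bin/sh","-c","date"]`), &fromArray)
        // The string form becomes a single element; the array form keeps
        // its three elements.
        fmt.Println(fromString.Len(), fromArray.Len()) // 1 3
    }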
+
+// Note: the Config structure should hold only portable information about the container.
+// Here, "portable" means "independent from the host we are running on".
+// Non-portable information *should* appear in HostConfig.
+// All fields added to this struct must be marked `omitempty` to keep getting
+// predictable hashes from the old `v1Compatibility` configuration.
+type Config struct {
+    Hostname        string                // Hostname
+    Domainname      string                // Domainname
+    User            string                // User that will run the command(s) inside the container
+    AttachStdin     bool                  // Attach the standard input, makes possible user interaction
+    AttachStdout    bool                  // Attach the standard output
+    AttachStderr    bool                  // Attach the standard error
+    ExposedPorts    map[nat.Port]struct{} `json:",omitempty"` // List of exposed ports
+    PublishService  string                `json:",omitempty"` // Name of the network service exposed by the container
+    Tty             bool                  // Attach standard streams to a tty, including stdin if it is not closed.
+    OpenStdin       bool                  // Open stdin
+    StdinOnce       bool                  // If true, close stdin after the 1 attached client disconnects.
+    Env             []string              // List of environment variables to set in the container
+    Cmd             *Command              // Command to run when starting the container
+    Image           string                // Name of the image as it was passed by the operator (eg. could be symbolic)
+    Volumes         map[string]struct{}   // List of volumes (mounts) used for the container
+    VolumeDriver    string                `json:",omitempty"` // Name of the volume driver used to mount volumes
+    WorkingDir      string                // Current directory (PWD) in which the command will be launched
+    Entrypoint      *Entrypoint           // Entrypoint to run when starting the container
+    NetworkDisabled bool                  `json:",omitempty"` // Is network disabled
+    MacAddress      string                `json:",omitempty"` // Mac Address of the container
+    OnBuild         []string              // ONBUILD metadata that were defined on the image Dockerfile
+    Labels          map[string]string     // List of labels set to this container
+}
+
+type ContainerConfigWrapper struct {
+    *Config
+    InnerHostConfig *HostConfig `json:"HostConfig,omitempty"`
+    Cpuset          string      `json:",omitempty"` // Deprecated. Exported for backwards compatibility.
+    *HostConfig                 // Deprecated. Exported to read attributes from json that are not in the inner host config structure.
+}
+
+func (w *ContainerConfigWrapper) GetHostConfig() *HostConfig {
+    hc := w.HostConfig
+
+    if hc == nil && w.InnerHostConfig != nil {
+        hc = w.InnerHostConfig
+    } else if w.InnerHostConfig != nil {
+        if hc.Memory != 0 && w.InnerHostConfig.Memory == 0 {
+            w.InnerHostConfig.Memory = hc.Memory
+        }
+        if hc.MemorySwap != 0 && w.InnerHostConfig.MemorySwap == 0 {
+            w.InnerHostConfig.MemorySwap = hc.MemorySwap
+        }
+        if hc.CpuShares != 0 && w.InnerHostConfig.CpuShares == 0 {
+            w.InnerHostConfig.CpuShares = hc.CpuShares
+        }
+
+        hc = w.InnerHostConfig
+    }
+
+    if hc != nil && w.Cpuset != "" && hc.CpusetCpus == "" {
+        hc.CpusetCpus = w.Cpuset
+    }
+
+    return hc
+}
+
+// DecodeContainerConfig decodes a json encoded config into a ContainerConfigWrapper
+// struct and returns both a Config and a HostConfig struct.
+// Be aware that this function does not check whether the resulting structs are nil;
+// it's your business to do so.
+func DecodeContainerConfig(src io.Reader) (*Config, *HostConfig, error) {
+    decoder := json.NewDecoder(src)
+
+    var w ContainerConfigWrapper
+    if err := decoder.Decode(&w); err != nil {
+        return nil, nil, err
+    }
+
+    return w.Config, w.GetHostConfig(), nil
+}
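A usage sketch for DecodeContainerConfig with a post-1.15 payload, where resource limits live under the nested HostConfig key rather than at the top level; the JSON here is hand-written for illustration and the import path assumes the in-tree package:

    package main

    import (
        "fmt"
        "strings"

        "github.com/docker/docker/runconfig"
    )

    func main() {
        // A minimal create payload: "Memory" sits inside "HostConfig", so it
        // is decoded into InnerHostConfig and surfaced by GetHostConfig.
        body := `{
            "Image": "ubuntu",
            "Cmd": ["date"],
            "HostConfig": {"Memory": 1000}
        }`
        config, hostConfig, err := runconfig.DecodeContainerConfig(strings.NewReader(body))
        if err != nil {
            panic(err)
        }
        fmt.Println(config.Image, hostConfig.Memory) // ubuntu 1000
    }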
diff --git a/runconfig/config_test.go b/runconfig/config_test.go
new file mode 100644
index 00000000..9efe1dff
--- /dev/null
+++ b/runconfig/config_test.go
@@ -0,0 +1,228 @@
+package runconfig
+
+import (
+    "bytes"
+    "encoding/json"
+    "fmt"
+    "io/ioutil"
+    "testing"
+)
+
+func TestEntrypointMarshalJSON(t *testing.T) {
+    entrypoints := map[*Entrypoint]string{
+        nil:           "",
+        &Entrypoint{}: "null",
+        &Entrypoint{[]string{"/bin/sh", "-c", "echo"}}: `["/bin/sh","-c","echo"]`,
+    }
+
+    for entrypoint, expected := range entrypoints {
+        data, err := entrypoint.MarshalJSON()
+        if err != nil {
+            t.Fatal(err)
+        }
+        if string(data) != expected {
+            t.Fatalf("Expected %v, got %v", expected, string(data))
+        }
+    }
+}
+
+func TestEntrypointUnmarshalJSON(t *testing.T) {
+    parts := map[string][]string{
+        "":   {"default", "values"},
+        "[]": {},
+        `["/bin/sh","-c","echo"]`: {"/bin/sh", "-c", "echo"},
+    }
+    for json, expectedParts := range parts {
+        entrypoint := &Entrypoint{
+            []string{"default", "values"},
+        }
+        if err := entrypoint.UnmarshalJSON([]byte(json)); err != nil {
+            t.Fatal(err)
+        }
+
+        actualParts := entrypoint.Slice()
+        if len(actualParts) != len(expectedParts) {
+            t.Fatalf("Expected %v parts, got %v (%v)", len(expectedParts), len(actualParts), expectedParts)
+        }
+        for index, part := range actualParts {
+            if part != expectedParts[index] {
+                t.Fatalf("Expected %v, got %v", expectedParts, actualParts)
+                break
+            }
+        }
+    }
+}
+
+func TestCommandToString(t *testing.T) {
+    commands := map[*Command]string{
+        &Command{[]string{""}}:           "",
+        &Command{[]string{"one"}}:        "one",
+        &Command{[]string{"one", "two"}}: "one two",
+    }
+    for command, expected := range commands {
+        toString := command.ToString()
+        if toString != expected {
+            t.Fatalf("Expected %v, got %v", expected, toString)
+        }
+    }
+}
+
+func TestCommandMarshalJSON(t *testing.T) {
+    commands := map[*Command]string{
+        nil:        "",
+        &Command{}: "null",
+        &Command{[]string{"/bin/sh", "-c", "echo"}}: `["/bin/sh","-c","echo"]`,
+    }
+
+    for command, expected := range commands {
+        data, err := command.MarshalJSON()
+        if err != nil {
+            t.Fatal(err)
+        }
+        if string(data) != expected {
+            t.Fatalf("Expected %v, got %v", expected, string(data))
+        }
+    }
+}
+
+func TestCommandUnmarshalJSON(t *testing.T) {
+    parts := map[string][]string{
+        "":   {"default", "values"},
+        "[]": {},
+        `["/bin/sh","-c","echo"]`: {"/bin/sh", "-c", "echo"},
+    }
+    for json, expectedParts := range parts {
+        command := &Command{
+            []string{"default", "values"},
+        }
+        if err := command.UnmarshalJSON([]byte(json)); err != nil {
+            t.Fatal(err)
+        }
+
+        actualParts := command.Slice()
+        if len(actualParts) != len(expectedParts) {
+            t.Fatalf("Expected %v parts, got %v (%v)", len(expectedParts), len(actualParts), expectedParts)
+        }
+        for index, part := range actualParts {
+            if part != expectedParts[index] {
+                t.Fatalf("Expected %v, got %v", expectedParts, actualParts)
+                break
+            }
+        }
+    }
+}
+
+func TestDecodeContainerConfig(t *testing.T) {
+    fixtures := []struct {
+        file       string
+        entrypoint *Entrypoint
+    }{
+        {"fixtures/container_config_1_14.json", NewEntrypoint()},
+        {"fixtures/container_config_1_17.json", NewEntrypoint("bash")},
+        {"fixtures/container_config_1_19.json", NewEntrypoint("bash")},
+    }
+
+    for _, f := range fixtures {
+        b, err := ioutil.ReadFile(f.file)
+        if err != nil {
+            t.Fatal(err)
+        }
+
+        c, h, err := DecodeContainerConfig(bytes.NewReader(b))
+        if err != nil {
+            t.Fatal(fmt.Errorf("Error parsing %s: %v", f.file, err))
+        }
+
+        if c.Image != "ubuntu" {
+            t.Fatalf("Expected ubuntu image, found %s\n", c.Image)
+        }
+
+        if c.Entrypoint.Len() != f.entrypoint.Len() {
+            t.Fatalf("Expected %v, found %v\n", f.entrypoint, c.Entrypoint)
+        }
+
+        if h.Memory != 1000 {
+            t.Fatalf("Expected memory to be 1000, found %d\n", h.Memory)
+        }
+    }
+}
+
+func TestEntrypointUnmarshalString(t *testing.T) {
+    var e *Entrypoint
+    echo, err := json.Marshal("echo")
+    if err != nil {
+        t.Fatal(err)
+    }
+    if err := json.Unmarshal(echo, &e); err != nil {
+        t.Fatal(err)
+    }
+
+    slice := e.Slice()
+    if len(slice) != 1 {
+        t.Fatalf("expected 1 element after unmarshal: %q", slice)
+    }
+
+    if slice[0] != "echo" {
+        t.Fatalf("expected `echo`, got: %q", slice[0])
+    }
+}
+
+func TestEntrypointUnmarshalSlice(t *testing.T) {
+    var e *Entrypoint
+    echo, err := json.Marshal([]string{"echo"})
+    if err != nil {
+        t.Fatal(err)
+    }
+    if err := json.Unmarshal(echo, &e); err != nil {
+        t.Fatal(err)
+    }
+
+    slice := e.Slice()
+    if len(slice) != 1 {
+        t.Fatalf("expected 1 element after unmarshal: %q", slice)
+    }
+
+    if slice[0] != "echo" {
+        t.Fatalf("expected `echo`, got: %q", slice[0])
+    }
+}
+
+func TestCommandUnmarshalSlice(t *testing.T) {
+    var e *Command
+    echo, err := json.Marshal([]string{"echo"})
+    if err != nil {
+        t.Fatal(err)
+    }
+    if err := json.Unmarshal(echo, &e); err != nil {
+        t.Fatal(err)
+    }
+
+    slice := e.Slice()
+    if len(slice) != 1 {
+        t.Fatalf("expected 1 element after unmarshal: %q", slice)
+    }
+
+    if slice[0] != "echo" {
+        t.Fatalf("expected `echo`, got: %q", slice[0])
+    }
+}
+
+func TestCommandUnmarshalString(t *testing.T) {
+    var e *Command
+    echo, err := json.Marshal("echo")
+    if err != nil {
+        t.Fatal(err)
+    }
+    if err := json.Unmarshal(echo, &e); err != nil {
+        t.Fatal(err)
+    }
+
+    slice := e.Slice()
+    if len(slice) != 1 {
+        t.Fatalf("expected 1 element after unmarshal: %q", slice)
+    }
+
+    if slice[0] != "echo" {
+        t.Fatalf("expected `echo`, got: %q", slice[0])
+    }
+}
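Ahead of the exec.go diff that follows, a usage sketch of its ParseExec entry point, wired up the same way exec_test.go does it and assuming the Docker tree is importable at its in-tree path; it mirrors `docker exec -i -t mycontainer ls -l`:

    package main

    import (
        "fmt"

        flag "github.com/docker/docker/pkg/mflag"
        "github.com/docker/docker/runconfig"
    )

    func main() {
        // The first positional argument is the container; everything after
        // it becomes the command to exec.
        cmd := flag.NewFlagSet("exec", flag.ContinueOnError)
        execConfig, err := runconfig.ParseExec(cmd, []string{"-i", "-t", "mycontainer", "ls", "-l"})
        if err != nil {
            panic(err)
        }
        fmt.Println(execConfig.Container, execConfig.Cmd, execConfig.Tty)
        // Output: mycontainer [ls -l] true
    }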
!= "echo" { + t.Fatalf("expected `echo`, got: %q", slice[0]) + } +} + +func TestCommandUnmarshalString(t *testing.T) { + var e *Command + echo, err := json.Marshal("echo") + if err != nil { + t.Fatal(err) + } + if err := json.Unmarshal(echo, &e); err != nil { + t.Fatal(err) + } + + slice := e.Slice() + if len(slice) != 1 { + t.Fatalf("expected 1 element after unmarshal: %q", slice) + } + + if slice[0] != "echo" { + t.Fatalf("expected `echo`, got: %q", slice[0]) + } +} diff --git a/runconfig/exec.go b/runconfig/exec.go new file mode 100644 index 00000000..781cb355 --- /dev/null +++ b/runconfig/exec.go @@ -0,0 +1,56 @@ +package runconfig + +import ( + flag "github.com/docker/docker/pkg/mflag" +) + +type ExecConfig struct { + User string + Privileged bool + Tty bool + Container string + AttachStdin bool + AttachStderr bool + AttachStdout bool + Detach bool + Cmd []string +} + +func ParseExec(cmd *flag.FlagSet, args []string) (*ExecConfig, error) { + var ( + flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached") + flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY") + flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run command in the background") + flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID (format: [:])") + execCmd []string + container string + ) + cmd.Require(flag.Min, 2) + if err := cmd.ParseFlags(args, true); err != nil { + return nil, err + } + container = cmd.Arg(0) + parsedArgs := cmd.Args() + execCmd = parsedArgs[1:] + + execConfig := &ExecConfig{ + User: *flUser, + // TODO(vishh): Expose 'Privileged' once it is supported. + // + //Privileged: job.GetenvBool("Privileged"), + Tty: *flTty, + Cmd: execCmd, + Container: container, + Detach: *flDetach, + } + + // If -d is not set, attach to everything by default + if !*flDetach { + execConfig.AttachStdout = true + execConfig.AttachStderr = true + if *flStdin { + execConfig.AttachStdin = true + } + } + + return execConfig, nil +} diff --git a/runconfig/exec_test.go b/runconfig/exec_test.go new file mode 100644 index 00000000..a4b7ea9b --- /dev/null +++ b/runconfig/exec_test.go @@ -0,0 +1,129 @@ +package runconfig + +import ( + "fmt" + "io/ioutil" + "testing" + + flag "github.com/docker/docker/pkg/mflag" +) + +type arguments struct { + args []string +} + +func TestParseExec(t *testing.T) { + invalids := map[*arguments]error{ + &arguments{[]string{"-unknown"}}: fmt.Errorf("flag provided but not defined: -unknown"), + &arguments{[]string{"-u"}}: fmt.Errorf("flag needs an argument: -u"), + &arguments{[]string{"--user"}}: fmt.Errorf("flag needs an argument: --user"), + } + valids := map[*arguments]*ExecConfig{ + &arguments{ + []string{"container", "command"}, + }: { + Container: "container", + Cmd: []string{"command"}, + AttachStdout: true, + AttachStderr: true, + }, + &arguments{ + []string{"container", "command1", "command2"}, + }: { + Container: "container", + Cmd: []string{"command1", "command2"}, + AttachStdout: true, + AttachStderr: true, + }, + &arguments{ + []string{"-i", "-t", "-u", "uid", "container", "command"}, + }: { + User: "uid", + AttachStdin: true, + AttachStdout: true, + AttachStderr: true, + Tty: true, + Container: "container", + Cmd: []string{"command"}, + }, + &arguments{ + []string{"-d", "container", "command"}, + }: { + AttachStdin: false, + AttachStdout: false, + AttachStderr: false, + Detach: true, + Container: "container", + Cmd: []string{"command"}, + }, + &arguments{ + []string{"-t", "-i", "-d", "container", 
"command"}, + }: { + AttachStdin: false, + AttachStdout: false, + AttachStderr: false, + Detach: true, + Tty: true, + Container: "container", + Cmd: []string{"command"}, + }, + } + for invalid, expectedError := range invalids { + cmd := flag.NewFlagSet("exec", flag.ContinueOnError) + cmd.ShortUsage = func() {} + cmd.SetOutput(ioutil.Discard) + _, err := ParseExec(cmd, invalid.args) + if err == nil || err.Error() != expectedError.Error() { + t.Fatalf("Expected an error [%v] for %v, got %v", expectedError, invalid, err) + } + + } + for valid, expectedExecConfig := range valids { + cmd := flag.NewFlagSet("exec", flag.ContinueOnError) + cmd.ShortUsage = func() {} + cmd.SetOutput(ioutil.Discard) + execConfig, err := ParseExec(cmd, valid.args) + if err != nil { + t.Fatal(err) + } + if !compareExecConfig(expectedExecConfig, execConfig) { + t.Fatalf("Expected [%v] for %v, got [%v]", expectedExecConfig, valid, execConfig) + } + } +} + +func compareExecConfig(config1 *ExecConfig, config2 *ExecConfig) bool { + if config1.AttachStderr != config2.AttachStderr { + return false + } + if config1.AttachStdin != config2.AttachStdin { + return false + } + if config1.AttachStdout != config2.AttachStdout { + return false + } + if config1.Container != config2.Container { + return false + } + if config1.Detach != config2.Detach { + return false + } + if config1.Privileged != config2.Privileged { + return false + } + if config1.Tty != config2.Tty { + return false + } + if config1.User != config2.User { + return false + } + if len(config1.Cmd) != len(config2.Cmd) { + return false + } + for index, value := range config1.Cmd { + if value != config2.Cmd[index] { + return false + } + } + return true +} diff --git a/runconfig/fixtures/container_config_1_14.json b/runconfig/fixtures/container_config_1_14.json new file mode 100644 index 00000000..b08334c0 --- /dev/null +++ b/runconfig/fixtures/container_config_1_14.json @@ -0,0 +1,30 @@ +{ + "Hostname":"", + "Domainname": "", + "User":"", + "Memory": 1000, + "MemorySwap":0, + "CpuShares": 512, + "Cpuset": "0,1", + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "bash" + ], + "Image":"ubuntu", + "Volumes":{ + "/tmp": {} + }, + "WorkingDir":"", + "NetworkDisabled": false, + "ExposedPorts":{ + "22/tcp": {} + }, + "RestartPolicy": { "Name": "always" } +} diff --git a/runconfig/fixtures/container_config_1_17.json b/runconfig/fixtures/container_config_1_17.json new file mode 100644 index 00000000..60fc6e25 --- /dev/null +++ b/runconfig/fixtures/container_config_1_17.json @@ -0,0 +1,49 @@ +{ + "Hostname": "", + "Domainname": "", + "User": "", + "Memory": 1000, + "MemorySwap": 0, + "CpuShares": 512, + "Cpuset": "0,1", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Entrypoint": "bash", + "Image": "ubuntu", + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "SecurityOpt": [""], + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + 
"CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [] + } +} diff --git a/runconfig/fixtures/container_config_1_19.json b/runconfig/fixtures/container_config_1_19.json new file mode 100644 index 00000000..9a3ce205 --- /dev/null +++ b/runconfig/fixtures/container_config_1_19.json @@ -0,0 +1,57 @@ +{ + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Entrypoint": "bash", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 1000, + "MemorySwap": 0, + "CpuShares": 512, + "CpusetCpus": "0,1", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [""], + "CgroupParent": "" + } +} diff --git a/runconfig/fixtures/container_hostconfig_1_14.json b/runconfig/fixtures/container_hostconfig_1_14.json new file mode 100644 index 00000000..c72ac91c --- /dev/null +++ b/runconfig/fixtures/container_hostconfig_1_14.json @@ -0,0 +1,18 @@ +{ + "Binds": ["/tmp:/tmp"], + "ContainerIDFile": "", + "LxcConf": [], + "Privileged": false, + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "49153" + } + ] + }, + "Links": ["/name:alias"], + "PublishAllPorts": false, + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"] +} diff --git a/runconfig/fixtures/container_hostconfig_1_19.json b/runconfig/fixtures/container_hostconfig_1_19.json new file mode 100644 index 00000000..5ca8aa7e --- /dev/null +++ b/runconfig/fixtures/container_hostconfig_1_19.json @@ -0,0 +1,30 @@ +{ + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "BlkioWeight": 300, + "OomKillDisable": false, + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [""], + "CgroupParent": "" +} diff --git a/runconfig/fixtures/valid.env b/runconfig/fixtures/valid.env new file mode 100644 index 00000000..3afbdc81 --- /dev/null +++ b/runconfig/fixtures/valid.env @@ -0,0 +1 @@ +ENV1=value1 diff --git a/runconfig/fixtures/valid.label b/runconfig/fixtures/valid.label new file mode 100644 index 00000000..b4208bdf --- 
/dev/null
+++ b/runconfig/fixtures/valid.label
@@ -0,0 +1 @@
+LABEL1=value1
diff --git a/runconfig/hostconfig.go b/runconfig/hostconfig.go
new file mode 100644
index 00000000..38255574
--- /dev/null
+++ b/runconfig/hostconfig.go
@@ -0,0 +1,281 @@
+package runconfig
+
+import (
+    "encoding/json"
+    "io"
+    "strings"
+
+    "github.com/docker/docker/pkg/nat"
+    "github.com/docker/docker/pkg/ulimit"
+)
+
+type KeyValuePair struct {
+    Key   string
+    Value string
+}
+
+type NetworkMode string
+
+type IpcMode string
+
+// IsPrivate indicates whether the container uses its private ipc stack
+func (n IpcMode) IsPrivate() bool {
+    return !(n.IsHost() || n.IsContainer())
+}
+
+func (n IpcMode) IsHost() bool {
+    return n == "host"
+}
+
+func (n IpcMode) IsContainer() bool {
+    parts := strings.SplitN(string(n), ":", 2)
+    return len(parts) > 1 && parts[0] == "container"
+}
+
+func (n IpcMode) Valid() bool {
+    parts := strings.Split(string(n), ":")
+    switch mode := parts[0]; mode {
+    case "", "host":
+    case "container":
+        if len(parts) != 2 || parts[1] == "" {
+            return false
+        }
+    default:
+        return false
+    }
+    return true
+}
+
+func (n IpcMode) Container() string {
+    parts := strings.SplitN(string(n), ":", 2)
+    if len(parts) > 1 {
+        return parts[1]
+    }
+    return ""
+}
+
+type UTSMode string
+
+// IsPrivate indicates whether the container uses its private UTS namespace
+func (n UTSMode) IsPrivate() bool {
+    return !(n.IsHost())
+}
+
+func (n UTSMode) IsHost() bool {
+    return n == "host"
+}
+
+func (n UTSMode) Valid() bool {
+    parts := strings.Split(string(n), ":")
+    switch mode := parts[0]; mode {
+    case "", "host":
+    default:
+        return false
+    }
+    return true
+}
+
+type PidMode string
+
+// IsPrivate indicates whether the container uses its private pid stack
+func (n PidMode) IsPrivate() bool {
+    return !(n.IsHost())
+}
+
+func (n PidMode) IsHost() bool {
+    return n == "host"
+}
+
+func (n PidMode) Valid() bool {
+    parts := strings.Split(string(n), ":")
+    switch mode := parts[0]; mode {
+    case "", "host":
+    default:
+        return false
+    }
+    return true
+}
+
+type DeviceMapping struct {
+    PathOnHost        string
+    PathInContainer   string
+    CgroupPermissions string
+}
+
+type RestartPolicy struct {
+    Name              string
+    MaximumRetryCount int
+}
+
+func (rp *RestartPolicy) IsNone() bool {
+    return rp.Name == "no"
+}
+
+func (rp *RestartPolicy) IsAlways() bool {
+    return rp.Name == "always"
+}
+
+func (rp *RestartPolicy) IsOnFailure() bool {
+    return rp.Name == "on-failure"
+}
+
+type LogConfig struct {
+    Type   string
+    Config map[string]string
+}
+
+type LxcConfig struct {
+    values []KeyValuePair
+}
+
+func (c *LxcConfig) MarshalJSON() ([]byte, error) {
+    if c == nil {
+        return []byte{}, nil
+    }
+    return json.Marshal(c.Slice())
+}
+
+func (c *LxcConfig) UnmarshalJSON(b []byte) error {
+    if len(b) == 0 {
+        return nil
+    }
+
+    var kv []KeyValuePair
+    if err := json.Unmarshal(b, &kv); err != nil {
+        var h map[string]string
+        if err := json.Unmarshal(b, &h); err != nil {
+            return err
+        }
+        for k, v := range h {
+            kv = append(kv, KeyValuePair{k, v})
+        }
+    }
+    c.values = kv
+
+    return nil
+}
+
+func (c *LxcConfig) Len() int {
+    if c == nil {
+        return 0
+    }
+    return len(c.values)
+}
+
+func (c *LxcConfig) Slice() []KeyValuePair {
+    if c == nil {
+        return nil
+    }
+    return c.values
+}
+
+func NewLxcConfig(values []KeyValuePair) *LxcConfig {
+    return &LxcConfig{values}
+}
+
+type CapList struct {
+    caps []string
+}
+
+func (c *CapList) MarshalJSON() ([]byte, error) {
+    if c == nil {
+        return []byte{}, nil
+    }
+    return json.Marshal(c.Slice())
+}
+
+func (c
*CapList) UnmarshalJSON(b []byte) error { + if len(b) == 0 { + return nil + } + + var caps []string + if err := json.Unmarshal(b, &caps); err != nil { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + caps = append(caps, s) + } + c.caps = caps + + return nil +} + +func (c *CapList) Len() int { + if c == nil { + return 0 + } + return len(c.caps) +} + +func (c *CapList) Slice() []string { + if c == nil { + return nil + } + return c.caps +} + +func NewCapList(caps []string) *CapList { + return &CapList{caps} +} + +type HostConfig struct { + Binds []string + ContainerIDFile string + LxcConf *LxcConfig + Memory int64 // Memory limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1` to disable swap + CpuShares int64 // CPU shares (relative weight vs. other containers) + CpuPeriod int64 + CpusetCpus string // CpusetCpus 0-2, 0,1 + CpusetMems string // CpusetMems 0-2, 0,1 + CpuQuota int64 + BlkioWeight int64 // Block IO weight (relative weight vs. other containers) + OomKillDisable bool // Whether to disable OOM Killer or not + MemorySwappiness *int64 // Tuning container memory swappiness behaviour + Privileged bool + PortBindings nat.PortMap + Links []string + PublishAllPorts bool + Dns []string + DnsSearch []string + ExtraHosts []string + VolumesFrom []string + Devices []DeviceMapping + NetworkMode NetworkMode + IpcMode IpcMode + PidMode PidMode + UTSMode UTSMode + CapAdd *CapList + CapDrop *CapList + GroupAdd []string + RestartPolicy RestartPolicy + SecurityOpt []string + ReadonlyRootfs bool + Ulimits []*ulimit.Ulimit + LogConfig LogConfig + CgroupParent string // Parent cgroup. + ConsoleSize [2]int // Initial console size on Windows +} + +func MergeConfigs(config *Config, hostConfig *HostConfig) *ContainerConfigWrapper { + return &ContainerConfigWrapper{ + config, + hostConfig, + "", nil, + } +} + +func DecodeHostConfig(src io.Reader) (*HostConfig, error) { + decoder := json.NewDecoder(src) + + var w ContainerConfigWrapper + if err := decoder.Decode(&w); err != nil { + return nil, err + } + + hc := w.GetHostConfig() + + return hc, nil +} diff --git a/runconfig/hostconfig_test.go b/runconfig/hostconfig_test.go new file mode 100644 index 00000000..7c0befc7 --- /dev/null +++ b/runconfig/hostconfig_test.go @@ -0,0 +1,303 @@ +package runconfig + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "testing" +) + +func TestNetworkModeTest(t *testing.T) { + networkModes := map[NetworkMode][]bool{ + // private, bridge, host, container, none, default + "": {true, false, false, false, false, false}, + "something:weird": {true, false, false, false, false, false}, + "bridge": {true, true, false, false, false, false}, + DefaultDaemonNetworkMode(): {true, true, false, false, false, false}, + "host": {false, false, true, false, false, false}, + "container:name": {false, false, false, true, false, false}, + "none": {true, false, false, false, true, false}, + "default": {true, false, false, false, false, true}, + } + networkModeNames := map[NetworkMode]string{ + "": "", + "something:weird": "", + "bridge": "bridge", + DefaultDaemonNetworkMode(): "bridge", + "host": "host", + "container:name": "container", + "none": "none", + "default": "default", + } + for networkMode, state := range networkModes { + if networkMode.IsPrivate() != state[0] { + t.Fatalf("NetworkMode.IsPrivate for %v should have been %v but was %v", networkMode, state[0], networkMode.IsPrivate()) + } + if networkMode.IsBridge() != state[1] { + t.Fatalf("NetworkMode.IsBridge for 
%v should have been %v but was %v", networkMode, state[1], networkMode.IsBridge()) + } + if networkMode.IsHost() != state[2] { + t.Fatalf("NetworkMode.IsHost for %v should have been %v but was %v", networkMode, state[2], networkMode.IsHost()) + } + if networkMode.IsContainer() != state[3] { + t.Fatalf("NetworkMode.IsContainer for %v should have been %v but was %v", networkMode, state[3], networkMode.IsContainer()) + } + if networkMode.IsNone() != state[4] { + t.Fatalf("NetworkMode.IsNone for %v should have been %v but was %v", networkMode, state[4], networkMode.IsNone()) + } + if networkMode.IsDefault() != state[5] { + t.Fatalf("NetworkMode.IsDefault for %v should have been %v but was %v", networkMode, state[5], networkMode.IsDefault()) + } + if networkMode.NetworkName() != networkModeNames[networkMode] { + t.Fatalf("Expected name %v, got %v", networkModeNames[networkMode], networkMode.NetworkName()) + } + } +} + +func TestIpcModeTest(t *testing.T) { + ipcModes := map[IpcMode][]bool{ + // private, host, container, valid + "": {true, false, false, true}, + "something:weird": {true, false, false, false}, + ":weird": {true, false, false, true}, + "host": {false, true, false, true}, + "container:name": {false, false, true, true}, + "container:name:something": {false, false, true, false}, + "container:": {false, false, true, false}, + } + for ipcMode, state := range ipcModes { + if ipcMode.IsPrivate() != state[0] { + t.Fatalf("IpcMode.IsPrivate for %v should have been %v but was %v", ipcMode, state[0], ipcMode.IsPrivate()) + } + if ipcMode.IsHost() != state[1] { + t.Fatalf("IpcMode.IsHost for %v should have been %v but was %v", ipcMode, state[1], ipcMode.IsHost()) + } + if ipcMode.IsContainer() != state[2] { + t.Fatalf("IpcMode.IsContainer for %v should have been %v but was %v", ipcMode, state[2], ipcMode.IsContainer()) + } + if ipcMode.Valid() != state[3] { + t.Fatalf("IpcMode.Valid for %v should have been %v but was %v", ipcMode, state[3], ipcMode.Valid()) + } + } + containerIpcModes := map[IpcMode]string{ + "": "", + "something": "", + "something:weird": "weird", + "container": "", + "container:": "", + "container:name": "name", + "container:name1:name2": "name1:name2", + } + for ipcMode, container := range containerIpcModes { + if ipcMode.Container() != container { + t.Fatalf("Expected %v for %v but was %v", container, ipcMode, ipcMode.Container()) + } + } +} + +func TestUTSModeTest(t *testing.T) { + utsModes := map[UTSMode][]bool{ + // private, host, valid + "": {true, false, true}, + "something:weird": {true, false, false}, + "host": {false, true, true}, + "host:name": {true, false, true}, + } + for utsMode, state := range utsModes { + if utsMode.IsPrivate() != state[0] { + t.Fatalf("UtsMode.IsPrivate for %v should have been %v but was %v", utsMode, state[0], utsMode.IsPrivate()) + } + if utsMode.IsHost() != state[1] { + t.Fatalf("UtsMode.IsHost for %v should have been %v but was %v", utsMode, state[1], utsMode.IsHost()) + } + if utsMode.Valid() != state[2] { + t.Fatalf("UtsMode.Valid for %v should have been %v but was %v", utsMode, state[2], utsMode.Valid()) + } + } +} + +func TestPidModeTest(t *testing.T) { + pidModes := map[PidMode][]bool{ + // private, host, valid + "": {true, false, true}, + "something:weird": {true, false, false}, + "host": {false, true, true}, + "host:name": {true, false, true}, + } + for pidMode, state := range pidModes { + if pidMode.IsPrivate() != state[0] { + t.Fatalf("PidMode.IsPrivate for %v should have been %v but was %v", pidMode, state[0], 
pidMode.IsPrivate()) + } + if pidMode.IsHost() != state[1] { + t.Fatalf("PidMode.IsHost for %v should have been %v but was %v", pidMode, state[1], pidMode.IsHost()) + } + if pidMode.Valid() != state[2] { + t.Fatalf("PidMode.Valid for %v should have been %v but was %v", pidMode, state[2], pidMode.Valid()) + } + } +} + +func TestRestartPolicy(t *testing.T) { + restartPolicies := map[RestartPolicy][]bool{ + // none, always, failure + RestartPolicy{}: {false, false, false}, + RestartPolicy{"something", 0}: {false, false, false}, + RestartPolicy{"no", 0}: {true, false, false}, + RestartPolicy{"always", 0}: {false, true, false}, + RestartPolicy{"on-failure", 0}: {false, false, true}, + } + for restartPolicy, state := range restartPolicies { + if restartPolicy.IsNone() != state[0] { + t.Fatalf("RestartPolicy.IsNone for %v should have been %v but was %v", restartPolicy, state[0], restartPolicy.IsNone()) + } + if restartPolicy.IsAlways() != state[1] { + t.Fatalf("RestartPolicy.IsAlways for %v should have been %v but was %v", restartPolicy, state[1], restartPolicy.IsAlways()) + } + if restartPolicy.IsOnFailure() != state[2] { + t.Fatalf("RestartPolicy.IsOnFailure for %v should have been %v but was %v", restartPolicy, state[2], restartPolicy.IsOnFailure()) + } + } +} + +func TestLxcConfigMarshalJSON(t *testing.T) { + lxcConfigs := map[*LxcConfig]string{ + nil: "", + &LxcConfig{}: "null", + &LxcConfig{ + []KeyValuePair{{"key1", "value1"}}, + }: `[{"Key":"key1","Value":"value1"}]`, + } + + for lxcconfig, expected := range lxcConfigs { + data, err := lxcconfig.MarshalJSON() + if err != nil { + t.Fatal(err) + } + if string(data) != expected { + t.Fatalf("Expected %v, got %v", expected, string(data)) + } + } +} + +func TestLxcConfigUnmarshalJSON(t *testing.T) { + keyvaluePairs := map[string][]KeyValuePair{ + "": {{"key1", "value1"}}, + "[]": {}, + `[{"Key":"key2","Value":"value2"}]`: {{"key2", "value2"}}, + } + for json, expectedParts := range keyvaluePairs { + lxcConfig := &LxcConfig{ + []KeyValuePair{{"key1", "value1"}}, + } + if err := lxcConfig.UnmarshalJSON([]byte(json)); err != nil { + t.Fatal(err) + } + + actualParts := lxcConfig.Slice() + if len(actualParts) != len(expectedParts) { + t.Fatalf("Expected %v keyvaluePairs, got %v (%v)", len(expectedParts), len(actualParts), expectedParts) + } + for index, part := range actualParts { + if part != expectedParts[index] { + t.Fatalf("Expected %v, got %v", expectedParts, actualParts) + break + } + } + } +} + +func TestMergeConfigs(t *testing.T) { + expectedHostname := "hostname" + expectedContainerIDFile := "containerIdFile" + config := &Config{ + Hostname: expectedHostname, + } + hostConfig := &HostConfig{ + ContainerIDFile: expectedContainerIDFile, + } + containerConfigWrapper := MergeConfigs(config, hostConfig) + if containerConfigWrapper.Config.Hostname != expectedHostname { + t.Fatalf("containerConfigWrapper config hostname expected %v got %v", expectedHostname, containerConfigWrapper.Config.Hostname) + } + if containerConfigWrapper.InnerHostConfig.ContainerIDFile != expectedContainerIDFile { + t.Fatalf("containerConfigWrapper hostconfig containerIdfile expected %v got %v", expectedContainerIDFile, containerConfigWrapper.InnerHostConfig.ContainerIDFile) + } + if containerConfigWrapper.Cpuset != "" { + t.Fatalf("Expected empty Cpuset, got %v", containerConfigWrapper.Cpuset) + } +} + +func TestDecodeHostConfig(t *testing.T) { + fixtures := []struct { + file string + }{ + {"fixtures/container_hostconfig_1_14.json"}, + 
{"fixtures/container_hostconfig_1_19.json"}, + } + + for _, f := range fixtures { + b, err := ioutil.ReadFile(f.file) + if err != nil { + t.Fatal(err) + } + + c, err := DecodeHostConfig(bytes.NewReader(b)) + if err != nil { + t.Fatal(fmt.Errorf("Error parsing %s: %v", f, err)) + } + + if c.Privileged != false { + t.Fatalf("Expected privileged false, found %v\n", c.Privileged) + } + + if l := len(c.Binds); l != 1 { + t.Fatalf("Expected 1 bind, found %d\n", l) + } + + if c.CapAdd.Len() != 1 && c.CapAdd.Slice()[0] != "NET_ADMIN" { + t.Fatalf("Expected CapAdd NET_ADMIN, got %v", c.CapAdd) + } + + if c.CapDrop.Len() != 1 && c.CapDrop.Slice()[0] != "NET_ADMIN" { + t.Fatalf("Expected CapDrop MKNOD, got %v", c.CapDrop) + } + } +} + +func TestCapListUnmarshalSliceAndString(t *testing.T) { + var cl *CapList + cap0, err := json.Marshal([]string{"CAP_SOMETHING"}) + if err != nil { + t.Fatal(err) + } + if err := json.Unmarshal(cap0, &cl); err != nil { + t.Fatal(err) + } + + slice := cl.Slice() + if len(slice) != 1 { + t.Fatalf("expected 1 element after unmarshal: %q", slice) + } + + if slice[0] != "CAP_SOMETHING" { + t.Fatalf("expected `CAP_SOMETHING`, got: %q", slice[0]) + } + + cap1, err := json.Marshal("CAP_SOMETHING") + if err != nil { + t.Fatal(err) + } + if err := json.Unmarshal(cap1, &cl); err != nil { + t.Fatal(err) + } + + slice = cl.Slice() + if len(slice) != 1 { + t.Fatalf("expected 1 element after unmarshal: %q", slice) + } + + if slice[0] != "CAP_SOMETHING" { + t.Fatalf("expected `CAP_SOMETHING`, got: %q", slice[0]) + } +} diff --git a/runconfig/hostconfig_unix.go b/runconfig/hostconfig_unix.go new file mode 100644 index 00000000..af470a8b --- /dev/null +++ b/runconfig/hostconfig_unix.go @@ -0,0 +1,52 @@ +// +build !windows + +package runconfig + +import ( + "strings" +) + +// IsPrivate indicates whether container use it's private network stack +func (n NetworkMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +func (n NetworkMode) IsDefault() bool { + return n == "default" +} + +func DefaultDaemonNetworkMode() NetworkMode { + return NetworkMode("bridge") +} + +func (n NetworkMode) NetworkName() string { + if n.IsBridge() { + return "bridge" + } else if n.IsHost() { + return "host" + } else if n.IsContainer() { + return "container" + } else if n.IsNone() { + return "none" + } else if n.IsDefault() { + return "default" + } + return "" +} + +func (n NetworkMode) IsBridge() bool { + return n == "bridge" +} + +func (n NetworkMode) IsHost() bool { + return n == "host" +} + +func (n NetworkMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +func (n NetworkMode) IsNone() bool { + return n == "none" +} diff --git a/runconfig/hostconfig_windows.go b/runconfig/hostconfig_windows.go new file mode 100644 index 00000000..6f96d3f4 --- /dev/null +++ b/runconfig/hostconfig_windows.go @@ -0,0 +1,16 @@ +package runconfig + +func (n NetworkMode) IsDefault() bool { + return n == "default" +} + +func DefaultDaemonNetworkMode() NetworkMode { + return NetworkMode("default") +} + +func (n NetworkMode) NetworkName() string { + if n.IsDefault() { + return "default" + } + return "" +} diff --git a/runconfig/merge.go b/runconfig/merge.go new file mode 100644 index 00000000..df53ef96 --- /dev/null +++ b/runconfig/merge.go @@ -0,0 +1,65 @@ +package runconfig + +import ( + "strings" + + "github.com/docker/docker/pkg/nat" +) + +func Merge(userConf, imageConf *Config) error { + if userConf.User == "" { + userConf.User = 
imageConf.User + } + if len(userConf.ExposedPorts) == 0 { + userConf.ExposedPorts = imageConf.ExposedPorts + } else if imageConf.ExposedPorts != nil { + if userConf.ExposedPorts == nil { + userConf.ExposedPorts = make(nat.PortSet) + } + for port := range imageConf.ExposedPorts { + if _, exists := userConf.ExposedPorts[port]; !exists { + userConf.ExposedPorts[port] = struct{}{} + } + } + } + + if len(userConf.Env) == 0 { + userConf.Env = imageConf.Env + } else { + for _, imageEnv := range imageConf.Env { + found := false + imageEnvKey := strings.Split(imageEnv, "=")[0] + for _, userEnv := range userConf.Env { + userEnvKey := strings.Split(userEnv, "=")[0] + if imageEnvKey == userEnvKey { + found = true + break + } + } + if !found { + userConf.Env = append(userConf.Env, imageEnv) + } + } + } + + if userConf.Entrypoint.Len() == 0 { + if userConf.Cmd.Len() == 0 { + userConf.Cmd = imageConf.Cmd + } + + if userConf.Entrypoint == nil { + userConf.Entrypoint = imageConf.Entrypoint + } + } + if userConf.WorkingDir == "" { + userConf.WorkingDir = imageConf.WorkingDir + } + if len(userConf.Volumes) == 0 { + userConf.Volumes = imageConf.Volumes + } else { + for k, v := range imageConf.Volumes { + userConf.Volumes[k] = v + } + } + return nil +} diff --git a/runconfig/merge_test.go b/runconfig/merge_test.go new file mode 100644 index 00000000..6237ee9d --- /dev/null +++ b/runconfig/merge_test.go @@ -0,0 +1,83 @@ +package runconfig + +import ( + "testing" + + "github.com/docker/docker/pkg/nat" +) + +func TestMerge(t *testing.T) { + volumesImage := make(map[string]struct{}) + volumesImage["/test1"] = struct{}{} + volumesImage["/test2"] = struct{}{} + portsImage := make(nat.PortSet) + portsImage[newPortNoError("tcp", "1111")] = struct{}{} + portsImage[newPortNoError("tcp", "2222")] = struct{}{} + configImage := &Config{ + ExposedPorts: portsImage, + Env: []string{"VAR1=1", "VAR2=2"}, + Volumes: volumesImage, + } + + portsUser := make(nat.PortSet) + portsUser[newPortNoError("tcp", "2222")] = struct{}{} + portsUser[newPortNoError("tcp", "3333")] = struct{}{} + volumesUser := make(map[string]struct{}) + volumesUser["/test3"] = struct{}{} + configUser := &Config{ + ExposedPorts: portsUser, + Env: []string{"VAR2=3", "VAR3=3"}, + Volumes: volumesUser, + } + + if err := Merge(configUser, configImage); err != nil { + t.Error(err) + } + + if len(configUser.ExposedPorts) != 3 { + t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) + } + for portSpecs := range configUser.ExposedPorts { + if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { + t.Fatalf("Expected 1111 or 2222 or 3333, found %s", portSpecs) + } + } + if len(configUser.Env) != 3 { + t.Fatalf("Expected 3 env var, VAR1=1, VAR2=3 and VAR3=3, found %d", len(configUser.Env)) + } + for _, env := range configUser.Env { + if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" { + t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env) + } + } + + if len(configUser.Volumes) != 3 { + t.Fatalf("Expected 3 volumes, /test1, /test2 and /test3, found %d", len(configUser.Volumes)) + } + for v := range configUser.Volumes { + if v != "/test1" && v != "/test2" && v != "/test3" { + t.Fatalf("Expected /test1 or /test2 or /test3, found %s", v) + } + } + + ports, _, err := nat.ParsePortSpecs([]string{"0000"}) + if err != nil { + t.Error(err) + } + configImage2 := &Config{ + ExposedPorts: ports, + } + + if err := Merge(configUser, configImage2); err != nil { + t.Error(err) + } + + if 
len(configUser.ExposedPorts) != 4 {
+		t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts))
+	}
+	for portSpecs := range configUser.ExposedPorts {
+		if portSpecs.Port() != "0" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" {
+			t.Fatalf("Expected %q or %q or %q or %q, found %s", "0", "1111", "2222", "3333", portSpecs)
+		}
+	}
+}
diff --git a/runconfig/parse.go b/runconfig/parse.go
new file mode 100644
index 00000000..b5ca0b8f
--- /dev/null
+++ b/runconfig/parse.go
@@ -0,0 +1,507 @@
+package runconfig
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/docker/docker/opts"
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/nat"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/units"
+)
+
+var (
+	ErrConflictContainerNetworkAndLinks = fmt.Errorf("Conflicting options: --net=container can't be used with links. This would result in undefined behavior")
+	ErrConflictNetworkAndDns            = fmt.Errorf("Conflicting options: --dns and the network mode (--net)")
+	ErrConflictNetworkHostname          = fmt.Errorf("Conflicting options: -h and the network mode (--net)")
+	ErrConflictHostNetworkAndLinks      = fmt.Errorf("Conflicting options: --net=host can't be used with links. This would result in undefined behavior")
+	ErrConflictContainerNetworkAndMac   = fmt.Errorf("Conflicting options: --mac-address and the network mode (--net)")
+	ErrConflictNetworkHosts             = fmt.Errorf("Conflicting options: --add-host and the network mode (--net)")
+	ErrConflictNetworkPublishPorts      = fmt.Errorf("Conflicting options: -p, -P, --publish-all, --publish and the network mode (--net)")
+	ErrConflictNetworkExposePorts       = fmt.Errorf("Conflicting options: --expose and the network mode (--net)")
+)
+
+// validateNM is the set of fields passed to validateNetMode()
+type validateNM struct {
+	netMode        NetworkMode
+	flHostname     *string
+	flLinks        opts.ListOpts
+	flDns          opts.ListOpts
+	flExtraHosts   opts.ListOpts
+	flMacAddress   *string
+	flPublish      opts.ListOpts
+	flPublishAll   *bool
+	flExpose       opts.ListOpts
+	flVolumeDriver string
+}
+
+func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSet, error) {
+	var (
+		// FIXME: use utils.ListOpts for attach and volumes?
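+		// Callers drive Parse through an mflag.FlagSet, as parse_test.go
+		// does. A minimal sketch (hypothetical caller, not part of this
+		// file):
+		//
+		//	cmd := flag.NewFlagSet("run", flag.ContinueOnError)
+		//	config, hostConfig, cmd, err := Parse(cmd, []string{"-m", "64m", "busybox", "echo", "hi"})
+		//	// on success: config.Image == "busybox", hostConfig.Memory == 67108864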
+ flAttach = opts.NewListOpts(opts.ValidateAttach) + flVolumes = opts.NewListOpts(opts.ValidatePath) + flLinks = opts.NewListOpts(opts.ValidateLink) + flEnv = opts.NewListOpts(opts.ValidateEnv) + flLabels = opts.NewListOpts(opts.ValidateEnv) + flDevices = opts.NewListOpts(opts.ValidateDevice) + + flUlimits = opts.NewUlimitOpt(nil) + + flPublish = opts.NewListOpts(nil) + flExpose = opts.NewListOpts(nil) + flDns = opts.NewListOpts(opts.ValidateIPAddress) + flDnsSearch = opts.NewListOpts(opts.ValidateDNSSearch) + flExtraHosts = opts.NewListOpts(opts.ValidateExtraHost) + flVolumesFrom = opts.NewListOpts(nil) + flLxcOpts = opts.NewListOpts(nil) + flEnvFile = opts.NewListOpts(nil) + flCapAdd = opts.NewListOpts(nil) + flCapDrop = opts.NewListOpts(nil) + flGroupAdd = opts.NewListOpts(nil) + flSecurityOpt = opts.NewListOpts(nil) + flLabelsFile = opts.NewListOpts(nil) + flLoggingOpts = opts.NewListOpts(nil) + + flNetwork = cmd.Bool([]string{"#n", "#-networking"}, true, "Enable networking for this container") + flPrivileged = cmd.Bool([]string{"#privileged", "-privileged"}, false, "Give extended privileges to this container") + flPidMode = cmd.String([]string{"-pid"}, "", "PID namespace to use") + flUTSMode = cmd.String([]string{"-uts"}, "", "UTS namespace to use") + flPublishAll = cmd.Bool([]string{"P", "-publish-all"}, false, "Publish all exposed ports to random ports") + flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached") + flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY") + flOomKillDisable = cmd.Bool([]string{"-oom-kill-disable"}, false, "Disable OOM Killer") + flContainerIDFile = cmd.String([]string{"#cidfile", "-cidfile"}, "", "Write the container ID to the file") + flEntrypoint = cmd.String([]string{"#entrypoint", "-entrypoint"}, "", "Overwrite the default ENTRYPOINT of the image") + flHostname = cmd.String([]string{"h", "-hostname"}, "", "Container host name") + flMemoryString = cmd.String([]string{"m", "-memory"}, "", "Memory limit") + flMemorySwap = cmd.String([]string{"-memory-swap"}, "", "Total memory (memory + swap), '-1' to disable swap") + flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID (format: [:])") + flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container") + flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") + flCpuPeriod = cmd.Int64([]string{"-cpu-period"}, 0, "Limit CPU CFS (Completely Fair Scheduler) period") + flCpuQuota = cmd.Int64([]string{"-cpu-quota"}, 0, "Limit CPU CFS (Completely Fair Scheduler) quota") + flCpusetCpus = cmd.String([]string{"#-cpuset", "-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)") + flCpusetMems = cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)") + flBlkioWeight = cmd.Int64([]string{"-blkio-weight"}, 0, "Block IO (relative weight), between 10 and 1000") + flSwappiness = cmd.Int64([]string{"-memory-swappiness"}, -1, "Tuning container memory swappiness (0 to 100)") + flNetMode = cmd.String([]string{"-net"}, "default", "Set the Network mode for the container") + flMacAddress = cmd.String([]string{"-mac-address"}, "", "Container MAC address (e.g. 
92:d0:c6:0a:29:33)") + flIpcMode = cmd.String([]string{"-ipc"}, "", "IPC namespace to use") + flRestartPolicy = cmd.String([]string{"-restart"}, "no", "Restart policy to apply when a container exits") + flReadonlyRootfs = cmd.Bool([]string{"-read-only"}, false, "Mount the container's root filesystem as read only") + flLoggingDriver = cmd.String([]string{"-log-driver"}, "", "Logging driver for container") + flCgroupParent = cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container") + flVolumeDriver = cmd.String([]string{"-volume-driver"}, "", "Optional volume driver for the container") + ) + + cmd.Var(&flAttach, []string{"a", "-attach"}, "Attach to STDIN, STDOUT or STDERR") + cmd.Var(&flVolumes, []string{"v", "-volume"}, "Bind mount a volume") + cmd.Var(&flLinks, []string{"#link", "-link"}, "Add link to another container") + cmd.Var(&flDevices, []string{"-device"}, "Add a host device to the container") + cmd.Var(&flLabels, []string{"l", "-label"}, "Set meta data on a container") + cmd.Var(&flLabelsFile, []string{"-label-file"}, "Read in a line delimited file of labels") + cmd.Var(&flEnv, []string{"e", "-env"}, "Set environment variables") + cmd.Var(&flEnvFile, []string{"-env-file"}, "Read in a file of environment variables") + cmd.Var(&flPublish, []string{"p", "-publish"}, "Publish a container's port(s) to the host") + cmd.Var(&flExpose, []string{"#expose", "-expose"}, "Expose a port or a range of ports") + cmd.Var(&flDns, []string{"#dns", "-dns"}, "Set custom DNS servers") + cmd.Var(&flDnsSearch, []string{"-dns-search"}, "Set custom DNS search domains") + cmd.Var(&flExtraHosts, []string{"-add-host"}, "Add a custom host-to-IP mapping (host:ip)") + cmd.Var(&flVolumesFrom, []string{"#volumes-from", "-volumes-from"}, "Mount volumes from the specified container(s)") + cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "Add custom lxc options") + cmd.Var(&flCapAdd, []string{"-cap-add"}, "Add Linux capabilities") + cmd.Var(&flCapDrop, []string{"-cap-drop"}, "Drop Linux capabilities") + cmd.Var(&flGroupAdd, []string{"-group-add"}, "Add additional groups to join") + cmd.Var(&flSecurityOpt, []string{"-security-opt"}, "Security Options") + cmd.Var(flUlimits, []string{"-ulimit"}, "Ulimit options") + cmd.Var(&flLoggingOpts, []string{"-log-opt"}, "Log driver options") + + expFlags := attachExperimentalFlags(cmd) + + cmd.Require(flag.Min, 1) + + if err := cmd.ParseFlags(args, true); err != nil { + return nil, nil, cmd, err + } + + var ( + attachStdin = flAttach.Get("stdin") + attachStdout = flAttach.Get("stdout") + attachStderr = flAttach.Get("stderr") + ) + + netMode, err := parseNetMode(*flNetMode) + if err != nil { + return nil, nil, cmd, fmt.Errorf("--net: invalid net mode: %v", err) + } + + vals := validateNM{ + netMode: netMode, + flHostname: flHostname, + flLinks: flLinks, + flDns: flDns, + flExtraHosts: flExtraHosts, + flMacAddress: flMacAddress, + flPublish: flPublish, + flPublishAll: flPublishAll, + flExpose: flExpose, + } + + if err := validateNetMode(&vals); err != nil { + return nil, nil, cmd, err + } + + // Validate the input mac address + if *flMacAddress != "" { + if _, err := opts.ValidateMACAddress(*flMacAddress); err != nil { + return nil, nil, cmd, fmt.Errorf("%s is not a valid mac address", *flMacAddress) + } + } + if *flStdin { + attachStdin = true + } + // If -a is not set attach to the output stdio + if flAttach.Len() == 0 { + attachStdout = true + attachStderr = true + } + + var flMemory int64 + if *flMemoryString != "" { + parsedMemory, err := 
units.RAMInBytes(*flMemoryString) + if err != nil { + return nil, nil, cmd, err + } + flMemory = parsedMemory + } + + var MemorySwap int64 + if *flMemorySwap != "" { + if *flMemorySwap == "-1" { + MemorySwap = -1 + } else { + parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap) + if err != nil { + return nil, nil, cmd, err + } + MemorySwap = parsedMemorySwap + } + } + + swappiness := *flSwappiness + if swappiness != -1 && (swappiness < 0 || swappiness > 100) { + return nil, nil, cmd, fmt.Errorf("Invalid value: %d. Valid memory swappiness range is 0-100", swappiness) + } + + var binds []string + // add any bind targets to the list of container volumes + for bind := range flVolumes.GetMap() { + if arr := strings.Split(bind, ":"); len(arr) > 1 { + if arr[1] == "/" { + return nil, nil, cmd, fmt.Errorf("Invalid bind mount: destination can't be '/'") + } + // after creating the bind mount we want to delete it from the flVolumes values because + // we do not want bind mounts being committed to image configs + binds = append(binds, bind) + flVolumes.Delete(bind) + } else if bind == "/" { + return nil, nil, cmd, fmt.Errorf("Invalid volume: path can't be '/'") + } + } + + var ( + parsedArgs = cmd.Args() + runCmd *Command + entrypoint *Entrypoint + image = cmd.Arg(0) + ) + if len(parsedArgs) > 1 { + runCmd = NewCommand(parsedArgs[1:]...) + } + if *flEntrypoint != "" { + entrypoint = NewEntrypoint(*flEntrypoint) + } + + lc, err := parseKeyValueOpts(flLxcOpts) + if err != nil { + return nil, nil, cmd, err + } + lxcConf := NewLxcConfig(lc) + + var ( + domainname string + hostname = *flHostname + parts = strings.SplitN(hostname, ".", 2) + ) + if len(parts) > 1 { + hostname = parts[0] + domainname = parts[1] + } + + ports, portBindings, err := nat.ParsePortSpecs(flPublish.GetAll()) + if err != nil { + return nil, nil, cmd, err + } + + // Merge in exposed ports to the map of published ports + for _, e := range flExpose.GetAll() { + if strings.Contains(e, ":") { + return nil, nil, cmd, fmt.Errorf("Invalid port format for --expose: %s", e) + } + //support two formats for expose, original format /[] or /[] + proto, port := nat.SplitProtoPort(e) + //parse the start and end port and create a sequence of ports to expose + //if expose a port, the start and end port are the same + start, end, err := parsers.ParsePortRange(port) + if err != nil { + return nil, nil, cmd, fmt.Errorf("Invalid range format for --expose: %s, error: %s", e, err) + } + for i := start; i <= end; i++ { + p, err := nat.NewPort(proto, strconv.FormatUint(i, 10)) + if err != nil { + return nil, nil, cmd, err + } + if _, exists := ports[p]; !exists { + ports[p] = struct{}{} + } + } + } + + // parse device mappings + deviceMappings := []DeviceMapping{} + for _, device := range flDevices.GetAll() { + deviceMapping, err := ParseDevice(device) + if err != nil { + return nil, nil, cmd, err + } + deviceMappings = append(deviceMappings, deviceMapping) + } + + // collect all the environment variables for the container + envVariables, err := readKVStrings(flEnvFile.GetAll(), flEnv.GetAll()) + if err != nil { + return nil, nil, cmd, err + } + + // collect all the labels for the container + labels, err := readKVStrings(flLabelsFile.GetAll(), flLabels.GetAll()) + if err != nil { + return nil, nil, cmd, err + } + + ipcMode := IpcMode(*flIpcMode) + if !ipcMode.Valid() { + return nil, nil, cmd, fmt.Errorf("--ipc: invalid IPC mode") + } + + pidMode := PidMode(*flPidMode) + if !pidMode.Valid() { + return nil, nil, cmd, fmt.Errorf("--pid: invalid PID mode") + 
} + + utsMode := UTSMode(*flUTSMode) + if !utsMode.Valid() { + return nil, nil, cmd, fmt.Errorf("--uts: invalid UTS mode") + } + + restartPolicy, err := ParseRestartPolicy(*flRestartPolicy) + if err != nil { + return nil, nil, cmd, err + } + + loggingOpts, err := parseLoggingOpts(*flLoggingDriver, flLoggingOpts.GetAll()) + if err != nil { + return nil, nil, cmd, err + } + + config := &Config{ + Hostname: hostname, + Domainname: domainname, + ExposedPorts: ports, + User: *flUser, + Tty: *flTty, + NetworkDisabled: !*flNetwork, + OpenStdin: *flStdin, + AttachStdin: attachStdin, + AttachStdout: attachStdout, + AttachStderr: attachStderr, + Env: envVariables, + Cmd: runCmd, + Image: image, + Volumes: flVolumes.GetMap(), + MacAddress: *flMacAddress, + Entrypoint: entrypoint, + WorkingDir: *flWorkingDir, + Labels: convertKVStringsToMap(labels), + VolumeDriver: *flVolumeDriver, + } + + hostConfig := &HostConfig{ + Binds: binds, + ContainerIDFile: *flContainerIDFile, + LxcConf: lxcConf, + Memory: flMemory, + MemorySwap: MemorySwap, + CpuShares: *flCpuShares, + CpuPeriod: *flCpuPeriod, + CpusetCpus: *flCpusetCpus, + CpusetMems: *flCpusetMems, + CpuQuota: *flCpuQuota, + BlkioWeight: *flBlkioWeight, + OomKillDisable: *flOomKillDisable, + MemorySwappiness: flSwappiness, + Privileged: *flPrivileged, + PortBindings: portBindings, + Links: flLinks.GetAll(), + PublishAllPorts: *flPublishAll, + Dns: flDns.GetAll(), + DnsSearch: flDnsSearch.GetAll(), + ExtraHosts: flExtraHosts.GetAll(), + VolumesFrom: flVolumesFrom.GetAll(), + NetworkMode: netMode, + IpcMode: ipcMode, + PidMode: pidMode, + UTSMode: utsMode, + Devices: deviceMappings, + CapAdd: NewCapList(flCapAdd.GetAll()), + CapDrop: NewCapList(flCapDrop.GetAll()), + GroupAdd: flGroupAdd.GetAll(), + RestartPolicy: restartPolicy, + SecurityOpt: flSecurityOpt.GetAll(), + ReadonlyRootfs: *flReadonlyRootfs, + Ulimits: flUlimits.GetList(), + LogConfig: LogConfig{Type: *flLoggingDriver, Config: loggingOpts}, + CgroupParent: *flCgroupParent, + } + + applyExperimentalFlags(expFlags, config, hostConfig) + + // When allocating stdin in attached mode, close stdin at client disconnect + if config.OpenStdin && config.AttachStdin { + config.StdinOnce = true + } + return config, hostConfig, cmd, nil +} + +// reads a file of line terminated key=value pairs and override that with override parameter +func readKVStrings(files []string, override []string) ([]string, error) { + envVariables := []string{} + for _, ef := range files { + parsedVars, err := opts.ParseEnvFile(ef) + if err != nil { + return nil, err + } + envVariables = append(envVariables, parsedVars...) + } + // parse the '-e' and '--env' after, to allow override + envVariables = append(envVariables, override...) 
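+	// e.g. a file containing "A=1\nB=2" plus override ["B=3"] yields
+	// ["A=1", "B=2", "B=3"]; the later duplicate is what ultimately wins
+	// downstream (a sketch of the intended precedence, inferred from the
+	// append order above, not stated elsewhere in this file).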
+ + return envVariables, nil +} + +// converts ["key=value"] to {"key":"value"} +func convertKVStringsToMap(values []string) map[string]string { + result := make(map[string]string, len(values)) + for _, value := range values { + kv := strings.SplitN(value, "=", 2) + if len(kv) == 1 { + result[kv[0]] = "" + } else { + result[kv[0]] = kv[1] + } + } + + return result +} + +func parseLoggingOpts(loggingDriver string, loggingOpts []string) (map[string]string, error) { + loggingOptsMap := convertKVStringsToMap(loggingOpts) + if loggingDriver == "none" && len(loggingOpts) > 0 { + return map[string]string{}, fmt.Errorf("Invalid logging opts for driver %s", loggingDriver) + } + return loggingOptsMap, nil +} + +// ParseRestartPolicy returns the parsed policy or an error indicating what is incorrect +func ParseRestartPolicy(policy string) (RestartPolicy, error) { + p := RestartPolicy{} + + if policy == "" { + return p, nil + } + + var ( + parts = strings.Split(policy, ":") + name = parts[0] + ) + + p.Name = name + switch name { + case "always": + if len(parts) > 1 { + return p, fmt.Errorf("maximum restart count not valid with restart policy of \"always\"") + } + case "no": + // do nothing + case "on-failure": + if len(parts) > 2 { + return p, fmt.Errorf("restart count format is not valid, usage: 'on-failure:N' or 'on-failure'") + } + if len(parts) == 2 { + count, err := strconv.Atoi(parts[1]) + if err != nil { + return p, err + } + + p.MaximumRetryCount = count + } + default: + return p, fmt.Errorf("invalid restart policy %s", name) + } + + return p, nil +} + +func parseKeyValueOpts(opts opts.ListOpts) ([]KeyValuePair, error) { + out := make([]KeyValuePair, opts.Len()) + for i, o := range opts.GetAll() { + k, v, err := parsers.ParseKeyValueOpt(o) + if err != nil { + return nil, err + } + out[i] = KeyValuePair{Key: k, Value: v} + } + return out, nil +} + +func ParseDevice(device string) (DeviceMapping, error) { + src := "" + dst := "" + permissions := "rwm" + arr := strings.Split(device, ":") + switch len(arr) { + case 3: + permissions = arr[2] + fallthrough + case 2: + dst = arr[1] + fallthrough + case 1: + src = arr[0] + default: + return DeviceMapping{}, fmt.Errorf("Invalid device specification: %s", device) + } + + if dst == "" { + dst = src + } + + deviceMapping := DeviceMapping{ + PathOnHost: src, + PathInContainer: dst, + CgroupPermissions: permissions, + } + return deviceMapping, nil +} diff --git a/runconfig/parse_experimental.go b/runconfig/parse_experimental.go new file mode 100644 index 00000000..8f8612ba --- /dev/null +++ b/runconfig/parse_experimental.go @@ -0,0 +1,19 @@ +// +build experimental + +package runconfig + +import flag "github.com/docker/docker/pkg/mflag" + +type experimentalFlags struct { + flags map[string]interface{} +} + +func attachExperimentalFlags(cmd *flag.FlagSet) *experimentalFlags { + flags := make(map[string]interface{}) + flags["publish-service"] = cmd.String([]string{"-publish-service"}, "", "Publish this container as a service") + return &experimentalFlags{flags: flags} +} + +func applyExperimentalFlags(exp *experimentalFlags, config *Config, hostConfig *HostConfig) { + config.PublishService = *(exp.flags["publish-service"]).(*string) +} diff --git a/runconfig/parse_stub.go b/runconfig/parse_stub.go new file mode 100644 index 00000000..391b6ed4 --- /dev/null +++ b/runconfig/parse_stub.go @@ -0,0 +1,14 @@ +// +build !experimental + +package runconfig + +import flag "github.com/docker/docker/pkg/mflag" + +type experimentalFlags struct{} + +func 
attachExperimentalFlags(cmd *flag.FlagSet) *experimentalFlags { + return nil +} + +func applyExperimentalFlags(flags *experimentalFlags, config *Config, hostConfig *HostConfig) { +} diff --git a/runconfig/parse_test.go b/runconfig/parse_test.go new file mode 100644 index 00000000..5e918769 --- /dev/null +++ b/runconfig/parse_test.go @@ -0,0 +1,558 @@ +package runconfig + +import ( + "fmt" + "io/ioutil" + "strings" + "testing" + + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/nat" + "github.com/docker/docker/pkg/parsers" +) + +func parseRun(args []string) (*Config, *HostConfig, *flag.FlagSet, error) { + cmd := flag.NewFlagSet("run", flag.ContinueOnError) + cmd.SetOutput(ioutil.Discard) + cmd.Usage = nil + return Parse(cmd, args) +} + +func parse(t *testing.T, args string) (*Config, *HostConfig, error) { + config, hostConfig, _, err := parseRun(strings.Split(args+" ubuntu bash", " ")) + return config, hostConfig, err +} + +func mustParse(t *testing.T, args string) (*Config, *HostConfig) { + config, hostConfig, err := parse(t, args) + if err != nil { + t.Fatal(err) + } + return config, hostConfig +} + +// check if (a == c && b == d) || (a == d && b == c) +// because maps are randomized +func compareRandomizedStrings(a, b, c, d string) error { + if a == c && b == d { + return nil + } + if a == d && b == c { + return nil + } + return fmt.Errorf("strings don't match") +} +func TestParseRunLinks(t *testing.T) { + if _, hostConfig := mustParse(t, "--link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" { + t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links) + } + if _, hostConfig := mustParse(t, "--link a:b --link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" { + t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links) + } + if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 { + t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links) + } +} + +func TestParseRunAttach(t *testing.T) { + if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, "-i"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect Stdin enabled. 
Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
+	}
+
+	if _, _, err := parse(t, "-a"); err == nil {
+		t.Fatalf("Error parsing attach flags, `-a` should be an error but is not")
+	}
+	if _, _, err := parse(t, "-a invalid"); err == nil {
+		t.Fatalf("Error parsing attach flags, `-a invalid` should be an error but is not")
+	}
+	if _, _, err := parse(t, "-a invalid -a stdout"); err == nil {
+		t.Fatalf("Error parsing attach flags, `-a invalid -a stdout` should be an error but is not")
+	}
+	if _, _, err := parse(t, "-a stdout -a stderr -d"); err == nil {
+		t.Fatalf("Error parsing attach flags, `-a stdout -a stderr -d` should be an error but is not")
+	}
+	if _, _, err := parse(t, "-a stdin -d"); err == nil {
+		t.Fatalf("Error parsing attach flags, `-a stdin -d` should be an error but is not")
+	}
+	if _, _, err := parse(t, "-a stdout -d"); err == nil {
+		t.Fatalf("Error parsing attach flags, `-a stdout -d` should be an error but is not")
+	}
+	if _, _, err := parse(t, "-a stderr -d"); err == nil {
+		t.Fatalf("Error parsing attach flags, `-a stderr -d` should be an error but is not")
+	}
+	if _, _, err := parse(t, "-d --rm"); err == nil {
+		t.Fatalf("Error parsing attach flags, `-d --rm` should be an error but is not")
+	}
+}
+
+func TestParseRunVolumes(t *testing.T) {
+	if config, hostConfig := mustParse(t, "-v /tmp"); hostConfig.Binds != nil {
+		t.Fatalf("Error parsing volume flags, `-v /tmp` should not mount-bind anything. Received %v", hostConfig.Binds)
+	} else if _, exists := config.Volumes["/tmp"]; !exists {
+		t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes)
+	}
+
+	if config, hostConfig := mustParse(t, "-v /tmp -v /var"); hostConfig.Binds != nil {
+		t.Fatalf("Error parsing volume flags, `-v /tmp -v /var` should not mount-bind anything. Received %v", hostConfig.Binds)
+	} else if _, exists := config.Volumes["/tmp"]; !exists {
+		t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes)
+	} else if _, exists := config.Volumes["/var"]; !exists {
+		t.Fatalf("Error parsing volume flags, `-v /var` is missing from volumes. Received %v", config.Volumes)
+	}
+
+	if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" {
+		t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp` should mount-bind /hostTmp into /containerTmp. Received %v", hostConfig.Binds)
+	}
+
+	if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /hostVar:/containerVar"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/hostTmp:/containerTmp", "/hostVar:/containerVar") != nil {
+		t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /hostVar:/containerVar` should mount-bind /hostTmp into /containerTmp and /hostVar into /containerVar. Received %v", hostConfig.Binds)
+	}
+
+	if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/hostTmp:/containerTmp:ro", "/hostVar:/containerVar:rw") != nil {
+		t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw` should mount-bind /hostTmp into /containerTmp and /hostVar into /containerVar. Received %v", hostConfig.Binds)
+	}
+
+	if _, hostConfig := mustParse(t, "-v /containerTmp:ro -v /containerVar:rw"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/containerTmp:ro", "/containerVar:rw") != nil {
+		t.Fatalf("Error parsing volume flags, `-v /containerTmp:ro -v /containerVar:rw` should mount-bind /containerTmp and /containerVar with the given modes. Received %v", hostConfig.Binds)
+	}
+
+	if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:ro,Z -v /hostVar:/containerVar:rw,Z"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/hostTmp:/containerTmp:ro,Z", "/hostVar:/containerVar:rw,Z") != nil {
+		t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:ro,Z -v /hostVar:/containerVar:rw,Z` should mount-bind /hostTmp into /containerTmp and /hostVar into /containerVar. Received %v", hostConfig.Binds)
+	}
+
+	if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:Z -v /hostVar:/containerVar:z"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/hostTmp:/containerTmp:Z", "/hostVar:/containerVar:z") != nil {
+		t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:Z -v /hostVar:/containerVar:z` should mount-bind /hostTmp into /containerTmp and /hostVar into /containerVar. Received %v", hostConfig.Binds)
+	}
+
+	if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /containerVar"); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != "/hostTmp:/containerTmp" {
+		t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /containerVar` should mount-bind only /hostTmp into /containerTmp. Received %v", hostConfig.Binds)
+	} else if _, exists := config.Volumes["/containerVar"]; !exists {
+		t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes)
+	}
+
+	if config, hostConfig := mustParse(t, ""); hostConfig.Binds != nil {
+		t.Fatalf("Error parsing volume flags, without volume, nothing should be mount-bound. Received %v", hostConfig.Binds)
+	} else if len(config.Volumes) != 0 {
+		t.Fatalf("Error parsing volume flags, without volume, no volume should be present.
Received %v", config.Volumes) + } + + if _, _, err := parse(t, "-v /"); err == nil { + t.Fatalf("Expected error, but got none") + } + + if _, _, err := parse(t, "-v /:/"); err == nil { + t.Fatalf("Error parsing volume flags, `-v /:/` should fail but didn't") + } + if _, _, err := parse(t, "-v"); err == nil { + t.Fatalf("Error parsing volume flags, `-v` should fail but didn't") + } + if _, _, err := parse(t, "-v /tmp:"); err == nil { + t.Fatalf("Error parsing volume flags, `-v /tmp:` should fail but didn't") + } + if _, _, err := parse(t, "-v /tmp::"); err == nil { + t.Fatalf("Error parsing volume flags, `-v /tmp::` should fail but didn't") + } + if _, _, err := parse(t, "-v :"); err == nil { + t.Fatalf("Error parsing volume flags, `-v :` should fail but didn't") + } + if _, _, err := parse(t, "-v ::"); err == nil { + t.Fatalf("Error parsing volume flags, `-v ::` should fail but didn't") + } + if _, _, err := parse(t, "-v /tmp:/tmp:/tmp:/tmp"); err == nil { + t.Fatalf("Error parsing volume flags, `-v /tmp:/tmp:/tmp:/tmp` should fail but didn't") + } +} + +func TestParseLxcConfOpt(t *testing.T) { + opts := []string{"lxc.utsname=docker", "lxc.utsname = docker "} + + for _, o := range opts { + k, v, err := parsers.ParseKeyValueOpt(o) + if err != nil { + t.FailNow() + } + if k != "lxc.utsname" { + t.Fail() + } + if v != "docker" { + t.Fail() + } + } + + // With parseRun too + _, hostconfig, _, err := parseRun([]string{"lxc.utsname=docker", "lxc.utsname = docker ", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + for _, lxcConf := range hostconfig.LxcConf.Slice() { + if lxcConf.Key != "lxc.utsname" || lxcConf.Value != "docker" { + t.Fail() + } + } + +} + +func TestNetHostname(t *testing.T) { + if _, _, _, err := parseRun([]string{"-h=name", "img", "cmd"}); err != nil { + t.Fatalf("Unexpected error: %s", err) + } + + if _, _, _, err := parseRun([]string{"--net=host", "img", "cmd"}); err != nil { + t.Fatalf("Unexpected error: %s", err) + } + + if _, _, _, err := parseRun([]string{"-h=name", "--net=bridge", "img", "cmd"}); err != nil { + t.Fatalf("Unexpected error: %s", err) + } + + if _, _, _, err := parseRun([]string{"-h=name", "--net=none", "img", "cmd"}); err != nil { + t.Fatalf("Unexpected error: %s", err) + } + + if _, _, _, err := parseRun([]string{"-h=name", "--net=host", "img", "cmd"}); err != ErrConflictNetworkHostname { + t.Fatalf("Expected error ErrConflictNetworkHostname, got: %s", err) + } + + if _, _, _, err := parseRun([]string{"-h=name", "--net=container:other", "img", "cmd"}); err != ErrConflictNetworkHostname { + t.Fatalf("Expected error ErrConflictNetworkHostname, got: %s", err) + } + if _, _, _, err := parseRun([]string{"--net=container", "img", "cmd"}); err == nil || err.Error() != "--net: invalid net mode: invalid container format container:" { + t.Fatalf("Expected error with --net=container, got : %v", err) + } + if _, _, _, err := parseRun([]string{"--net=weird", "img", "cmd"}); err == nil || err.Error() != "--net: invalid net mode: invalid --net: weird" { + t.Fatalf("Expected error with --net=weird, got: %s", err) + } +} + +func TestConflictContainerNetworkAndLinks(t *testing.T) { + if _, _, _, err := parseRun([]string{"--net=container:other", "--link=zip:zap", "img", "cmd"}); err != ErrConflictContainerNetworkAndLinks { + t.Fatalf("Expected error ErrConflictContainerNetworkAndLinks, got: %s", err) + } + if _, _, _, err := parseRun([]string{"--net=host", "--link=zip:zap", "img", "cmd"}); err != ErrConflictHostNetworkAndLinks { + t.Fatalf("Expected error 
ErrConflictHostNetworkAndLinks, got: %s", err)
+	}
+}
+
+func TestConflictNetworkModeAndOptions(t *testing.T) {
+	if _, _, _, err := parseRun([]string{"--net=host", "--dns=8.8.8.8", "img", "cmd"}); err != ErrConflictNetworkAndDns {
+		t.Fatalf("Expected error ErrConflictNetworkAndDns, got %s", err)
+	}
+	if _, _, _, err := parseRun([]string{"--net=container:other", "--dns=8.8.8.8", "img", "cmd"}); err != ErrConflictNetworkAndDns {
+		t.Fatalf("Expected error ErrConflictNetworkAndDns, got %s", err)
+	}
+	if _, _, _, err := parseRun([]string{"--net=host", "--add-host=name:8.8.8.8", "img", "cmd"}); err != ErrConflictNetworkHosts {
+		t.Fatalf("Expected error ErrConflictNetworkHosts, got %s", err)
+	}
+	if _, _, _, err := parseRun([]string{"--net=container:other", "--add-host=name:8.8.8.8", "img", "cmd"}); err != ErrConflictNetworkHosts {
+		t.Fatalf("Expected error ErrConflictNetworkHosts, got %s", err)
+	}
+	if _, _, _, err := parseRun([]string{"--net=host", "--mac-address=92:d0:c6:0a:29:33", "img", "cmd"}); err != ErrConflictContainerNetworkAndMac {
+		t.Fatalf("Expected error ErrConflictContainerNetworkAndMac, got %s", err)
+	}
+	if _, _, _, err := parseRun([]string{"--net=container:other", "--mac-address=92:d0:c6:0a:29:33", "img", "cmd"}); err != ErrConflictContainerNetworkAndMac {
+		t.Fatalf("Expected error ErrConflictContainerNetworkAndMac, got %s", err)
+	}
+	if _, _, _, err := parseRun([]string{"--net=container:other", "-P", "img", "cmd"}); err != ErrConflictNetworkPublishPorts {
+		t.Fatalf("Expected error ErrConflictNetworkPublishPorts, got %s", err)
+	}
+	if _, _, _, err := parseRun([]string{"--net=container:other", "-p", "8080", "img", "cmd"}); err != ErrConflictNetworkPublishPorts {
+		t.Fatalf("Expected error ErrConflictNetworkPublishPorts, got %s", err)
+	}
+	if _, _, _, err := parseRun([]string{"--net=container:other", "--expose", "8000-9000", "img", "cmd"}); err != ErrConflictNetworkExposePorts {
+		t.Fatalf("Expected error ErrConflictNetworkExposePorts, got %s", err)
+	}
+}
+
+// Simple parse with MacAddress validation
+func TestParseWithMacAddress(t *testing.T) {
+	invalidMacAddress := "--mac-address=invalidMacAddress"
+	validMacAddress := "--mac-address=92:d0:c6:0a:29:33"
+	if _, _, _, err := parseRun([]string{invalidMacAddress, "img", "cmd"}); err == nil || err.Error() != "invalidMacAddress is not a valid mac address" {
+		t.Fatalf("Expected an error with %v mac-address, got %v", invalidMacAddress, err)
+	}
+	if config, _ := mustParse(t, validMacAddress); config.MacAddress != "92:d0:c6:0a:29:33" {
+		t.Fatalf("Expected the config to have '92:d0:c6:0a:29:33' as MacAddress, got '%v'", config.MacAddress)
+	}
+}
+
+func TestParseWithMemory(t *testing.T) {
+	invalidMemory := "--memory=invalid"
+	validMemory := "--memory=1G"
+	if _, _, _, err := parseRun([]string{invalidMemory, "img", "cmd"}); err == nil || err.Error() != "invalid size: 'invalid'" {
+		t.Fatalf("Expected an error with '%v' Memory, got '%v'", invalidMemory, err)
+	}
+	if _, hostconfig := mustParse(t, validMemory); hostconfig.Memory != 1073741824 {
+		t.Fatalf("Expected the config to have '1G' as Memory, got '%v'", hostconfig.Memory)
+	}
+}
+
+func TestParseWithMemorySwap(t *testing.T) {
+	invalidMemory := "--memory-swap=invalid"
+	validMemory := "--memory-swap=1G"
+	anotherValidMemory := "--memory-swap=-1"
+	if _, _, _, err := parseRun([]string{invalidMemory, "img", "cmd"}); err == nil || err.Error() != "invalid size: 'invalid'" {
+		t.Fatalf("Expected an error with '%v' MemorySwap, got '%v'", invalidMemory, err)
+	}
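+	// The two checks below pin the happy paths: "1G" must come back from
+	// units.RAMInBytes as 1073741824 bytes, and the sentinel "-1" must be
+	// passed through untouched so the daemon can disable swap limiting
+	// (matching the --memory-swap flag help above).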
+	if _, hostconfig := mustParse(t, validMemory); hostconfig.MemorySwap != 1073741824 {
+		t.Fatalf("Expected the config to have '1073741824' as MemorySwap, got '%v'", hostconfig.MemorySwap)
+	}
+	if _, hostconfig := mustParse(t, anotherValidMemory); hostconfig.MemorySwap != -1 {
+		t.Fatalf("Expected the config to have '-1' as MemorySwap, got '%v'", hostconfig.MemorySwap)
+	}
+}
+
+func TestParseHostname(t *testing.T) {
+	hostname := "--hostname=hostname"
+	hostnameWithDomain := "--hostname=hostname.domainname"
+	hostnameWithDomainTld := "--hostname=hostname.domainname.tld"
+	if config, _ := mustParse(t, hostname); config.Hostname != "hostname" || config.Domainname != "" {
+		t.Fatalf("Expected the config to have 'hostname' as hostname, got '%v'", config.Hostname)
+	}
+	if config, _ := mustParse(t, hostnameWithDomain); config.Hostname != "hostname" || config.Domainname != "domainname" {
+		t.Fatalf("Expected the config to have 'hostname' as hostname, got '%v'", config.Hostname)
+	}
+	if config, _ := mustParse(t, hostnameWithDomainTld); config.Hostname != "hostname" || config.Domainname != "domainname.tld" {
+		t.Fatalf("Expected the config to have 'hostname' as hostname, got '%v'", config.Hostname)
+	}
+}
+
+func TestParseWithExpose(t *testing.T) {
+	invalids := map[string]string{
+		":":                   "Invalid port format for --expose: :",
+		"8080:9090":           "Invalid port format for --expose: 8080:9090",
+		"/tcp":                "Invalid range format for --expose: /tcp, error: Empty string specified for ports.",
+		"/udp":                "Invalid range format for --expose: /udp, error: Empty string specified for ports.",
+		"NaN/tcp":             `Invalid range format for --expose: NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`,
+		"NaN-NaN/tcp":         `Invalid range format for --expose: NaN-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`,
+		"8080-NaN/tcp":        `Invalid range format for --expose: 8080-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`,
+		"1234567890-8080/tcp": `Invalid range format for --expose: 1234567890-8080/tcp, error: strconv.ParseUint: parsing "1234567890": value out of range`,
+	}
+	valids := map[string][]nat.Port{
+		"8080/tcp":      {"8080/tcp"},
+		"8080/udp":      {"8080/udp"},
+		"8080/ncp":      {"8080/ncp"},
+		"8080-8080/udp": {"8080/udp"},
+		"8080-8082/tcp": {"8080/tcp", "8081/tcp", "8082/tcp"},
+	}
+	for expose, expectedError := range invalids {
+		if _, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"}); err == nil || err.Error() != expectedError {
+			t.Fatalf("Expected error '%v' with '--expose=%v', got '%v'", expectedError, expose, err)
+		}
+	}
+	for expose, exposedPorts := range valids {
+		config, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"})
+		if err != nil {
+			t.Fatal(err)
+		}
+		if len(config.ExposedPorts) != len(exposedPorts) {
+			t.Fatalf("Expected %v exposed port, got %v", len(exposedPorts), len(config.ExposedPorts))
+		}
+		for _, port := range exposedPorts {
+			if _, ok := config.ExposedPorts[port]; !ok {
+				t.Fatalf("Expected %v, got %v", exposedPorts, config.ExposedPorts)
+			}
+		}
+	}
+	// Merge with actual published port
+	config, _, _, err := parseRun([]string{"--publish=80", "--expose=80-81/tcp", "img", "cmd"})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(config.ExposedPorts) != 2 {
+		t.Fatalf("Expected 2 exposed ports, got %v", config.ExposedPorts)
+	}
+	ports := []nat.Port{"80/tcp", "81/tcp"}
+	for _, port := range ports {
+		if _, ok := config.ExposedPorts[port]; !ok {
+			t.Fatalf("Expected %v, got %v", ports,
config.ExposedPorts) + } + } +} + +func TestParseDevice(t *testing.T) { + valids := map[string]DeviceMapping{ + "/dev/snd": { + PathOnHost: "/dev/snd", + PathInContainer: "/dev/snd", + CgroupPermissions: "rwm", + }, + "/dev/snd:/something": { + PathOnHost: "/dev/snd", + PathInContainer: "/something", + CgroupPermissions: "rwm", + }, + "/dev/snd:/something:ro": { + PathOnHost: "/dev/snd", + PathInContainer: "/something", + CgroupPermissions: "ro", + }, + } + for device, deviceMapping := range valids { + _, hostconfig, _, err := parseRun([]string{fmt.Sprintf("--device=%v", device), "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(hostconfig.Devices) != 1 { + t.Fatalf("Expected 1 devices, got %v", hostconfig.Devices) + } + if hostconfig.Devices[0] != deviceMapping { + t.Fatalf("Expected %v, got %v", deviceMapping, hostconfig.Devices) + } + } + +} + +func TestParseModes(t *testing.T) { + // ipc ko + if _, _, _, err := parseRun([]string{"--ipc=container:", "img", "cmd"}); err == nil || err.Error() != "--ipc: invalid IPC mode" { + t.Fatalf("Expected an error with message '--ipc: invalid IPC mode', got %v", err) + } + // ipc ok + _, hostconfig, _, err := parseRun([]string{"--ipc=host", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if !hostconfig.IpcMode.Valid() { + t.Fatalf("Expected a valid IpcMode, got %v", hostconfig.IpcMode) + } + // pid ko + if _, _, _, err := parseRun([]string{"--pid=container:", "img", "cmd"}); err == nil || err.Error() != "--pid: invalid PID mode" { + t.Fatalf("Expected an error with message '--pid: invalid PID mode', got %v", err) + } + // pid ok + _, hostconfig, _, err = parseRun([]string{"--pid=host", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if !hostconfig.PidMode.Valid() { + t.Fatalf("Expected a valid PidMode, got %v", hostconfig.PidMode) + } + // uts ko + if _, _, _, err := parseRun([]string{"--uts=container:", "img", "cmd"}); err == nil || err.Error() != "--uts: invalid UTS mode" { + t.Fatalf("Expected an error with message '--uts: invalid UTS mode', got %v", err) + } + // uts ok + _, hostconfig, _, err = parseRun([]string{"--uts=host", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if !hostconfig.UTSMode.Valid() { + t.Fatalf("Expected a valid UTSMode, got %v", hostconfig.UTSMode) + } +} + +func TestParseRestartPolicy(t *testing.T) { + invalids := map[string]string{ + "something": "invalid restart policy something", + "always:2": "maximum restart count not valid with restart policy of \"always\"", + "always:2:3": "maximum restart count not valid with restart policy of \"always\"", + "on-failure:invalid": `strconv.ParseInt: parsing "invalid": invalid syntax`, + "on-failure:2:5": "restart count format is not valid, usage: 'on-failure:N' or 'on-failure'", + } + valids := map[string]RestartPolicy{ + "": {}, + "always": { + Name: "always", + MaximumRetryCount: 0, + }, + "on-failure:1": { + Name: "on-failure", + MaximumRetryCount: 1, + }, + } + for restart, expectedError := range invalids { + if _, _, _, err := parseRun([]string{fmt.Sprintf("--restart=%s", restart), "img", "cmd"}); err == nil || err.Error() != expectedError { + t.Fatalf("Expected an error with message '%v' for %v, got %v", expectedError, restart, err) + } + } + for restart, expected := range valids { + _, hostconfig, _, err := parseRun([]string{fmt.Sprintf("--restart=%v", restart), "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if hostconfig.RestartPolicy != expected { + t.Fatalf("Expected %v, got %v", expected, hostconfig.RestartPolicy) + } + } +} + +func 
TestParseLoggingOpts(t *testing.T) {
+	// logging opts ko
+	if _, _, _, err := parseRun([]string{"--log-driver=none", "--log-opt=anything", "img", "cmd"}); err == nil || err.Error() != "Invalid logging opts for driver none" {
+		t.Fatalf("Expected an error with message 'Invalid logging opts for driver none', got %v", err)
+	}
+	// logging opts ok
+	_, hostconfig, _, err := parseRun([]string{"--log-driver=syslog", "--log-opt=something", "img", "cmd"})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if hostconfig.LogConfig.Type != "syslog" || len(hostconfig.LogConfig.Config) != 1 {
+		t.Fatalf("Expected a 'syslog' LogConfig with one config, got %v", hostconfig.LogConfig)
+	}
+}
+
+func TestParseEnvfileVariables(t *testing.T) {
+	// env ko
+	if _, _, _, err := parseRun([]string{"--env-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != "open nonexistent: no such file or directory" {
+		t.Fatalf("Expected an error with message 'open nonexistent: no such file or directory', got %v", err)
+	}
+	// env ok
+	config, _, _, err := parseRun([]string{"--env-file=fixtures/valid.env", "img", "cmd"})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(config.Env) != 1 || config.Env[0] != "ENV1=value1" {
+		t.Fatalf("Expected a config with [ENV1=value1], got %v", config.Env)
+	}
+	config, _, _, err = parseRun([]string{"--env-file=fixtures/valid.env", "--env=ENV2=value2", "img", "cmd"})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(config.Env) != 2 || config.Env[0] != "ENV1=value1" || config.Env[1] != "ENV2=value2" {
+		t.Fatalf("Expected a config with [ENV1=value1 ENV2=value2], got %v", config.Env)
+	}
+}
+
+func TestParseLabelfileVariables(t *testing.T) {
+	// label ko
+	if _, _, _, err := parseRun([]string{"--label-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != "open nonexistent: no such file or directory" {
+		t.Fatalf("Expected an error with message 'open nonexistent: no such file or directory', got %v", err)
+	}
+	// label ok
+	config, _, _, err := parseRun([]string{"--label-file=fixtures/valid.label", "img", "cmd"})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(config.Labels) != 1 || config.Labels["LABEL1"] != "value1" {
+		t.Fatalf("Expected a config with [LABEL1:value1], got %v", config.Labels)
+	}
+	config, _, _, err = parseRun([]string{"--label-file=fixtures/valid.label", "--label=LABEL2=value2", "img", "cmd"})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(config.Labels) != 2 || config.Labels["LABEL1"] != "value1" || config.Labels["LABEL2"] != "value2" {
+		t.Fatalf("Expected a config with [LABEL1:value1 LABEL2:value2], got %v", config.Labels)
+	}
+}
+
+func TestParseEntryPoint(t *testing.T) {
+	config, _, _, err := parseRun([]string{"--entrypoint=anything", "cmd", "img"})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if config.Entrypoint.Len() != 1 || config.Entrypoint.parts[0] != "anything" {
+		t.Fatalf("Expected entrypoint 'anything', got %v", config.Entrypoint)
+	}
+}
diff --git a/runconfig/parse_unix.go b/runconfig/parse_unix.go
new file mode 100644
index 00000000..067140c7
--- /dev/null
+++ b/runconfig/parse_unix.go
@@ -0,0 +1,58 @@
+// +build !windows
+
+package runconfig
+
+import (
+	"fmt"
+	"strings"
+)
+
+func parseNetMode(netMode string) (NetworkMode, error) {
+	parts := strings.Split(netMode, ":")
+	switch mode := parts[0]; mode {
+	case "default", "bridge", "none", "host":
+	case "container":
+		if len(parts) < 2 || parts[1] == "" {
+			return "", fmt.Errorf("invalid container format container:")
+		}
+	default:
+		return "", fmt.Errorf("invalid --net: %s", netMode)
+	}
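+	// At this point netMode is one of the accepted Unix values: "default",
+	// "bridge", "none", "host" or a "container:" reference. For example,
+	// parseNetMode("container:db") yields NetworkMode("container:db"),
+	// while parseNetMode("weird") is rejected by the switch above.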
return NetworkMode(netMode), nil +} + +func validateNetMode(vals *validateNM) error { + + if (vals.netMode.IsHost() || vals.netMode.IsContainer()) && *vals.flHostname != "" { + return ErrConflictNetworkHostname + } + + if vals.netMode.IsHost() && vals.flLinks.Len() > 0 { + return ErrConflictHostNetworkAndLinks + } + + if vals.netMode.IsContainer() && vals.flLinks.Len() > 0 { + return ErrConflictContainerNetworkAndLinks + } + + if (vals.netMode.IsHost() || vals.netMode.IsContainer()) && vals.flDns.Len() > 0 { + return ErrConflictNetworkAndDns + } + + if (vals.netMode.IsContainer() || vals.netMode.IsHost()) && vals.flExtraHosts.Len() > 0 { + return ErrConflictNetworkHosts + } + + if (vals.netMode.IsContainer() || vals.netMode.IsHost()) && *vals.flMacAddress != "" { + return ErrConflictContainerNetworkAndMac + } + + if vals.netMode.IsContainer() && (vals.flPublish.Len() > 0 || *vals.flPublishAll == true) { + return ErrConflictNetworkPublishPorts + } + + if vals.netMode.IsContainer() && vals.flExpose.Len() > 0 { + return ErrConflictNetworkExposePorts + } + return nil +} diff --git a/runconfig/parse_windows.go b/runconfig/parse_windows.go new file mode 100644 index 00000000..ca0a2e6d --- /dev/null +++ b/runconfig/parse_windows.go @@ -0,0 +1,20 @@ +package runconfig + +import ( + "fmt" + "strings" +) + +func parseNetMode(netMode string) (NetworkMode, error) { + parts := strings.Split(netMode, ":") + switch mode := parts[0]; mode { + case "default", "none": + default: + return "", fmt.Errorf("invalid --net: %s", netMode) + } + return NetworkMode(netMode), nil +} + +func validateNetMode(vals *validateNM) error { + return nil +} diff --git a/utils/experimental.go b/utils/experimental.go new file mode 100644 index 00000000..ceed0cb3 --- /dev/null +++ b/utils/experimental.go @@ -0,0 +1,9 @@ +// +build experimental + +package utils + +// ExperimentalBuild is a stub which always returns true for +// builds that include the "experimental" build tag +func ExperimentalBuild() bool { + return true +} diff --git a/utils/git.go b/utils/git.go new file mode 100644 index 00000000..4d0bb164 --- /dev/null +++ b/utils/git.go @@ -0,0 +1,100 @@ +package utils + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/urlutil" +) + +// GitClone clones a repository into a newly created directory which +// will be under "docker-build-git" +func GitClone(remoteURL string) (string, error) { + if !urlutil.IsGitTransport(remoteURL) { + remoteURL = "https://" + remoteURL + } + root, err := ioutil.TempDir("", "docker-build-git") + if err != nil { + return "", err + } + + u, err := url.Parse(remoteURL) + if err != nil { + return "", err + } + + fragment := u.Fragment + clone := cloneArgs(u, root) + + if output, err := git(clone...); err != nil { + return "", fmt.Errorf("Error trying to use git: %s (%s)", err, output) + } + + return checkoutGit(fragment, root) +} + +func cloneArgs(remoteURL *url.URL, root string) []string { + args := []string{"clone", "--recursive"} + shallow := len(remoteURL.Fragment) == 0 + + if shallow && strings.HasPrefix(remoteURL.Scheme, "http") { + res, err := http.Head(fmt.Sprintf("%s/info/refs?service=git-upload-pack", remoteURL)) + if err != nil || res.Header.Get("Content-Type") != "application/x-git-upload-pack-advertisement" { + shallow = false + } + } + + if shallow { + args = append(args, "--depth", "1") + } + + if remoteURL.Fragment != "" { + remoteURL.Fragment = "" + } + 
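+	// The URL fragment ("#ref" or "#ref:dir") only steers the later
+	// checkoutGit step and must not leak into the URL handed to git; e.g.
+	// "git://github.com/docker/docker#test" is cloned as
+	// "git://github.com/docker/docker" and "test" is checked out afterwards
+	// (see TestCloneArgsStripFragment).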
+ return append(args, remoteURL.String(), root) +} + +func checkoutGit(fragment, root string) (string, error) { + refAndDir := strings.SplitN(fragment, ":", 2) + + if len(refAndDir[0]) != 0 { + if output, err := gitWithinDir(root, "checkout", refAndDir[0]); err != nil { + return "", fmt.Errorf("Error trying to use git: %s (%s)", err, output) + } + } + + if len(refAndDir) > 1 && len(refAndDir[1]) != 0 { + newCtx, err := symlink.FollowSymlinkInScope(filepath.Join(root, refAndDir[1]), root) + if err != nil { + return "", fmt.Errorf("Error setting git context, %q not within git root: %s", refAndDir[1], err) + } + + fi, err := os.Stat(newCtx) + if err != nil { + return "", err + } + if !fi.IsDir() { + return "", fmt.Errorf("Error setting git context, not a directory: %s", newCtx) + } + root = newCtx + } + + return root, nil +} + +func gitWithinDir(dir string, args ...string) ([]byte, error) { + a := []string{"--work-tree", dir, "--git-dir", filepath.Join(dir, ".git")} + return git(append(a, args...)...) +} + +func git(args ...string) ([]byte, error) { + return exec.Command("git", args...).CombinedOutput() +} diff --git a/utils/git_test.go b/utils/git_test.go new file mode 100644 index 00000000..e9eb5956 --- /dev/null +++ b/utils/git_test.go @@ -0,0 +1,186 @@ +package utils + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "path/filepath" + "reflect" + "testing" +) + +func TestCloneArgsSmartHttp(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + serverURL, _ := url.Parse(server.URL) + + serverURL.Path = "/repo.git" + gitURL := serverURL.String() + + mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { + q := r.URL.Query().Get("service") + w.Header().Set("Content-Type", fmt.Sprintf("application/x-%s-advertisement", q)) + }) + + args := cloneArgs(serverURL, "/tmp") + exp := []string{"clone", "--recursive", "--depth", "1", gitURL, "/tmp"} + if !reflect.DeepEqual(args, exp) { + t.Fatalf("Expected %v, got %v", exp, args) + } +} + +func TestCloneArgsDumbHttp(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + serverURL, _ := url.Parse(server.URL) + + serverURL.Path = "/repo.git" + gitURL := serverURL.String() + + mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + }) + + args := cloneArgs(serverURL, "/tmp") + exp := []string{"clone", "--recursive", gitURL, "/tmp"} + if !reflect.DeepEqual(args, exp) { + t.Fatalf("Expected %v, got %v", exp, args) + } +} + +func TestCloneArgsGit(t *testing.T) { + u, _ := url.Parse("git://github.com/docker/docker") + args := cloneArgs(u, "/tmp") + exp := []string{"clone", "--recursive", "--depth", "1", "git://github.com/docker/docker", "/tmp"} + if !reflect.DeepEqual(args, exp) { + t.Fatalf("Expected %v, got %v", exp, args) + } +} + +func TestCloneArgsStripFragment(t *testing.T) { + u, _ := url.Parse("git://github.com/docker/docker#test") + args := cloneArgs(u, "/tmp") + exp := []string{"clone", "--recursive", "git://github.com/docker/docker", "/tmp"} + if !reflect.DeepEqual(args, exp) { + t.Fatalf("Expected %v, got %v", exp, args) + } +} + +func TestCheckoutGit(t *testing.T) { + root, err := ioutil.TempDir("", "docker-build-git-checkout") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(root) + + gitDir := filepath.Join(root, "repo") + _, err = git("init", gitDir) + if err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "config", 
"user.email", "test@docker.com"); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "config", "user.name", "Docker test"); err != nil { + t.Fatal(err) + } + + if err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch"), 0644); err != nil { + t.Fatal(err) + } + + subDir := filepath.Join(gitDir, "subdir") + if err = os.Mkdir(subDir, 0755); err != nil { + t.Fatal(err) + } + + if err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 5000"), 0644); err != nil { + t.Fatal(err) + } + + if err = os.Symlink("../subdir", filepath.Join(gitDir, "parentlink")); err != nil { + t.Fatal(err) + } + + if err = os.Symlink("/subdir", filepath.Join(gitDir, "absolutelink")); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "add", "-A"); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "commit", "-am", "First commit"); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "checkout", "-b", "test"); err != nil { + t.Fatal(err) + } + + if err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 3000"), 0644); err != nil { + t.Fatal(err) + } + + if err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM busybox\nEXPOSE 5000"), 0644); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "add", "-A"); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "commit", "-am", "Branch commit"); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "checkout", "master"); err != nil { + t.Fatal(err) + } + + cases := []struct { + frag string + exp string + fail bool + }{ + {"", "FROM scratch", false}, + {"master", "FROM scratch", false}, + {":subdir", "FROM scratch\nEXPOSE 5000", false}, + {":nosubdir", "", true}, // missing directory error + {":Dockerfile", "", true}, // not a directory error + {"master:nosubdir", "", true}, + {"master:subdir", "FROM scratch\nEXPOSE 5000", false}, + {"master:parentlink", "FROM scratch\nEXPOSE 5000", false}, + {"master:absolutelink", "FROM scratch\nEXPOSE 5000", false}, + {"master:../subdir", "", true}, + {"test", "FROM scratch\nEXPOSE 3000", false}, + {"test:", "FROM scratch\nEXPOSE 3000", false}, + {"test:subdir", "FROM busybox\nEXPOSE 5000", false}, + } + + for _, c := range cases { + r, err := checkoutGit(c.frag, gitDir) + + fail := err != nil + if fail != c.fail { + t.Fatalf("Expected %v failure, error was %v\n", c.fail, err) + } + if c.fail { + continue + } + + b, err := ioutil.ReadFile(filepath.Join(r, "Dockerfile")) + if err != nil { + t.Fatal(err) + } + + if string(b) != c.exp { + t.Fatalf("Expected %v, was %v\n", c.exp, string(b)) + } + } +} diff --git a/utils/stubs.go b/utils/stubs.go new file mode 100644 index 00000000..8a496d39 --- /dev/null +++ b/utils/stubs.go @@ -0,0 +1,9 @@ +// +build !experimental + +package utils + +// ExperimentalBuild is a stub which always returns false for +// builds that do not include the "experimental" build tag +func ExperimentalBuild() bool { + return false +} diff --git a/utils/utils.go b/utils/utils.go new file mode 100644 index 00000000..57418fda --- /dev/null +++ b/utils/utils.go @@ -0,0 +1,288 @@ +package utils + +import ( + "bufio" + "crypto/sha1" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/autogen/dockerversion" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/fileutils" + 
"github.com/docker/docker/pkg/stringid" +) + +// SelfPath figures out the absolute path of our own binary (if it's still around). +func SelfPath() string { + path, err := exec.LookPath(os.Args[0]) + if err != nil { + if os.IsNotExist(err) { + return "" + } + if execErr, ok := err.(*exec.Error); ok && os.IsNotExist(execErr.Err) { + return "" + } + panic(err) + } + path, err = filepath.Abs(path) + if err != nil { + if os.IsNotExist(err) { + return "" + } + panic(err) + } + return path +} + +func dockerInitSha1(target string) string { + f, err := os.Open(target) + if err != nil { + return "" + } + defer f.Close() + h := sha1.New() + _, err = io.Copy(h, f) + if err != nil { + return "" + } + return hex.EncodeToString(h.Sum(nil)) +} + +func isValidDockerInitPath(target string, selfPath string) bool { // target and selfPath should be absolute (InitPath and SelfPath already do this) + if target == "" { + return false + } + if dockerversion.IAMSTATIC == "true" { + if selfPath == "" { + return false + } + if target == selfPath { + return true + } + targetFileInfo, err := os.Lstat(target) + if err != nil { + return false + } + selfPathFileInfo, err := os.Lstat(selfPath) + if err != nil { + return false + } + return os.SameFile(targetFileInfo, selfPathFileInfo) + } + return dockerversion.INITSHA1 != "" && dockerInitSha1(target) == dockerversion.INITSHA1 +} + +// DockerInitPath figures out the path of our dockerinit (which may be SelfPath()) +func DockerInitPath(localCopy string) string { + selfPath := SelfPath() + if isValidDockerInitPath(selfPath, selfPath) { + // if we're valid, don't bother checking anything else + return selfPath + } + var possibleInits = []string{ + localCopy, + dockerversion.INITPATH, + filepath.Join(filepath.Dir(selfPath), "dockerinit"), + + // FHS 3.0 Draft: "/usr/libexec includes internal binaries that are not intended to be executed directly by users or shell scripts. Applications may use a single subdirectory under /usr/libexec." + // https://www.linuxbase.org/betaspecs/fhs/fhs.html#usrlibexec + "/usr/libexec/docker/dockerinit", + "/usr/local/libexec/docker/dockerinit", + + // FHS 2.3: "/usr/lib includes object files, libraries, and internal binaries that are not intended to be executed directly by users or shell scripts." + // https://refspecs.linuxfoundation.org/FHS_2.3/fhs-2.3.html#USRLIBLIBRARIESFORPROGRAMMINGANDPA + "/usr/lib/docker/dockerinit", + "/usr/local/lib/docker/dockerinit", + } + for _, dockerInit := range possibleInits { + if dockerInit == "" { + continue + } + path, err := exec.LookPath(dockerInit) + if err == nil { + path, err = filepath.Abs(path) + if err != nil { + // LookPath already validated that this file exists and is executable (following symlinks), so how could Abs fail? + panic(err) + } + if isValidDockerInitPath(path, selfPath) { + return path + } + } + } + return "" +} + +var globalTestID string + +// TestDirectory creates a new temporary directory and returns its path. +// The contents of directory at path `templateDir` is copied into the +// new directory. 
+func TestDirectory(templateDir string) (dir string, err error) {
+ if globalTestID == "" {
+ globalTestID = stringid.GenerateRandomID()[:4]
+ }
+ prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, GetCallerName(2))
+ if prefix == "" {
+ prefix = "docker-test-"
+ }
+ dir, err = ioutil.TempDir("", prefix)
+ if err = os.Remove(dir); err != nil {
+ return
+ }
+ if templateDir != "" {
+ if err = archive.CopyWithTar(templateDir, dir); err != nil {
+ return
+ }
+ }
+ return
+}
+
+// GetCallerName introspects the call stack and returns the name of the
+// function `depth` levels down in the stack.
+func GetCallerName(depth int) string {
+ // Use the caller function name as a prefix.
+ // This helps trace temp directories back to their test.
+ pc, _, _, _ := runtime.Caller(depth + 1)
+ callerLongName := runtime.FuncForPC(pc).Name()
+ parts := strings.Split(callerLongName, ".")
+ callerShortName := parts[len(parts)-1]
+ return callerShortName
+}
+
+// ReplaceOrAppendEnvValues returns the defaults with the overrides either
+// replaced by matching env key or appended to the list.
+func ReplaceOrAppendEnvValues(defaults, overrides []string) []string {
+ cache := make(map[string]int, len(defaults))
+ for i, e := range defaults {
+ parts := strings.SplitN(e, "=", 2)
+ cache[parts[0]] = i
+ }
+
+ for _, value := range overrides {
+ // Values without "=" mean the env var should be removed/unset.
+ if !strings.Contains(value, "=") {
+ if i, exists := cache[value]; exists {
+ defaults[i] = "" // Used to indicate it should be removed
+ }
+ continue
+ }
+
+ // Just do a normal set/update
+ parts := strings.SplitN(value, "=", 2)
+ if i, exists := cache[parts[0]]; exists {
+ defaults[i] = value
+ } else {
+ defaults = append(defaults, value)
+ }
+ }
+
+ // Now remove all entries that we want to "unset"
+ for i := 0; i < len(defaults); i++ {
+ if defaults[i] == "" {
+ defaults = append(defaults[:i], defaults[i+1:]...)
+ i--
+ }
+ }
+
+ return defaults
+}
+
+// ValidateContextDirectory checks if all the contents of the directory
+// can be read, and returns an error if some files can't be read.
+// Symlinks which point to non-existing files don't trigger an error.
+func ValidateContextDirectory(srcPath string, excludes []string) error {
+ return filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error {
+ // skip this directory/file if it's not in the path, it won't get added to the context
+ if relFilePath, err := filepath.Rel(srcPath, filePath); err != nil {
+ return err
+ } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil {
+ return err
+ } else if skip {
+ if f.IsDir() {
+ return filepath.SkipDir
+ }
+ return nil
+ }
+
+ if err != nil {
+ if os.IsPermission(err) {
+ return fmt.Errorf("can't stat '%s'", filePath)
+ }
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+
+ // skip checking if symlinks point to non-existing files; such symlinks can be useful
+ // also skip named pipes, because they would hang on open
+ if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 {
+ return nil
+ }
+
+ if !f.IsDir() {
+ currentFile, err := os.Open(filePath)
+ if err != nil && os.IsPermission(err) {
+ return fmt.Errorf("no permission to read from '%s'", filePath)
+ }
+ currentFile.Close()
+ }
+ return nil
+ })
+}
+
+// ReadDockerIgnore reads a .dockerignore file and returns the list of file patterns
+// to ignore. Note this will trim whitespace from each line as well
+// as use Go's "clean" func to get the shortest/cleanest path for each.
+func ReadDockerIgnore(path string) ([]string, error) {
+ // Note that a missing .dockerignore file isn't treated as an error
+ reader, err := os.Open(path)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return nil, fmt.Errorf("Error reading '%s': %v", path, err)
+ }
+ return nil, nil
+ }
+ defer reader.Close()
+
+ scanner := bufio.NewScanner(reader)
+ var excludes []string
+
+ for scanner.Scan() {
+ pattern := strings.TrimSpace(scanner.Text())
+ if pattern == "" {
+ continue
+ }
+ pattern = filepath.Clean(pattern)
+ excludes = append(excludes, pattern)
+ }
+ if err = scanner.Err(); err != nil {
+ return nil, fmt.Errorf("Error reading '%s': %v", path, err)
+ }
+ return excludes, nil
+}
+
+// ImageReference combines `repo` and `ref` and returns a string representing
+// the combination. If `ref` is a digest (meaning it's of the form
+// <algorithm>:<digest>), the returned string is <repo>@<ref>. Otherwise,
+// ref is assumed to be a tag, and the returned string is <repo>:<ref>.
+func ImageReference(repo, ref string) string {
+ if DigestReference(ref) {
+ return repo + "@" + ref
+ }
+ return repo + ":" + ref
+}
+
+// DigestReference returns true if ref is a digest reference; i.e. if it
+// is of the form <algorithm>:<digest>.
+func DigestReference(ref string) bool {
+ return strings.Contains(ref, ":")
+}
diff --git a/utils/utils_test.go b/utils/utils_test.go
new file mode 100644
index 00000000..28630094
--- /dev/null
+++ b/utils/utils_test.go
@@ -0,0 +1,100 @@
+package utils
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+func TestReplaceAndAppendEnvVars(t *testing.T) {
+ var (
+ d = []string{"HOME=/"}
+ o = []string{"HOME=/root", "TERM=xterm"}
+ )
+
+ env := ReplaceOrAppendEnvValues(d, o)
+ if len(env) != 2 {
+ t.Fatalf("expected len of 2 got %d", len(env))
+ }
+ if env[0] != "HOME=/root" {
+ t.Fatalf("expected HOME=/root got '%s'", env[0])
+ }
+ if env[1] != "TERM=xterm" {
+ t.Fatalf("expected TERM=xterm got '%s'", env[1])
+ }
+}
+
+func TestImageReference(t *testing.T) {
+ tests := []struct {
+ repo string
+ ref string
+ expected string
+ }{
+ {"repo", "tag", "repo:tag"},
+ {"repo", "sha256:c100b11b25d0cacd52c14e0e7bf525e1a4c0e6aec8827ae007055545909d1a64", "repo@sha256:c100b11b25d0cacd52c14e0e7bf525e1a4c0e6aec8827ae007055545909d1a64"},
+ }
+
+ for i, test := range tests {
+ actual := ImageReference(test.repo, test.ref)
+ if test.expected != actual {
+ t.Errorf("%d: expected %q, got %q", i, test.expected, actual)
+ }
+ }
+}
+
+func TestDigestReference(t *testing.T) {
+ input := "sha256:c100b11b25d0cacd52c14e0e7bf525e1a4c0e6aec8827ae007055545909d1a64"
+ if !DigestReference(input) {
+ t.Errorf("Expected DigestReference=true for input %q", input)
+ }
+
+ input = "latest"
+ if DigestReference(input) {
+ t.Errorf("Unexpected DigestReference=true for input %q", input)
+ }
+}
+
+func TestReadDockerIgnore(t *testing.T) {
+ tmpDir, err := ioutil.TempDir("", "dockerignore-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpDir)
+
+ diName := filepath.Join(tmpDir, ".dockerignore")
+
+ di, err := ReadDockerIgnore(diName)
+ if err != nil {
+ t.Fatalf("Expected no error, got %s", err)
+ }
+
+ if diLen := len(di); diLen != 0 {
+ t.Fatalf("Expected zero dockerignore entries, got %d", diLen)
+ }
+
+ content := fmt.Sprintf("test1\n/test2\n/a/file/here\n\nlastfile")
+ err = ioutil.WriteFile(diName, []byte(content), 0777)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ di, err = ReadDockerIgnore(diName)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if di[0] != "test1" {
+ t.Fatalf("First element is not test1")
+ }
+ if
di[1] != "/test2" { + t.Fatalf("Second element is not /test2") + } + if di[2] != "/a/file/here" { + t.Fatalf("Third element is not /a/file/here") + } + if di[3] != "lastfile" { + t.Fatalf("Fourth element is not lastfile") + } +} diff --git a/volume/drivers/adapter.go b/volume/drivers/adapter.go new file mode 100644 index 00000000..6846a3a8 --- /dev/null +++ b/volume/drivers/adapter.go @@ -0,0 +1,60 @@ +package volumedrivers + +import "github.com/docker/docker/volume" + +type volumeDriverAdapter struct { + name string + proxy *volumeDriverProxy +} + +func (a *volumeDriverAdapter) Name() string { + return a.name +} + +func (a *volumeDriverAdapter) Create(name string) (volume.Volume, error) { + err := a.proxy.Create(name) + if err != nil { + return nil, err + } + return &volumeAdapter{ + proxy: a.proxy, + name: name, + driverName: a.name}, nil +} + +func (a *volumeDriverAdapter) Remove(v volume.Volume) error { + return a.proxy.Remove(v.Name()) +} + +type volumeAdapter struct { + proxy *volumeDriverProxy + name string + driverName string + eMount string // ephemeral host volume path +} + +func (a *volumeAdapter) Name() string { + return a.name +} + +func (a *volumeAdapter) DriverName() string { + return a.driverName +} + +func (a *volumeAdapter) Path() string { + if len(a.eMount) > 0 { + return a.eMount + } + m, _ := a.proxy.Path(a.name) + return m +} + +func (a *volumeAdapter) Mount() (string, error) { + var err error + a.eMount, err = a.proxy.Mount(a.name) + return a.eMount, err +} + +func (a *volumeAdapter) Unmount() error { + return a.proxy.Unmount(a.name) +} diff --git a/volume/drivers/api.go b/volume/drivers/api.go new file mode 100644 index 00000000..3c685ae1 --- /dev/null +++ b/volume/drivers/api.go @@ -0,0 +1,23 @@ +//go:generate pluginrpc-gen -i $GOFILE -o proxy.go -type VolumeDriver -name VolumeDriver + +package volumedrivers + +import "github.com/docker/docker/volume" + +func NewVolumeDriver(name string, c client) volume.Driver { + proxy := &volumeDriverProxy{c} + return &volumeDriverAdapter{name, proxy} +} + +type VolumeDriver interface { + // Create a volume with the given name + Create(name string) (err error) + // Remove the volume with the given name + Remove(name string) (err error) + // Get the mountpoint of the given volume + Path(name string) (mountpoint string, err error) + // Mount the given volume and return the mountpoint + Mount(name string) (mountpoint string, err error) + // Unmount the given volume + Unmount(name string) (err error) +} diff --git a/volume/drivers/extpoint.go b/volume/drivers/extpoint.go new file mode 100644 index 00000000..b002a0ff --- /dev/null +++ b/volume/drivers/extpoint.go @@ -0,0 +1,61 @@ +package volumedrivers + +import ( + "fmt" + "sync" + + "github.com/docker/docker/pkg/plugins" + "github.com/docker/docker/volume" +) + +// currently created by hand. 
generation tool would generate this like: +// $ extpoint-gen Driver > volume/extpoint.go + +var drivers = &driverExtpoint{extensions: make(map[string]volume.Driver)} + +type driverExtpoint struct { + extensions map[string]volume.Driver + sync.Mutex +} + +func Register(extension volume.Driver, name string) bool { + drivers.Lock() + defer drivers.Unlock() + if name == "" { + return false + } + _, exists := drivers.extensions[name] + if exists { + return false + } + drivers.extensions[name] = extension + return true +} + +func Unregister(name string) bool { + drivers.Lock() + defer drivers.Unlock() + _, exists := drivers.extensions[name] + if !exists { + return false + } + delete(drivers.extensions, name) + return true +} + +func Lookup(name string) (volume.Driver, error) { + drivers.Lock() + defer drivers.Unlock() + ext, ok := drivers.extensions[name] + if ok { + return ext, nil + } + pl, err := plugins.Get(name, "VolumeDriver") + if err != nil { + return nil, fmt.Errorf("Error looking up volume plugin %s: %v", name, err) + } + + d := NewVolumeDriver(name, pl.Client) + drivers.extensions[name] = d + return d, nil +} diff --git a/volume/drivers/proxy.go b/volume/drivers/proxy.go new file mode 100644 index 00000000..9fd68855 --- /dev/null +++ b/volume/drivers/proxy.go @@ -0,0 +1,149 @@ +// generated code - DO NOT EDIT + +package volumedrivers + +import "errors" + +type client interface { + Call(string, interface{}, interface{}) error +} + +type volumeDriverProxy struct { + client +} + +type volumeDriverProxyCreateRequest struct { + Name string +} + +type volumeDriverProxyCreateResponse struct { + Err string +} + +func (pp *volumeDriverProxy) Create(name string) (err error) { + var ( + req volumeDriverProxyCreateRequest + ret volumeDriverProxyCreateResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Create", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyRemoveRequest struct { + Name string +} + +type volumeDriverProxyRemoveResponse struct { + Err string +} + +func (pp *volumeDriverProxy) Remove(name string) (err error) { + var ( + req volumeDriverProxyRemoveRequest + ret volumeDriverProxyRemoveResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Remove", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyPathRequest struct { + Name string +} + +type volumeDriverProxyPathResponse struct { + Mountpoint string + Err string +} + +func (pp *volumeDriverProxy) Path(name string) (mountpoint string, err error) { + var ( + req volumeDriverProxyPathRequest + ret volumeDriverProxyPathResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Path", req, &ret); err != nil { + return + } + + mountpoint = ret.Mountpoint + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyMountRequest struct { + Name string +} + +type volumeDriverProxyMountResponse struct { + Mountpoint string + Err string +} + +func (pp *volumeDriverProxy) Mount(name string) (mountpoint string, err error) { + var ( + req volumeDriverProxyMountRequest + ret volumeDriverProxyMountResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Mount", req, &ret); err != nil { + return + } + + mountpoint = ret.Mountpoint + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyUnmountRequest struct { + Name string +} + +type volumeDriverProxyUnmountResponse struct { + 
Err string +} + +func (pp *volumeDriverProxy) Unmount(name string) (err error) { + var ( + req volumeDriverProxyUnmountRequest + ret volumeDriverProxyUnmountResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Unmount", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} diff --git a/volume/drivers/proxy_test.go b/volume/drivers/proxy_test.go new file mode 100644 index 00000000..cadf8c0d --- /dev/null +++ b/volume/drivers/proxy_test.go @@ -0,0 +1,96 @@ +package volumedrivers + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "testing" + + "github.com/docker/docker/pkg/plugins" + "github.com/docker/docker/pkg/tlsconfig" +) + +func TestVolumeRequestError(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + defer server.Close() + + mux.HandleFunc("/VolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot create volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.Remove", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot remove volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.Mount", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot mount volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.Unmount", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot unmount volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.Path", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Unknown volume"}`) + }) + + u, _ := url.Parse(server.URL) + client, err := plugins.NewClient("tcp://"+u.Host, tlsconfig.Options{InsecureSkipVerify: true}) + if err != nil { + t.Fatal(err) + } + + driver := volumeDriverProxy{client} + + if err = driver.Create("volume"); err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Cannot create volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + _, err = driver.Mount("volume") + if err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Cannot mount volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + err = driver.Unmount("volume") + if err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Cannot unmount volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + err = driver.Remove("volume") + if err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Cannot remove volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + _, err = driver.Path("volume") + if err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Unknown volume") { + t.Fatalf("Unexpected error: %v\n", err) + } +} diff --git a/volume/local/local.go b/volume/local/local.go new file mode 100644 index 00000000..688ea6c7 --- /dev/null +++ b/volume/local/local.go @@ -0,0 +1,177 @@ +package local + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/docker/docker/volume" +) + +// VolumeDataPathName is the name of the 
directory where the volume data is stored.
+// It uses a very distinctive name to avoid collisions when migrating
+// data between Docker versions.
+const (
+ VolumeDataPathName = "_data"
+ volumesPathName = "volumes"
+)
+
+var oldVfsDir = filepath.Join("vfs", "dir")
+
+func New(scope string) (*Root, error) {
+ rootDirectory := filepath.Join(scope, volumesPathName)
+
+ if err := os.MkdirAll(rootDirectory, 0700); err != nil {
+ return nil, err
+ }
+
+ r := &Root{
+ scope: scope,
+ path: rootDirectory,
+ volumes: make(map[string]*Volume),
+ }
+
+ dirs, err := ioutil.ReadDir(rootDirectory)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, d := range dirs {
+ name := filepath.Base(d.Name())
+ r.volumes[name] = &Volume{
+ driverName: r.Name(),
+ name: name,
+ path: r.DataPath(name),
+ }
+ }
+ return r, nil
+}
+
+type Root struct {
+ m sync.Mutex
+ scope string
+ path string
+ volumes map[string]*Volume
+}
+
+func (r *Root) DataPath(volumeName string) string {
+ return filepath.Join(r.path, volumeName, VolumeDataPathName)
+}
+
+func (r *Root) Name() string {
+ return "local"
+}
+
+func (r *Root) Create(name string) (volume.Volume, error) {
+ r.m.Lock()
+ defer r.m.Unlock()
+
+ v, exists := r.volumes[name]
+ if !exists {
+ path := r.DataPath(name)
+ if err := os.MkdirAll(path, 0755); err != nil {
+ if os.IsExist(err) {
+ return nil, fmt.Errorf("volume already exists under %s", filepath.Dir(path))
+ }
+ return nil, err
+ }
+ v = &Volume{
+ driverName: r.Name(),
+ name: name,
+ path: path,
+ }
+ r.volumes[name] = v
+ }
+ v.use()
+ return v, nil
+}
+
+func (r *Root) Remove(v volume.Volume) error {
+ r.m.Lock()
+ defer r.m.Unlock()
+ lv, ok := v.(*Volume)
+ if !ok {
+ return errors.New("unknown volume type")
+ }
+ lv.release()
+ if lv.usedCount == 0 {
+ realPath, err := filepath.EvalSymlinks(lv.path)
+ if err != nil {
+ return err
+ }
+ if !r.scopedPath(realPath) {
+ return fmt.Errorf("Unable to remove a directory outside of the Docker root: %s", realPath)
+ }
+
+ if err := os.RemoveAll(realPath); err != nil {
+ return err
+ }
+
+ delete(r.volumes, lv.name)
+ return os.RemoveAll(filepath.Dir(lv.path))
+ }
+ return nil
+}
+
+// scopedPath verifies that the path where the volume is located
+// is under Docker's root and one of the valid local volume paths.
+func (r *Root) scopedPath(realPath string) bool {
+ // Volumes path for Docker version >= 1.7
+ if strings.HasPrefix(realPath, filepath.Join(r.scope, volumesPathName)) {
+ return true
+ }
+
+ // Volumes path for Docker version < 1.7
+ if strings.HasPrefix(realPath, filepath.Join(r.scope, oldVfsDir)) {
+ return true
+ }
+
+ return false
+}
+
+type Volume struct {
+ m sync.Mutex
+ usedCount int
+ // unique name of the volume
+ name string
+ // path is the path on the host where the data lives
+ path string
+ // driverName is the name of the driver that created the volume.
+ driverName string
+}
+
+func (v *Volume) Name() string {
+ return v.name
+}
+
+func (v *Volume) DriverName() string {
+ return v.driverName
+}
+
+func (v *Volume) Path() string {
+ return v.path
+}
+
+func (v *Volume) Mount() (string, error) {
+ return v.path, nil
+}
+
+func (v *Volume) Unmount() error {
+ return nil
+}
+
+func (v *Volume) use() {
+ v.m.Lock()
+ v.usedCount++
+ v.m.Unlock()
+}
+
+func (v *Volume) release() {
+ v.m.Lock()
+ v.usedCount--
+ v.m.Unlock()
+}
diff --git a/volume/volume.go b/volume/volume.go
new file mode 100644
index 00000000..2d5ee35a
--- /dev/null
+++ b/volume/volume.go
@@ -0,0 +1,57 @@
+package volume
+
+const DefaultDriverName = "local"
+
+type Driver interface {
+ // Name returns the name of the volume driver.
+ Name() string
+ // Create makes a new volume with the given name.
+ Create(string) (Volume, error)
+ // Remove deletes the volume.
+ Remove(Volume) error
+}
+
+type Volume interface {
+ // Name returns the name of the volume
+ Name() string
+ // DriverName returns the name of the driver which owns this volume.
+ DriverName() string
+ // Path returns the absolute path to the volume.
+ Path() string
+ // Mount mounts the volume and returns the absolute path to
+ // where it can be consumed.
+ Mount() (string, error)
+ // Unmount unmounts the volume when it is no longer in use.
+ Unmount() error
+}
+
+// read-write modes
+var rwModes = map[string]bool{
+ "rw": true,
+ "rw,Z": true,
+ "rw,z": true,
+ "z,rw": true,
+ "Z,rw": true,
+ "Z": true,
+ "z": true,
+}
+
+// read-only modes
+var roModes = map[string]bool{
+ "ro": true,
+ "ro,Z": true,
+ "ro,z": true,
+ "z,ro": true,
+ "Z,ro": true,
+}
+
+// ValidateMountMode will make sure the mount mode is valid.
+// It returns whether the mode is a valid mount mode and whether it is read-write.
+func ValidateMountMode(mode string) (bool, bool) {
+ return roModes[mode] || rwModes[mode], rwModes[mode]
+}
+
+// ReadWrite tells you if a mode string is a valid read-write mode or not.
+func ReadWrite(mode string) bool {
+ return rwModes[mode]
+}
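[Editor's note: a minimal usage sketch, not part of the imported source. It shows how a caller might check the mode portion of a bind-mount spec such as "/src:/dst:ro" with the exported volume.ValidateMountMode above; the validateBind helper and the standalone main wrapper are hypothetical.]

package main

import (
	"fmt"

	"github.com/docker/docker/volume"
)

// validateBind (hypothetical) validates a mount mode string using the
// tables in volume/volume.go and reports whether it is read-write.
func validateBind(mode string) error {
	valid, rw := volume.ValidateMountMode(mode)
	if !valid {
		return fmt.Errorf("invalid mount mode %q", mode)
	}
	fmt.Printf("mode %q accepted (read-write: %v)\n", mode, rw)
	return nil
}

func main() {
	for _, m := range []string{"rw", "ro,Z", "z", "bogus"} {
		if err := validateBind(m); err != nil {
			fmt.Println(err)
		}
	}
}

Note that the bare SELinux labels "z" and "Z" appear in rwModes, so ValidateMountMode treats a label-only mode as valid and read-write.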