summaryrefslogtreecommitdiff
path: root/image
diff options
context:
space:
mode:
authorhwajeong.son <hwajeong.son@samsung.com>2018-08-20 13:30:55 +0900
committerhwajeong.son <hwajeong.son@samsung.com>2018-08-20 13:30:55 +0900
commit0b51891e5977b87f986f4db2cbbe09295cfdbedc (patch)
treec35ac732cb1dffccee5a32131431f753481077c2 /image
parenteea0e89806b2cf59af3dccabc67014bd19b91b82 (diff)
downloaddocker-engine-master.tar.gz
docker-engine-master.tar.bz2
docker-engine-master.zip
Signed-off-by: hwajeong.son <hwajeong.son@samsung.com>
Diffstat (limited to 'image')
-rw-r--r--image/cache/cache.go253
-rw-r--r--image/cache/compare.go63
-rw-r--r--image/cache/compare_test.go126
-rw-r--r--image/fs.go178
-rw-r--r--image/fs_test.go275
-rw-r--r--image/image.go220
-rw-r--r--image/image_test.go53
-rw-r--r--image/rootfs.go44
-rw-r--r--image/spec/v1.1.md637
-rw-r--r--image/spec/v1.2.md696
-rw-r--r--image/spec/v1.md573
-rw-r--r--image/store.go367
-rw-r--r--image/store_test.go178
-rw-r--r--image/tarexport/load.go431
-rw-r--r--image/tarexport/save.go409
-rw-r--r--image/tarexport/tarexport.go47
-rw-r--r--image/v1/imagev1.go150
-rw-r--r--image/v1/imagev1_test.go55
18 files changed, 4755 insertions, 0 deletions
diff --git a/image/cache/cache.go b/image/cache/cache.go
new file mode 100644
index 0000000..e074beb
--- /dev/null
+++ b/image/cache/cache.go
@@ -0,0 +1,253 @@
+package cache
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strings"
+
+ containertypes "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/dockerversion"
+ "github.com/docker/docker/image"
+ "github.com/docker/docker/layer"
+ "github.com/pkg/errors"
+)
+
+// NewLocal returns a local image cache, based on parent chain
+func NewLocal(store image.Store) *LocalImageCache {
+ return &LocalImageCache{
+ store: store,
+ }
+}
+
+// LocalImageCache is cache based on parent chain.
+type LocalImageCache struct {
+ store image.Store
+}
+
+// GetCache returns the image id found in the cache
+func (lic *LocalImageCache) GetCache(imgID string, config *containertypes.Config) (string, error) {
+ return getImageIDAndError(getLocalCachedImage(lic.store, image.ID(imgID), config))
+}
+
+// New returns an image cache, based on history objects
+func New(store image.Store) *ImageCache {
+ return &ImageCache{
+ store: store,
+ localImageCache: NewLocal(store),
+ }
+}
+
+// ImageCache is cache based on history objects. Requires initial set of images.
+type ImageCache struct {
+ sources []*image.Image
+ store image.Store
+ localImageCache *LocalImageCache
+}
+
+// Populate adds an image to the cache (to be queried later)
+func (ic *ImageCache) Populate(image *image.Image) {
+ ic.sources = append(ic.sources, image)
+}
+
+// GetCache returns the image id found in the cache
+func (ic *ImageCache) GetCache(parentID string, cfg *containertypes.Config) (string, error) {
+ imgID, err := ic.localImageCache.GetCache(parentID, cfg)
+ if err != nil {
+ return "", err
+ }
+ if imgID != "" {
+ for _, s := range ic.sources {
+ if ic.isParent(s.ID(), image.ID(imgID)) {
+ return imgID, nil
+ }
+ }
+ }
+
+ var parent *image.Image
+ lenHistory := 0
+ if parentID != "" {
+ parent, err = ic.store.Get(image.ID(parentID))
+ if err != nil {
+ return "", errors.Wrapf(err, "unable to find image %v", parentID)
+ }
+ lenHistory = len(parent.History)
+ }
+
+ for _, target := range ic.sources {
+ if !isValidParent(target, parent) || !isValidConfig(cfg, target.History[lenHistory]) {
+ continue
+ }
+
+ if len(target.History)-1 == lenHistory { // last
+ if parent != nil {
+ if err := ic.store.SetParent(target.ID(), parent.ID()); err != nil {
+ return "", errors.Wrapf(err, "failed to set parent for %v to %v", target.ID(), parent.ID())
+ }
+ }
+ return target.ID().String(), nil
+ }
+
+ imgID, err := ic.restoreCachedImage(parent, target, cfg)
+ if err != nil {
+ return "", errors.Wrapf(err, "failed to restore cached image from %q to %v", parentID, target.ID())
+ }
+
+ ic.sources = []*image.Image{target} // avoid jumping to different target, tuned for safety atm
+ return imgID.String(), nil
+ }
+
+ return "", nil
+}
+
+func (ic *ImageCache) restoreCachedImage(parent, target *image.Image, cfg *containertypes.Config) (image.ID, error) {
+ var history []image.History
+ rootFS := image.NewRootFS()
+ lenHistory := 0
+ if parent != nil {
+ history = parent.History
+ rootFS = parent.RootFS
+ lenHistory = len(parent.History)
+ }
+ history = append(history, target.History[lenHistory])
+ if layer := getLayerForHistoryIndex(target, lenHistory); layer != "" {
+ rootFS.Append(layer)
+ }
+
+ config, err := json.Marshal(&image.Image{
+ V1Image: image.V1Image{
+ DockerVersion: dockerversion.Version,
+ Config: cfg,
+ Architecture: target.Architecture,
+ OS: target.OS,
+ Author: target.Author,
+ Created: history[len(history)-1].Created,
+ },
+ RootFS: rootFS,
+ History: history,
+ OSFeatures: target.OSFeatures,
+ OSVersion: target.OSVersion,
+ })
+ if err != nil {
+ return "", errors.Wrap(err, "failed to marshal image config")
+ }
+
+ imgID, err := ic.store.Create(config)
+ if err != nil {
+ return "", errors.Wrap(err, "failed to create cache image")
+ }
+
+ if parent != nil {
+ if err := ic.store.SetParent(imgID, parent.ID()); err != nil {
+ return "", errors.Wrapf(err, "failed to set parent for %v to %v", target.ID(), parent.ID())
+ }
+ }
+ return imgID, nil
+}
+
+func (ic *ImageCache) isParent(imgID, parentID image.ID) bool {
+ nextParent, err := ic.store.GetParent(imgID)
+ if err != nil {
+ return false
+ }
+ if nextParent == parentID {
+ return true
+ }
+ return ic.isParent(nextParent, parentID)
+}
+
+func getLayerForHistoryIndex(image *image.Image, index int) layer.DiffID {
+ layerIndex := 0
+ for i, h := range image.History {
+ if i == index {
+ if h.EmptyLayer {
+ return ""
+ }
+ break
+ }
+ if !h.EmptyLayer {
+ layerIndex++
+ }
+ }
+ return image.RootFS.DiffIDs[layerIndex] // validate?
+}
+
+func isValidConfig(cfg *containertypes.Config, h image.History) bool {
+ // todo: make this format better than join that loses data
+ return strings.Join(cfg.Cmd, " ") == h.CreatedBy
+}
+
+func isValidParent(img, parent *image.Image) bool {
+ if len(img.History) == 0 {
+ return false
+ }
+ if parent == nil || len(parent.History) == 0 && len(parent.RootFS.DiffIDs) == 0 {
+ return true
+ }
+ if len(parent.History) >= len(img.History) {
+ return false
+ }
+ if len(parent.RootFS.DiffIDs) > len(img.RootFS.DiffIDs) {
+ return false
+ }
+
+ for i, h := range parent.History {
+ if !reflect.DeepEqual(h, img.History[i]) {
+ return false
+ }
+ }
+ for i, d := range parent.RootFS.DiffIDs {
+ if d != img.RootFS.DiffIDs[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func getImageIDAndError(img *image.Image, err error) (string, error) {
+ if img == nil || err != nil {
+ return "", err
+ }
+ return img.ID().String(), nil
+}
+
+// getLocalCachedImage returns the most recent created image that is a child
+// of the image with imgID, that had the same config when it was
+// created. nil is returned if a child cannot be found. An error is
+// returned if the parent image cannot be found.
+func getLocalCachedImage(imageStore image.Store, imgID image.ID, config *containertypes.Config) (*image.Image, error) {
+ // Loop on the children of the given image and check the config
+ getMatch := func(siblings []image.ID) (*image.Image, error) {
+ var match *image.Image
+ for _, id := range siblings {
+ img, err := imageStore.Get(id)
+ if err != nil {
+ return nil, fmt.Errorf("unable to find image %q", id)
+ }
+
+ if compare(&img.ContainerConfig, config) {
+ // check for the most up to date match
+ if match == nil || match.Created.Before(img.Created) {
+ match = img
+ }
+ }
+ }
+ return match, nil
+ }
+
+ // In this case, this is `FROM scratch`, which isn't an actual image.
+ if imgID == "" {
+ images := imageStore.Map()
+ var siblings []image.ID
+ for id, img := range images {
+ if img.Parent == imgID {
+ siblings = append(siblings, id)
+ }
+ }
+ return getMatch(siblings)
+ }
+
+ // find match from child images
+ siblings := imageStore.Children(imgID)
+ return getMatch(siblings)
+}
diff --git a/image/cache/compare.go b/image/cache/compare.go
new file mode 100644
index 0000000..9237932
--- /dev/null
+++ b/image/cache/compare.go
@@ -0,0 +1,63 @@
+package cache
+
+import (
+ "github.com/docker/docker/api/types/container"
+)
+
+// compare two Config struct. Do not compare the "Image" nor "Hostname" fields
+// If OpenStdin is set, then it differs
+func compare(a, b *container.Config) bool {
+ if a == nil || b == nil ||
+ a.OpenStdin || b.OpenStdin {
+ return false
+ }
+ if a.AttachStdout != b.AttachStdout ||
+ a.AttachStderr != b.AttachStderr ||
+ a.User != b.User ||
+ a.OpenStdin != b.OpenStdin ||
+ a.Tty != b.Tty {
+ return false
+ }
+
+ if len(a.Cmd) != len(b.Cmd) ||
+ len(a.Env) != len(b.Env) ||
+ len(a.Labels) != len(b.Labels) ||
+ len(a.ExposedPorts) != len(b.ExposedPorts) ||
+ len(a.Entrypoint) != len(b.Entrypoint) ||
+ len(a.Volumes) != len(b.Volumes) {
+ return false
+ }
+
+ for i := 0; i < len(a.Cmd); i++ {
+ if a.Cmd[i] != b.Cmd[i] {
+ return false
+ }
+ }
+ for i := 0; i < len(a.Env); i++ {
+ if a.Env[i] != b.Env[i] {
+ return false
+ }
+ }
+ for k, v := range a.Labels {
+ if v != b.Labels[k] {
+ return false
+ }
+ }
+ for k := range a.ExposedPorts {
+ if _, exists := b.ExposedPorts[k]; !exists {
+ return false
+ }
+ }
+
+ for i := 0; i < len(a.Entrypoint); i++ {
+ if a.Entrypoint[i] != b.Entrypoint[i] {
+ return false
+ }
+ }
+ for key := range a.Volumes {
+ if _, exists := b.Volumes[key]; !exists {
+ return false
+ }
+ }
+ return true
+}
diff --git a/image/cache/compare_test.go b/image/cache/compare_test.go
new file mode 100644
index 0000000..7cc0589
--- /dev/null
+++ b/image/cache/compare_test.go
@@ -0,0 +1,126 @@
+package cache
+
+import (
+ "testing"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/strslice"
+ "github.com/docker/go-connections/nat"
+)
+
+// Just to make life easier
+func newPortNoError(proto, port string) nat.Port {
+ p, _ := nat.NewPort(proto, port)
+ return p
+}
+
+func TestCompare(t *testing.T) {
+ ports1 := make(nat.PortSet)
+ ports1[newPortNoError("tcp", "1111")] = struct{}{}
+ ports1[newPortNoError("tcp", "2222")] = struct{}{}
+ ports2 := make(nat.PortSet)
+ ports2[newPortNoError("tcp", "3333")] = struct{}{}
+ ports2[newPortNoError("tcp", "4444")] = struct{}{}
+ ports3 := make(nat.PortSet)
+ ports3[newPortNoError("tcp", "1111")] = struct{}{}
+ ports3[newPortNoError("tcp", "2222")] = struct{}{}
+ ports3[newPortNoError("tcp", "5555")] = struct{}{}
+ volumes1 := make(map[string]struct{})
+ volumes1["/test1"] = struct{}{}
+ volumes2 := make(map[string]struct{})
+ volumes2["/test2"] = struct{}{}
+ volumes3 := make(map[string]struct{})
+ volumes3["/test1"] = struct{}{}
+ volumes3["/test3"] = struct{}{}
+ envs1 := []string{"ENV1=value1", "ENV2=value2"}
+ envs2 := []string{"ENV1=value1", "ENV3=value3"}
+ entrypoint1 := strslice.StrSlice{"/bin/sh", "-c"}
+ entrypoint2 := strslice.StrSlice{"/bin/sh", "-d"}
+ entrypoint3 := strslice.StrSlice{"/bin/sh", "-c", "echo"}
+ cmd1 := strslice.StrSlice{"/bin/sh", "-c"}
+ cmd2 := strslice.StrSlice{"/bin/sh", "-d"}
+ cmd3 := strslice.StrSlice{"/bin/sh", "-c", "echo"}
+ labels1 := map[string]string{"LABEL1": "value1", "LABEL2": "value2"}
+ labels2 := map[string]string{"LABEL1": "value1", "LABEL2": "value3"}
+ labels3 := map[string]string{"LABEL1": "value1", "LABEL2": "value2", "LABEL3": "value3"}
+
+ sameConfigs := map[*container.Config]*container.Config{
+ // Empty config
+ &container.Config{}: {},
+ // Does not compare hostname, domainname & image
+ &container.Config{
+ Hostname: "host1",
+ Domainname: "domain1",
+ Image: "image1",
+ User: "user",
+ }: {
+ Hostname: "host2",
+ Domainname: "domain2",
+ Image: "image2",
+ User: "user",
+ },
+ // only OpenStdin
+ &container.Config{OpenStdin: false}: {OpenStdin: false},
+ // only env
+ &container.Config{Env: envs1}: {Env: envs1},
+ // only cmd
+ &container.Config{Cmd: cmd1}: {Cmd: cmd1},
+ // only labels
+ &container.Config{Labels: labels1}: {Labels: labels1},
+ // only exposedPorts
+ &container.Config{ExposedPorts: ports1}: {ExposedPorts: ports1},
+ // only entrypoints
+ &container.Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint1},
+ // only volumes
+ &container.Config{Volumes: volumes1}: {Volumes: volumes1},
+ }
+ differentConfigs := map[*container.Config]*container.Config{
+ nil: nil,
+ &container.Config{
+ Hostname: "host1",
+ Domainname: "domain1",
+ Image: "image1",
+ User: "user1",
+ }: {
+ Hostname: "host1",
+ Domainname: "domain1",
+ Image: "image1",
+ User: "user2",
+ },
+ // only OpenStdin
+ &container.Config{OpenStdin: false}: {OpenStdin: true},
+ &container.Config{OpenStdin: true}: {OpenStdin: false},
+ // only env
+ &container.Config{Env: envs1}: {Env: envs2},
+ // only cmd
+ &container.Config{Cmd: cmd1}: {Cmd: cmd2},
+ // not the same number of parts
+ &container.Config{Cmd: cmd1}: {Cmd: cmd3},
+ // only labels
+ &container.Config{Labels: labels1}: {Labels: labels2},
+ // not the same number of labels
+ &container.Config{Labels: labels1}: {Labels: labels3},
+ // only exposedPorts
+ &container.Config{ExposedPorts: ports1}: {ExposedPorts: ports2},
+ // not the same number of ports
+ &container.Config{ExposedPorts: ports1}: {ExposedPorts: ports3},
+ // only entrypoints
+ &container.Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint2},
+ // not the same number of parts
+ &container.Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint3},
+ // only volumes
+ &container.Config{Volumes: volumes1}: {Volumes: volumes2},
+ // not the same number of labels
+ &container.Config{Volumes: volumes1}: {Volumes: volumes3},
+ }
+ for config1, config2 := range sameConfigs {
+ if !compare(config1, config2) {
+ t.Fatalf("Compare should be true for [%v] and [%v]", config1, config2)
+ }
+ }
+ for config1, config2 := range differentConfigs {
+ if compare(config1, config2) {
+ t.Fatalf("Compare should be false for [%v] and [%v]", config1, config2)
+ }
+ }
+}
diff --git a/image/fs.go b/image/fs.go
new file mode 100644
index 0000000..10f6dab
--- /dev/null
+++ b/image/fs.go
@@ -0,0 +1,178 @@
+package image
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/ioutils"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+// DigestWalkFunc is function called by StoreBackend.Walk
+type DigestWalkFunc func(id digest.Digest) error
+
+// StoreBackend provides interface for image.Store persistence
+type StoreBackend interface {
+ Walk(f DigestWalkFunc) error
+ Get(id digest.Digest) ([]byte, error)
+ Set(data []byte) (digest.Digest, error)
+ Delete(id digest.Digest) error
+ SetMetadata(id digest.Digest, key string, data []byte) error
+ GetMetadata(id digest.Digest, key string) ([]byte, error)
+ DeleteMetadata(id digest.Digest, key string) error
+}
+
+// fs implements StoreBackend using the filesystem.
+type fs struct {
+ sync.RWMutex
+ root string
+}
+
+const (
+ contentDirName = "content"
+ metadataDirName = "metadata"
+)
+
+// NewFSStoreBackend returns new filesystem based backend for image.Store
+func NewFSStoreBackend(root string) (StoreBackend, error) {
+ return newFSStore(root)
+}
+
+func newFSStore(root string) (*fs, error) {
+ s := &fs{
+ root: root,
+ }
+ if err := os.MkdirAll(filepath.Join(root, contentDirName, string(digest.Canonical)), 0700); err != nil {
+ return nil, errors.Wrap(err, "failed to create storage backend")
+ }
+ if err := os.MkdirAll(filepath.Join(root, metadataDirName, string(digest.Canonical)), 0700); err != nil {
+ return nil, errors.Wrap(err, "failed to create storage backend")
+ }
+ return s, nil
+}
+
+func (s *fs) contentFile(dgst digest.Digest) string {
+ return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Hex())
+}
+
+func (s *fs) metadataDir(dgst digest.Digest) string {
+ return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Hex())
+}
+
+// Walk calls the supplied callback for each image ID in the storage backend.
+func (s *fs) Walk(f DigestWalkFunc) error {
+ // Only Canonical digest (sha256) is currently supported
+ s.RLock()
+ dir, err := ioutil.ReadDir(filepath.Join(s.root, contentDirName, string(digest.Canonical)))
+ s.RUnlock()
+ if err != nil {
+ return err
+ }
+ for _, v := range dir {
+ dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name())
+ if err := dgst.Validate(); err != nil {
+ logrus.Debugf("skipping invalid digest %s: %s", dgst, err)
+ continue
+ }
+ if err := f(dgst); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Get returns the content stored under a given digest.
+func (s *fs) Get(dgst digest.Digest) ([]byte, error) {
+ s.RLock()
+ defer s.RUnlock()
+
+ return s.get(dgst)
+}
+
+func (s *fs) get(dgst digest.Digest) ([]byte, error) {
+ content, err := ioutil.ReadFile(s.contentFile(dgst))
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to get digest %s", dgst)
+ }
+
+ // todo: maybe optional
+ if digest.FromBytes(content) != dgst {
+ return nil, fmt.Errorf("failed to verify: %v", dgst)
+ }
+
+ return content, nil
+}
+
+// Set stores content by checksum.
+func (s *fs) Set(data []byte) (digest.Digest, error) {
+ s.Lock()
+ defer s.Unlock()
+
+ if len(data) == 0 {
+ return "", fmt.Errorf("invalid empty data")
+ }
+
+ dgst := digest.FromBytes(data)
+ if err := ioutils.AtomicWriteFile(s.contentFile(dgst), data, 0600); err != nil {
+ return "", errors.Wrap(err, "failed to write digest data")
+ }
+
+ return dgst, nil
+}
+
+// Delete removes content and metadata files associated with the digest.
+func (s *fs) Delete(dgst digest.Digest) error {
+ s.Lock()
+ defer s.Unlock()
+
+ if err := os.RemoveAll(s.metadataDir(dgst)); err != nil {
+ return err
+ }
+ if err := os.Remove(s.contentFile(dgst)); err != nil {
+ return err
+ }
+ return nil
+}
+
+// SetMetadata sets metadata for a given ID. It fails if there's no base file.
+func (s *fs) SetMetadata(dgst digest.Digest, key string, data []byte) error {
+ s.Lock()
+ defer s.Unlock()
+ if _, err := s.get(dgst); err != nil {
+ return err
+ }
+
+ baseDir := filepath.Join(s.metadataDir(dgst))
+ if err := os.MkdirAll(baseDir, 0700); err != nil {
+ return err
+ }
+ return ioutils.AtomicWriteFile(filepath.Join(s.metadataDir(dgst), key), data, 0600)
+}
+
+// GetMetadata returns metadata for a given digest.
+func (s *fs) GetMetadata(dgst digest.Digest, key string) ([]byte, error) {
+ s.RLock()
+ defer s.RUnlock()
+
+ if _, err := s.get(dgst); err != nil {
+ return nil, err
+ }
+ bytes, err := ioutil.ReadFile(filepath.Join(s.metadataDir(dgst), key))
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to read metadata")
+ }
+ return bytes, nil
+}
+
+// DeleteMetadata removes the metadata associated with a digest.
+func (s *fs) DeleteMetadata(dgst digest.Digest, key string) error {
+ s.Lock()
+ defer s.Unlock()
+
+ return os.RemoveAll(filepath.Join(s.metadataDir(dgst), key))
+}
diff --git a/image/fs_test.go b/image/fs_test.go
new file mode 100644
index 0000000..5f2437c
--- /dev/null
+++ b/image/fs_test.go
@@ -0,0 +1,275 @@
+package image
+
+import (
+ "bytes"
+ "crypto/rand"
+ "crypto/sha256"
+ "encoding/hex"
+ "errors"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/docker/docker/pkg/testutil"
+ "github.com/opencontainers/go-digest"
+ "github.com/stretchr/testify/assert"
+)
+
+func defaultFSStoreBackend(t *testing.T) (StoreBackend, func()) {
+ tmpdir, err := ioutil.TempDir("", "images-fs-store")
+ assert.NoError(t, err)
+
+ fsBackend, err := NewFSStoreBackend(tmpdir)
+ assert.NoError(t, err)
+
+ return fsBackend, func() { os.RemoveAll(tmpdir) }
+}
+
+func TestFSGetInvalidData(t *testing.T) {
+ store, cleanup := defaultFSStoreBackend(t)
+ defer cleanup()
+
+ id, err := store.Set([]byte("foobar"))
+ assert.NoError(t, err)
+
+ dgst := digest.Digest(id)
+
+ err = ioutil.WriteFile(filepath.Join(store.(*fs).root, contentDirName, string(dgst.Algorithm()), dgst.Hex()), []byte("foobar2"), 0600)
+ assert.NoError(t, err)
+
+ _, err = store.Get(id)
+ testutil.ErrorContains(t, err, "failed to verify")
+}
+
+func TestFSInvalidSet(t *testing.T) {
+ store, cleanup := defaultFSStoreBackend(t)
+ defer cleanup()
+
+ id := digest.FromBytes([]byte("foobar"))
+ err := os.Mkdir(filepath.Join(store.(*fs).root, contentDirName, string(id.Algorithm()), id.Hex()), 0700)
+ assert.NoError(t, err)
+
+ _, err = store.Set([]byte("foobar"))
+ testutil.ErrorContains(t, err, "failed to write digest data")
+}
+
+func TestFSInvalidRoot(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "images-fs-store")
+ assert.NoError(t, err)
+ defer os.RemoveAll(tmpdir)
+
+ tcases := []struct {
+ root, invalidFile string
+ }{
+ {"root", "root"},
+ {"root", "root/content"},
+ {"root", "root/metadata"},
+ }
+
+ for _, tc := range tcases {
+ root := filepath.Join(tmpdir, tc.root)
+ filePath := filepath.Join(tmpdir, tc.invalidFile)
+ err := os.MkdirAll(filepath.Dir(filePath), 0700)
+ assert.NoError(t, err)
+
+ f, err := os.Create(filePath)
+ assert.NoError(t, err)
+ f.Close()
+
+ _, err = NewFSStoreBackend(root)
+ testutil.ErrorContains(t, err, "failed to create storage backend")
+
+ os.RemoveAll(root)
+ }
+
+}
+
+func TestFSMetadataGetSet(t *testing.T) {
+ store, cleanup := defaultFSStoreBackend(t)
+ defer cleanup()
+
+ id, err := store.Set([]byte("foo"))
+ assert.NoError(t, err)
+
+ id2, err := store.Set([]byte("bar"))
+ assert.NoError(t, err)
+
+ tcases := []struct {
+ id digest.Digest
+ key string
+ value []byte
+ }{
+ {id, "tkey", []byte("tval1")},
+ {id, "tkey2", []byte("tval2")},
+ {id2, "tkey", []byte("tval3")},
+ }
+
+ for _, tc := range tcases {
+ err = store.SetMetadata(tc.id, tc.key, tc.value)
+ assert.NoError(t, err)
+
+ actual, err := store.GetMetadata(tc.id, tc.key)
+ assert.NoError(t, err)
+
+ if bytes.Compare(actual, tc.value) != 0 {
+ t.Fatalf("Metadata expected %q, got %q", tc.value, actual)
+ }
+ }
+
+ _, err = store.GetMetadata(id2, "tkey2")
+ testutil.ErrorContains(t, err, "failed to read metadata")
+
+ id3 := digest.FromBytes([]byte("baz"))
+ err = store.SetMetadata(id3, "tkey", []byte("tval"))
+ testutil.ErrorContains(t, err, "failed to get digest")
+
+ _, err = store.GetMetadata(id3, "tkey")
+ testutil.ErrorContains(t, err, "failed to get digest")
+}
+
+func TestFSInvalidWalker(t *testing.T) {
+ store, cleanup := defaultFSStoreBackend(t)
+ defer cleanup()
+
+ fooID, err := store.Set([]byte("foo"))
+ assert.NoError(t, err)
+
+ err = ioutil.WriteFile(filepath.Join(store.(*fs).root, contentDirName, "sha256/foobar"), []byte("foobar"), 0600)
+ assert.NoError(t, err)
+
+ n := 0
+ err = store.Walk(func(id digest.Digest) error {
+ assert.Equal(t, fooID, id)
+ n++
+ return nil
+ })
+ assert.NoError(t, err)
+ assert.Equal(t, 1, n)
+}
+
+func TestFSGetSet(t *testing.T) {
+ store, cleanup := defaultFSStoreBackend(t)
+ defer cleanup()
+
+ type tcase struct {
+ input []byte
+ expected digest.Digest
+ }
+ tcases := []tcase{
+ {[]byte("foobar"), digest.Digest("sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2")},
+ }
+
+ randomInput := make([]byte, 8*1024)
+ _, err := rand.Read(randomInput)
+ assert.NoError(t, err)
+
+ // skipping use of digest pkg because it is used by the implementation
+ h := sha256.New()
+ _, err = h.Write(randomInput)
+ assert.NoError(t, err)
+
+ tcases = append(tcases, tcase{
+ input: randomInput,
+ expected: digest.Digest("sha256:" + hex.EncodeToString(h.Sum(nil))),
+ })
+
+ for _, tc := range tcases {
+ id, err := store.Set([]byte(tc.input))
+ assert.NoError(t, err)
+ assert.Equal(t, tc.expected, id)
+ }
+
+ for _, tc := range tcases {
+ data, err := store.Get(tc.expected)
+ assert.NoError(t, err)
+ if bytes.Compare(data, tc.input) != 0 {
+ t.Fatalf("expected data %q, got %q", tc.input, data)
+ }
+ }
+}
+
+func TestFSGetUnsetKey(t *testing.T) {
+ store, cleanup := defaultFSStoreBackend(t)
+ defer cleanup()
+
+ for _, key := range []digest.Digest{"foobar:abc", "sha256:abc", "sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2a"} {
+ _, err := store.Get(key)
+ testutil.ErrorContains(t, err, "failed to get digest")
+ }
+}
+
+func TestFSGetEmptyData(t *testing.T) {
+ store, cleanup := defaultFSStoreBackend(t)
+ defer cleanup()
+
+ for _, emptyData := range [][]byte{nil, {}} {
+ _, err := store.Set(emptyData)
+ testutil.ErrorContains(t, err, "invalid empty data")
+ }
+}
+
+func TestFSDelete(t *testing.T) {
+ store, cleanup := defaultFSStoreBackend(t)
+ defer cleanup()
+
+ id, err := store.Set([]byte("foo"))
+ assert.NoError(t, err)
+
+ id2, err := store.Set([]byte("bar"))
+ assert.NoError(t, err)
+
+ err = store.Delete(id)
+ assert.NoError(t, err)
+
+ _, err = store.Get(id)
+ testutil.ErrorContains(t, err, "failed to get digest")
+
+ _, err = store.Get(id2)
+ assert.NoError(t, err)
+
+ err = store.Delete(id2)
+ assert.NoError(t, err)
+
+ _, err = store.Get(id2)
+ testutil.ErrorContains(t, err, "failed to get digest")
+}
+
+func TestFSWalker(t *testing.T) {
+ store, cleanup := defaultFSStoreBackend(t)
+ defer cleanup()
+
+ id, err := store.Set([]byte("foo"))
+ assert.NoError(t, err)
+
+ id2, err := store.Set([]byte("bar"))
+ assert.NoError(t, err)
+
+ tcases := make(map[digest.Digest]struct{})
+ tcases[id] = struct{}{}
+ tcases[id2] = struct{}{}
+ n := 0
+ err = store.Walk(func(id digest.Digest) error {
+ delete(tcases, id)
+ n++
+ return nil
+ })
+ assert.NoError(t, err)
+ assert.Equal(t, 2, n)
+ assert.Len(t, tcases, 0)
+}
+
+func TestFSWalkerStopOnError(t *testing.T) {
+ store, cleanup := defaultFSStoreBackend(t)
+ defer cleanup()
+
+ id, err := store.Set([]byte("foo"))
+ assert.NoError(t, err)
+
+ tcases := make(map[digest.Digest]struct{})
+ tcases[id] = struct{}{}
+ err = store.Walk(func(id digest.Digest) error {
+ return errors.New("what")
+ })
+ testutil.ErrorContains(t, err, "what")
+}
diff --git a/image/image.go b/image/image.go
new file mode 100644
index 0000000..ab95d93
--- /dev/null
+++ b/image/image.go
@@ -0,0 +1,220 @@
+package image
+
+import (
+ "encoding/json"
+ "errors"
+ "io"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/dockerversion"
+ "github.com/docker/docker/layer"
+ "github.com/opencontainers/go-digest"
+)
+
+// ID is the content-addressable ID of an image.
+type ID digest.Digest
+
+func (id ID) String() string {
+ return id.Digest().String()
+}
+
+// Digest converts ID into a digest
+func (id ID) Digest() digest.Digest {
+ return digest.Digest(id)
+}
+
+// IDFromDigest creates an ID from a digest
+func IDFromDigest(digest digest.Digest) ID {
+ return ID(digest)
+}
+
+// V1Image stores the V1 image configuration.
+type V1Image struct {
+ // ID is a unique 64 character identifier of the image
+ ID string `json:"id,omitempty"`
+ // Parent is the ID of the parent image
+ Parent string `json:"parent,omitempty"`
+ // Comment is the commit message that was set when committing the image
+ Comment string `json:"comment,omitempty"`
+ // Created is the timestamp at which the image was created
+ Created time.Time `json:"created"`
+ // Container is the id of the container used to commit
+ Container string `json:"container,omitempty"`
+ // ContainerConfig is the configuration of the container that is committed into the image
+ ContainerConfig container.Config `json:"container_config,omitempty"`
+ // DockerVersion specifies the version of Docker that was used to build the image
+ DockerVersion string `json:"docker_version,omitempty"`
+ // Author is the name of the author that was specified when committing the image
+ Author string `json:"author,omitempty"`
+ // Config is the configuration of the container received from the client
+ Config *container.Config `json:"config,omitempty"`
+ // Architecture is the hardware that the image is built and runs on
+ Architecture string `json:"architecture,omitempty"`
+ // OS is the operating system used to build and run the image
+ OS string `json:"os,omitempty"`
+ // Size is the total size of the image including all layers it is composed of
+ Size int64 `json:",omitempty"`
+}
+
+// Image stores the image configuration
+type Image struct {
+ V1Image
+ Parent ID `json:"parent,omitempty"`
+ RootFS *RootFS `json:"rootfs,omitempty"`
+ History []History `json:"history,omitempty"`
+ OSVersion string `json:"os.version,omitempty"`
+ OSFeatures []string `json:"os.features,omitempty"`
+
+ // rawJSON caches the immutable JSON associated with this image.
+ rawJSON []byte
+
+ // computedID is the ID computed from the hash of the image config.
+ // Not to be confused with the legacy V1 ID in V1Image.
+ computedID ID
+}
+
+// RawJSON returns the immutable JSON associated with the image.
+func (img *Image) RawJSON() []byte {
+ return img.rawJSON
+}
+
+// ID returns the image's content-addressable ID.
+func (img *Image) ID() ID {
+ return img.computedID
+}
+
+// ImageID stringifies ID.
+func (img *Image) ImageID() string {
+ return img.ID().String()
+}
+
+// RunConfig returns the image's container config.
+func (img *Image) RunConfig() *container.Config {
+ return img.Config
+}
+
+// Platform returns the image's operating system. If not populated, defaults to the host runtime OS.
+func (img *Image) Platform() string {
+ os := img.OS
+ if os == "" {
+ os = runtime.GOOS
+ }
+ return os
+}
+
+// MarshalJSON serializes the image to JSON. It sorts the top-level keys so
+// that JSON that's been manipulated by a push/pull cycle with a legacy
+// registry won't end up with a different key order.
+func (img *Image) MarshalJSON() ([]byte, error) {
+ type MarshalImage Image
+
+ pass1, err := json.Marshal(MarshalImage(*img))
+ if err != nil {
+ return nil, err
+ }
+
+ var c map[string]*json.RawMessage
+ if err := json.Unmarshal(pass1, &c); err != nil {
+ return nil, err
+ }
+ return json.Marshal(c)
+}
+
+// ChildConfig is the configuration to apply to an Image to create a new
+// Child image. Other properties of the image are copied from the parent.
+type ChildConfig struct {
+ ContainerID string
+ Author string
+ Comment string
+ DiffID layer.DiffID
+ ContainerConfig *container.Config
+ Config *container.Config
+}
+
+// NewChildImage creates a new Image as a child of this image.
+func NewChildImage(img *Image, child ChildConfig, platform string) *Image {
+ isEmptyLayer := layer.IsEmpty(child.DiffID)
+ rootFS := img.RootFS
+ if rootFS == nil {
+ rootFS = NewRootFS()
+ }
+ if !isEmptyLayer {
+ rootFS.Append(child.DiffID)
+ }
+ imgHistory := NewHistory(
+ child.Author,
+ child.Comment,
+ strings.Join(child.ContainerConfig.Cmd, " "),
+ isEmptyLayer)
+
+ return &Image{
+ V1Image: V1Image{
+ DockerVersion: dockerversion.Version,
+ Config: child.Config,
+ Architecture: runtime.GOARCH,
+ OS: platform,
+ Container: child.ContainerID,
+ ContainerConfig: *child.ContainerConfig,
+ Author: child.Author,
+ Created: imgHistory.Created,
+ },
+ RootFS: rootFS,
+ History: append(img.History, imgHistory),
+ OSFeatures: img.OSFeatures,
+ OSVersion: img.OSVersion,
+ }
+}
+
+// History stores build commands that were used to create an image
+type History struct {
+ // Created is the timestamp at which the image was created
+ Created time.Time `json:"created"`
+ // Author is the name of the author that was specified when committing the image
+ Author string `json:"author,omitempty"`
+ // CreatedBy keeps the Dockerfile command used while building the image
+ CreatedBy string `json:"created_by,omitempty"`
+ // Comment is the commit message that was set when committing the image
+ Comment string `json:"comment,omitempty"`
+ // EmptyLayer is set to true if this history item did not generate a
+ // layer. Otherwise, the history item is associated with the next
+ // layer in the RootFS section.
+ EmptyLayer bool `json:"empty_layer,omitempty"`
+}
+
+// NewHistory creates a new history struct from arguments, and sets the created
+// time to the current time in UTC
+func NewHistory(author, comment, createdBy string, isEmptyLayer bool) History {
+ return History{
+ Author: author,
+ Created: time.Now().UTC(),
+ CreatedBy: createdBy,
+ Comment: comment,
+ EmptyLayer: isEmptyLayer,
+ }
+}
+
+// Exporter provides interface for loading and saving images
+type Exporter interface {
+ Load(io.ReadCloser, io.Writer, bool) error
+ // TODO: Load(net.Context, io.ReadCloser, <- chan StatusMessage) error
+ Save([]string, io.Writer) error
+}
+
+// NewFromJSON creates an Image configuration from json.
+func NewFromJSON(src []byte) (*Image, error) {
+ img := &Image{}
+
+ if err := json.Unmarshal(src, img); err != nil {
+ return nil, err
+ }
+ if img.RootFS == nil {
+ return nil, errors.New("invalid image JSON, no RootFS key")
+ }
+
+ img.rawJSON = src
+
+ return img, nil
+}
diff --git a/image/image_test.go b/image/image_test.go
new file mode 100644
index 0000000..1455947
--- /dev/null
+++ b/image/image_test.go
@@ -0,0 +1,53 @@
+package image
+
+import (
+ "encoding/json"
+ "sort"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// sampleImageJSON is a minimal valid image config: NewFromJSON requires
+// the "rootfs" key to be present, so it is included even though empty.
+const sampleImageJSON = `{
+ "architecture": "amd64",
+ "os": "linux",
+ "config": {},
+ "rootfs": {
+  "type": "layers",
+  "diff_ids": []
+ }
+}`
+
+// TestNewFromJSON checks that a valid config parses and that the original
+// raw bytes are retained verbatim on the Image (RawJSON round-trip).
+func TestNewFromJSON(t *testing.T) {
+ img, err := NewFromJSON([]byte(sampleImageJSON))
+ require.NoError(t, err)
+ assert.Equal(t, sampleImageJSON, string(img.RawJSON()))
+}
+
+// TestNewFromJSONWithInvalidJSON ensures a config without a rootfs key is
+// rejected. Note "{}" is well-formed JSON but an invalid image config.
+func TestNewFromJSONWithInvalidJSON(t *testing.T) {
+ _, err := NewFromJSON([]byte("{}"))
+ assert.EqualError(t, err, "invalid image JSON, no RootFS key")
+}
+
+// TestMarshalKeyOrder verifies that marshalled image JSON emits its keys
+// in a stable (alphabetical) order. Key order matters because the image ID
+// is a hash of these serialized bytes (see the spec's ImageID definition).
+func TestMarshalKeyOrder(t *testing.T) {
+ b, err := json.Marshal(&Image{
+  V1Image: V1Image{
+   Comment:      "a",
+   Author:       "b",
+   Architecture: "c",
+  },
+ })
+ assert.NoError(t, err)
+
+ // Record where each expected key appears in the output; if the
+ // positions are already sorted, the keys appear in the right order.
+ expectedOrder := []string{"architecture", "author", "comment"}
+ var indexes []int
+ for _, k := range expectedOrder {
+  indexes = append(indexes, strings.Index(string(b), k))
+ }
+
+ if !sort.IntsAreSorted(indexes) {
+  t.Fatal("invalid key order in JSON: ", string(b))
+ }
+}
diff --git a/image/rootfs.go b/image/rootfs.go
new file mode 100644
index 0000000..7b24e3e
--- /dev/null
+++ b/image/rootfs.go
@@ -0,0 +1,44 @@
+package image
+
+import (
+ "runtime"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/layer"
+)
+
+// TypeLayers is used for RootFS.Type for filesystems organized into layers.
+// It is the value serialized into the rootfs "type" JSON field.
+const TypeLayers = "layers"
+
+// typeLayersWithBase is an older format used by Windows up to v1.12. We
+// explicitly handle this as an error case to ensure that a daemon which still
+// has an older image like this on disk can still start, even though the
+// image itself is not usable. See https://github.com/docker/docker/pull/25806.
+const typeLayersWithBase = "layers+base"
+
+// RootFS describes images root filesystem
+// This is currently a placeholder that only supports layers. In the future
+// this can be made into an interface that supports different implementations.
+type RootFS struct {
+ // Type is normally TypeLayers; see typeLayersWithBase for the legacy case.
+ Type string `json:"type"`
+ // DiffIDs lists the layer content hashes, bottom-most layer first.
+ DiffIDs []layer.DiffID `json:"diff_ids,omitempty"`
+}
+
+// NewRootFS returns a RootFS initialized to the layer-based type with no
+// diff IDs recorded yet.
+func NewRootFS() *RootFS {
+ r := RootFS{Type: TypeLayers}
+ return &r
+}
+
+// Append appends a new diffID to rootfs, recording it as the new top-most
+// layer of the stack.
+func (r *RootFS) Append(id layer.DiffID) {
+ r.DiffIDs = append(r.DiffIDs, id)
+}
+
+// ChainID returns the ChainID for the top layer in RootFS.
+// The legacy Windows-only "layers+base" type cannot be expressed as a
+// DiffID chain, so it is logged and reported as the empty ChainID rather
+// than computed incorrectly (see the typeLayersWithBase comment above).
+func (r *RootFS) ChainID() layer.ChainID {
+ if runtime.GOOS == "windows" && r.Type == typeLayersWithBase {
+  logrus.Warnf("Layer type is unsupported on this platform. DiffIDs: '%v'", r.DiffIDs)
+  return ""
+ }
+ return layer.CreateChainID(r.DiffIDs)
+}
diff --git a/image/spec/v1.1.md b/image/spec/v1.1.md
new file mode 100644
index 0000000..ce761f1
--- /dev/null
+++ b/image/spec/v1.1.md
@@ -0,0 +1,637 @@
+# Docker Image Specification v1.1.0
+
+An *Image* is an ordered collection of root filesystem changes and the
+corresponding execution parameters for use within a container runtime. This
+specification outlines the format of these filesystem changes and corresponding
+parameters and describes how to create and use them for use with a container
+runtime and execution tool.
+
+This version of the image specification was adopted starting in Docker 1.10.
+
+## Terminology
+
+This specification uses the following terms:
+
+<dl>
+ <dt>
+ Layer
+ </dt>
+ <dd>
+ Images are composed of <i>layers</i>. Each layer is a set of filesystem
+ changes. Layers do not have configuration metadata such as environment
+ variables or default arguments - these are properties of the image as a
+ whole rather than any particular layer.
+ </dd>
+ <dt>
+ Image JSON
+ </dt>
+ <dd>
+ Each image has an associated JSON structure which describes some
+ basic information about the image such as date created, author, and the
+ ID of its parent image as well as execution/runtime configuration like
+ its entry point, default arguments, CPU/memory shares, networking, and
+ volumes. The JSON structure also references a cryptographic hash of
+ each layer used by the image, and provides history information for
+ those layers. This JSON is considered to be immutable, because changing
+ it would change the computed ImageID. Changing it means creating a new
+ derived image, instead of changing the existing image.
+ </dd>
+ <dt>
+ Image Filesystem Changeset
+ </dt>
+ <dd>
+ Each layer has an archive of the files which have been added, changed,
+ or deleted relative to its parent layer. Using a layer-based or union
+ filesystem such as AUFS, or by computing the diff from filesystem
+ snapshots, the filesystem changeset can be used to present a series of
+ image layers as if they were one cohesive filesystem.
+ </dd>
+ <dt>
+ Layer DiffID
+ </dt>
+ <dd>
+ Layers are referenced by cryptographic hashes of their serialized
+ representation. This is a SHA256 digest over the tar archive used to
+ transport the layer, represented as a hexadecimal encoding of 256 bits, e.g.,
+ <code>sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9</code>.
+ Layers must be packed and unpacked reproducibly to avoid changing the
+ layer ID, for example by using tar-split to save the tar headers. Note
+ that the digest used as the layer ID is taken over an uncompressed
+ version of the tar.
+ </dd>
+ <dt>
+ Layer ChainID
+ </dt>
+ <dd>
+ For convenience, it is sometimes useful to refer to a stack of layers
+ with a single identifier. This is called a <code>ChainID</code>. For a
+ single layer (or the layer at the bottom of a stack), the
+ <code>ChainID</code> is equal to the layer's <code>DiffID</code>.
+ Otherwise the <code>ChainID</code> is given by the formula:
+ <code>ChainID(layerN) = SHA256hex(ChainID(layerN-1) + " " + DiffID(layerN))</code>.
+ </dd>
+ <dt>
+ ImageID <a name="id_desc"></a>
+ </dt>
+ <dd>
+ Each image's ID is given by the SHA256 hash of its configuration JSON. It is
+ represented as a hexadecimal encoding of 256 bits, e.g.,
+ <code>sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9</code>.
+ Since the configuration JSON that gets hashed references hashes of each
+ layer in the image, this formulation of the ImageID makes images
+ content-addressable.
+ </dd>
+ <dt>
+ Tag
+ </dt>
+ <dd>
+ A tag serves to map a descriptive, user-given name to any single image
+ ID. Tag values are limited to the set of characters
+ <code>[a-zA-Z0-9_.-]</code>, except they may not start with a <code>.</code>
+ or <code>-</code> character. Tags are limited to 128 characters.
+ </dd>
+ <dt>
+ Repository
+ </dt>
+ <dd>
+ A collection of tags grouped under a common prefix (the name component
+ before <code>:</code>). For example, in an image tagged with the name
+ <code>my-app:3.1.4</code>, <code>my-app</code> is the <i>Repository</i>
+ component of the name. A repository name is made up of slash-separated
+ name components, optionally prefixed by a DNS hostname. The hostname
+ must comply with standard DNS rules, but may not contain
+ <code>_</code> characters. If a hostname is present, it may optionally
+ be followed by a port number in the format <code>:8080</code>.
+ Name components may contain lowercase characters, digits, and
+ separators. A separator is defined as a period, one or two underscores,
+ or one or more dashes. A name component may not start or end with
+ a separator.
+ </dd>
+</dl>
+
+## Image JSON Description
+
+Here is an example image JSON file:
+
+```
+{
+ "created": "2015-10-31T22:22:56.015925234Z",
+ "author": "Alyssa P. Hacker &lt;alyspdev@example.com&gt;",
+ "architecture": "amd64",
+ "os": "linux",
+ "config": {
+ "User": "alice",
+ "Memory": 2048,
+ "MemorySwap": 4096,
+ "CpuShares": 8,
+ "ExposedPorts": {
+ "8080/tcp": {}
+ },
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "FOO=docker_is_a_really",
+ "BAR=great_tool_you_know"
+ ],
+ "Entrypoint": [
+ "/bin/my-app-binary"
+ ],
+ "Cmd": [
+ "--foreground",
+ "--config",
+ "/etc/my-app.d/default.cfg"
+ ],
+ "Volumes": {
+ "/var/job-result-data": {},
+ "/var/log/my-app-logs": {}
+ },
+ "WorkingDir": "/home/alice"
+ },
+ "rootfs": {
+ "diff_ids": [
+ "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
+ "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
+ ],
+ "type": "layers"
+ },
+ "history": [
+ {
+ "created": "2015-10-31T22:22:54.690851953Z",
+ "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
+ },
+ {
+ "created": "2015-10-31T22:22:55.613815829Z",
+ "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
+ "empty_layer": true
+ }
+ ]
+}
+```
+
+Note that image JSON files produced by Docker don't contain formatting
+whitespace. It has been added to this example for clarity.
+
+### Image JSON Field Descriptions
+
+<dl>
+ <dt>
+ created <code>string</code>
+ </dt>
+ <dd>
+ ISO-8601 formatted combined date and time at which the image was
+ created.
+ </dd>
+ <dt>
+ author <code>string</code>
+ </dt>
+ <dd>
+ Gives the name and/or email address of the person or entity which
+ created and is responsible for maintaining the image.
+ </dd>
+ <dt>
+ architecture <code>string</code>
+ </dt>
+ <dd>
+ The CPU architecture which the binaries in this image are built to run
+ on. Possible values include:
+ <ul>
+ <li>386</li>
+ <li>amd64</li>
+ <li>arm</li>
+ </ul>
+ More values may be supported in the future and any of these may or may
+ not be supported by a given container runtime implementation.
+ </dd>
+ <dt>
+ os <code>string</code>
+ </dt>
+ <dd>
+ The name of the operating system which the image is built to run on.
+ Possible values include:
+ <ul>
+ <li>darwin</li>
+ <li>freebsd</li>
+ <li>linux</li>
+ </ul>
+ More values may be supported in the future and any of these may or may
+ not be supported by a given container runtime implementation.
+ </dd>
+ <dt>
+ config <code>struct</code>
+ </dt>
+ <dd>
+ The execution parameters which should be used as a base when running a
+ container using the image. This field can be <code>null</code>, in
+ which case any execution parameters should be specified at creation of
+ the container.
+
+ <h4>Container RunConfig Field Descriptions</h4>
+
+ <dl>
+ <dt>
+ User <code>string</code>
+ </dt>
+ <dd>
+ <p>The username or UID which the process in the container should
+ run as. This acts as a default value to use when the value is
+ not specified when creating a container.</p>
+
+ <p>All of the following are valid:</p>
+
+ <ul>
+ <li><code>user</code></li>
+ <li><code>uid</code></li>
+ <li><code>user:group</code></li>
+ <li><code>uid:gid</code></li>
+ <li><code>uid:group</code></li>
+ <li><code>user:gid</code></li>
+ </ul>
+
+ <p>If <code>group</code>/<code>gid</code> is not specified, the
+ default group and supplementary groups of the given
+ <code>user</code>/<code>uid</code> in <code>/etc/passwd</code>
+ from the container are applied.</p>
+ </dd>
+ <dt>
+ Memory <code>integer</code>
+ </dt>
+ <dd>
+ Memory limit (in bytes). This acts as a default value to use
+ when the value is not specified when creating a container.
+ </dd>
+ <dt>
+ MemorySwap <code>integer</code>
+ </dt>
+ <dd>
+ Total memory usage (memory + swap); set to <code>-1</code> to
+ disable swap. This acts as a default value to use when the
+ value is not specified when creating a container.
+ </dd>
+ <dt>
+ CpuShares <code>integer</code>
+ </dt>
+ <dd>
+ CPU shares (relative weight vs. other containers). This acts as
+ a default value to use when the value is not specified when
+ creating a container.
+ </dd>
+ <dt>
+ ExposedPorts <code>struct</code>
+ </dt>
+ <dd>
+ A set of ports to expose from a container running this image.
+ This JSON structure value is unusual because it is a direct
+ JSON serialization of the Go type
+ <code>map[string]struct{}</code> and is represented in JSON as
+ an object mapping its keys to an empty object. Here is an
+ example:
+
+<pre>{
+ "8080": {},
+ "53/udp": {},
+ "2356/tcp": {}
+}</pre>
+
+ Its keys can be in the format of:
+ <ul>
+ <li>
+ <code>"port/tcp"</code>
+ </li>
+ <li>
+ <code>"port/udp"</code>
+ </li>
+ <li>
+ <code>"port"</code>
+ </li>
+ </ul>
+ with the default protocol being <code>"tcp"</code> if not
+ specified.
+
+ These values act as defaults and are merged with any specified
+ when creating a container.
+ </dd>
+ <dt>
+ Env <code>array of strings</code>
+ </dt>
+ <dd>
+ Entries are in the format of <code>VARNAME="var value"</code>.
+ These values act as defaults and are merged with any specified
+ when creating a container.
+ </dd>
+ <dt>
+ Entrypoint <code>array of strings</code>
+ </dt>
+ <dd>
+ A list of arguments to use as the command to execute when the
+ container starts. This value acts as a default and is replaced
+ by an entrypoint specified when creating a container.
+ </dd>
+ <dt>
+ Cmd <code>array of strings</code>
+ </dt>
+ <dd>
+ Default arguments to the entry point of the container. These
+ values act as defaults and are replaced with any specified when
+ creating a container. If an <code>Entrypoint</code> value is
+ not specified, then the first entry of the <code>Cmd</code>
+ array should be interpreted as the executable to run.
+ </dd>
+ <dt>
+ Volumes <code>struct</code>
+ </dt>
+ <dd>
+ A set of directories which should be created as data volumes in
+ a container running this image. This JSON structure value is
+ unusual because it is a direct JSON serialization of the Go
+ type <code>map[string]struct{}</code> and is represented in
+ JSON as an object mapping its keys to an empty object. Here is
+ an example:
+<pre>{
+ "/var/my-app-data/": {},
+ "/etc/some-config.d/": {}
+}</pre>
+ </dd>
+ <dt>
+ WorkingDir <code>string</code>
+ </dt>
+ <dd>
+ Sets the current working directory of the entry point process
+ in the container. This value acts as a default and is replaced
+ by a working directory specified when creating a container.
+ </dd>
+ </dl>
+ </dd>
+ <dt>
+ rootfs <code>struct</code>
+ </dt>
+ <dd>
+ The rootfs key references the layer content addresses used by the
+ image. This makes the image config hash depend on the filesystem hash.
+ rootfs has two subkeys:
+
+ <ul>
+ <li>
+ <code>type</code> is usually set to <code>layers</code>.
+ </li>
+ <li>
+ <code>diff_ids</code> is an array of layer content hashes (<code>DiffIDs</code>), in order from bottom-most to top-most.
+ </li>
+ </ul>
+
+
+ Here is an example rootfs section:
+
+<pre>"rootfs": {
+ "diff_ids": [
+ "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
+ "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
+ "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"
+ ],
+ "type": "layers"
+}</pre>
+ </dd>
+ <dt>
+ history <code>struct</code>
+ </dt>
+ <dd>
+ <code>history</code> is an array of objects describing the history of
+ each layer. The array is ordered from bottom-most layer to top-most
+ layer. The object has the following fields.
+
+ <ul>
+ <li>
+ <code>created</code>: Creation time, expressed as a ISO-8601 formatted
+ combined date and time
+ </li>
+ <li>
+ <code>author</code>: The author of the build point
+ </li>
+ <li>
+ <code>created_by</code>: The command which created the layer
+ </li>
+ <li>
+ <code>comment</code>: A custom message set when creating the layer
+ </li>
+ <li>
+ <code>empty_layer</code>: This field is used to mark if the history
+ item created a filesystem diff. It is set to true if this history
+ item doesn't correspond to an actual layer in the rootfs section
+ (for example, a command like ENV which results in no change to the
+ filesystem).
+ </li>
+ </ul>
+
+Here is an example history section:
+
+<pre>"history": [
+ {
+ "created": "2015-10-31T22:22:54.690851953Z",
+ "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
+ },
+ {
+ "created": "2015-10-31T22:22:55.613815829Z",
+ "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
+ "empty_layer": true
+ }
+]</pre>
+ </dd>
+</dl>
+
+Any extra fields in the Image JSON struct are considered implementation
+specific and should be ignored by any implementations which are unable to
+interpret them.
+
+## Creating an Image Filesystem Changeset
+
+An example of creating an Image Filesystem Changeset follows.
+
+An image root filesystem is first created as an empty directory. Here is the
+initial empty directory structure for the a changeset using the
+randomly-generated directory name `c3167915dc9d` ([actual layer DiffIDs are
+generated based on the content](#id_desc)).
+
+```
+c3167915dc9d/
+```
+
+Files and directories are then created:
+
+```
+c3167915dc9d/
+ etc/
+ my-app-config
+ bin/
+ my-app-binary
+ my-app-tools
+```
+
+The `c3167915dc9d` directory is then committed as a plain Tar archive with
+entries for the following files:
+
+```
+etc/my-app-config
+bin/my-app-binary
+bin/my-app-tools
+```
+
+To make changes to the filesystem of this container image, create a new
+directory, such as `f60c56784b83`, and initialize it with a snapshot of the
+parent image's root filesystem, so that the directory is identical to that
+of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem can make this very
+efficient:
+
+```
+f60c56784b83/
+ etc/
+ my-app-config
+ bin/
+ my-app-binary
+ my-app-tools
+```
+
+This example change is going to add a configuration directory at `/etc/my-app.d`
+which contains a default config file. There's also a change to the
+`my-app-tools` binary to handle the config layout change. The `f60c56784b83`
+directory then looks like this:
+
+```
+f60c56784b83/
+ etc/
+ my-app.d/
+ default.cfg
+ bin/
+ my-app-binary
+ my-app-tools
+```
+
+This reflects the removal of `/etc/my-app-config` and creation of a file and
+directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been
+replaced with an updated version. Before committing this directory to a
+changeset, because it has a parent image, it is first compared with the
+directory tree of the parent snapshot, `f60c56784b83`, looking for files and
+directories that have been added, modified, or removed. The following changeset
+is found:
+
+```
+Added: /etc/my-app.d/default.cfg
+Modified: /bin/my-app-tools
+Deleted: /etc/my-app-config
+```
+
+A Tar Archive is then created which contains *only* this changeset: The added
+and modified files and directories in their entirety, and for each deleted item
+an entry for an empty file at the same location but with the basename of the
+deleted file or directory prefixed with `.wh.`. The filenames prefixed with
+`.wh.` are known as "whiteout" files. NOTE: For this reason, it is not possible
+to create an image root filesystem which contains a file or directory with a
+name beginning with `.wh.`. The resulting Tar archive for `f60c56784b83` has
+the following entries:
+
+```
+/etc/my-app.d/default.cfg
+/bin/my-app-tools
+/etc/.wh.my-app-config
+```
+
+Any given image is likely to be composed of several of these Image Filesystem
+Changeset tar archives.
+
+## Combined Image JSON + Filesystem Changeset Format
+
+There is also a format for a single archive which contains complete information
+about an image, including:
+
+ - repository names/tags
+ - image configuration JSON file
+ - all tar archives of each layer filesystem changesets
+
+For example, here's what the full archive of `library/busybox` is (displayed in
+`tree` format):
+
+```
+.
+├── 47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json
+├── 5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a
+│   ├── VERSION
+│   ├── json
+│   └── layer.tar
+├── a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198
+│   ├── VERSION
+│   ├── json
+│   └── layer.tar
+├── manifest.json
+└── repositories
+```
+
+There is a directory for each layer in the image. Each directory is named with
+a 64 character hex name that is deterministically generated from the layer
+information. These names are not necessarily layer DiffIDs or ChainIDs. Each of
+these directories contains 3 files:
+
+ * `VERSION` - The schema version of the `json` file
+ * `json` - The legacy JSON metadata for an image layer. In this version of
+ the image specification, layers don't have JSON metadata, but in
+ [version 1](v1.md), they did. A file is created for each layer in the
+ v1 format for backward compatibility.
+ * `layer.tar` - The Tar archive of the filesystem changeset for an image
+ layer.
+
+Note that this directory layout is only important for backward compatibility.
+Current implementations use the paths specified in `manifest.json`.
+
+The content of the `VERSION` files is simply the semantic version of the JSON
+metadata schema:
+
+```
+1.0
+```
+
+The `repositories` file is another JSON file which describes names/tags:
+
+```
+{
+ "busybox":{
+ "latest":"5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a"
+ }
+}
+```
+
+Every key in this object is the name of a repository, and maps to a collection
+of tag suffixes. Each tag maps to the ID of the image represented by that tag.
+This file is only used for backwards compatibility. Current implementations use
+the `manifest.json` file instead.
+
+The `manifest.json` file provides the image JSON for the top-level image, and
+optionally for parent images that this image was derived from. It consists of
+an array of metadata entries:
+
+```
+[
+ {
+ "Config": "47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json",
+ "RepoTags": ["busybox:latest"],
+ "Layers": [
+ "a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198/layer.tar",
+ "5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a/layer.tar"
+ ]
+ }
+]
+```
+
+There is an entry in the array for each image.
+
+The `Config` field references another file in the tar which includes the image
+JSON for this image.
+
+The `RepoTags` field lists references pointing to this image.
+
+The `Layers` field points to the filesystem changeset tars.
+
+An optional `Parent` field references the imageID of the parent image. This
+parent must be part of the same `manifest.json` file.
+
+This file shouldn't be confused with the distribution manifest, used to push
+and pull images.
+
+Generally, implementations that support this version of the spec will use
+the `manifest.json` file if available, and older implementations will use the
+legacy `*/json` files and `repositories`.
diff --git a/image/spec/v1.2.md b/image/spec/v1.2.md
new file mode 100644
index 0000000..789680c
--- /dev/null
+++ b/image/spec/v1.2.md
@@ -0,0 +1,696 @@
+# Docker Image Specification v1.2.0
+
+An *Image* is an ordered collection of root filesystem changes and the
+corresponding execution parameters for use within a container runtime. This
+specification outlines the format of these filesystem changes and corresponding
+parameters and describes how to create and use them for use with a container
+runtime and execution tool.
+
+This version of the image specification was adopted starting in Docker 1.12.
+
+## Terminology
+
+This specification uses the following terms:
+
+<dl>
+ <dt>
+ Layer
+ </dt>
+ <dd>
+ Images are composed of <i>layers</i>. Each layer is a set of filesystem
+ changes. Layers do not have configuration metadata such as environment
+ variables or default arguments - these are properties of the image as a
+ whole rather than any particular layer.
+ </dd>
+ <dt>
+ Image JSON
+ </dt>
+ <dd>
+ Each image has an associated JSON structure which describes some
+ basic information about the image such as date created, author, and the
+ ID of its parent image as well as execution/runtime configuration like
+ its entry point, default arguments, CPU/memory shares, networking, and
+ volumes. The JSON structure also references a cryptographic hash of
+ each layer used by the image, and provides history information for
+ those layers. This JSON is considered to be immutable, because changing
+ it would change the computed ImageID. Changing it means creating a new
+ derived image, instead of changing the existing image.
+ </dd>
+ <dt>
+ Image Filesystem Changeset
+ </dt>
+ <dd>
+ Each layer has an archive of the files which have been added, changed,
+ or deleted relative to its parent layer. Using a layer-based or union
+ filesystem such as AUFS, or by computing the diff from filesystem
+ snapshots, the filesystem changeset can be used to present a series of
+ image layers as if they were one cohesive filesystem.
+ </dd>
+ <dt>
+ Layer DiffID
+ </dt>
+ <dd>
+ Layers are referenced by cryptographic hashes of their serialized
+ representation. This is a SHA256 digest over the tar archive used to
+ transport the layer, represented as a hexadecimal encoding of 256 bits, e.g.,
+ <code>sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9</code>.
+ Layers must be packed and unpacked reproducibly to avoid changing the
+ layer ID, for example by using tar-split to save the tar headers. Note
+ that the digest used as the layer ID is taken over an uncompressed
+ version of the tar.
+ </dd>
+ <dt>
+ Layer ChainID
+ </dt>
+ <dd>
+ For convenience, it is sometimes useful to refer to a stack of layers
+ with a single identifier. This is called a <code>ChainID</code>. For a
+ single layer (or the layer at the bottom of a stack), the
+ <code>ChainID</code> is equal to the layer's <code>DiffID</code>.
+ Otherwise the <code>ChainID</code> is given by the formula:
+ <code>ChainID(layerN) = SHA256hex(ChainID(layerN-1) + " " + DiffID(layerN))</code>.
+ </dd>
+ <dt>
+ ImageID <a name="id_desc"></a>
+ </dt>
+ <dd>
+ Each image's ID is given by the SHA256 hash of its configuration JSON. It is
+ represented as a hexadecimal encoding of 256 bits, e.g.,
+ <code>sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9</code>.
+ Since the configuration JSON that gets hashed references hashes of each
+ layer in the image, this formulation of the ImageID makes images
+ content-addressable.
+ </dd>
+ <dt>
+ Tag
+ </dt>
+ <dd>
+ A tag serves to map a descriptive, user-given name to any single image
+ ID. Tag values are limited to the set of characters
+ <code>[a-zA-Z0-9_.-]</code>, except they may not start with a <code>.</code>
+ or <code>-</code> character. Tags are limited to 128 characters.
+ </dd>
+ <dt>
+ Repository
+ </dt>
+ <dd>
+ A collection of tags grouped under a common prefix (the name component
+ before <code>:</code>). For example, in an image tagged with the name
+ <code>my-app:3.1.4</code>, <code>my-app</code> is the <i>Repository</i>
+ component of the name. A repository name is made up of slash-separated
+ name components, optionally prefixed by a DNS hostname. The hostname
+ must comply with standard DNS rules, but may not contain
+ <code>_</code> characters. If a hostname is present, it may optionally
+ be followed by a port number in the format <code>:8080</code>.
+ Name components may contain lowercase characters, digits, and
+ separators. A separator is defined as a period, one or two underscores,
+ or one or more dashes. A name component may not start or end with
+ a separator.
+ </dd>
+</dl>
+
+## Image JSON Description
+
+Here is an example image JSON file:
+
+```
+{
+ "created": "2015-10-31T22:22:56.015925234Z",
+ "author": "Alyssa P. Hacker &lt;alyspdev@example.com&gt;",
+ "architecture": "amd64",
+ "os": "linux",
+ "config": {
+ "User": "alice",
+ "Memory": 2048,
+ "MemorySwap": 4096,
+ "CpuShares": 8,
+ "ExposedPorts": {
+ "8080/tcp": {}
+ },
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "FOO=docker_is_a_really",
+ "BAR=great_tool_you_know"
+ ],
+ "Entrypoint": [
+ "/bin/my-app-binary"
+ ],
+ "Cmd": [
+ "--foreground",
+ "--config",
+ "/etc/my-app.d/default.cfg"
+ ],
+ "Volumes": {
+ "/var/job-result-data": {},
+ "/var/log/my-app-logs": {}
+ },
+ "WorkingDir": "/home/alice"
+ },
+ "rootfs": {
+ "diff_ids": [
+ "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
+ "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
+ ],
+ "type": "layers"
+ },
+ "history": [
+ {
+ "created": "2015-10-31T22:22:54.690851953Z",
+ "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
+ },
+ {
+ "created": "2015-10-31T22:22:55.613815829Z",
+ "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
+ "empty_layer": true
+ }
+ ]
+}
+```
+
+Note that image JSON files produced by Docker don't contain formatting
+whitespace. It has been added to this example for clarity.
+
+### Image JSON Field Descriptions
+
+<dl>
+ <dt>
+ created <code>string</code>
+ </dt>
+ <dd>
+ ISO-8601 formatted combined date and time at which the image was
+ created.
+ </dd>
+ <dt>
+ author <code>string</code>
+ </dt>
+ <dd>
+ Gives the name and/or email address of the person or entity which
+ created and is responsible for maintaining the image.
+ </dd>
+ <dt>
+ architecture <code>string</code>
+ </dt>
+ <dd>
+ The CPU architecture which the binaries in this image are built to run
+ on. Possible values include:
+ <ul>
+ <li>386</li>
+ <li>amd64</li>
+ <li>arm</li>
+ </ul>
+ More values may be supported in the future and any of these may or may
+ not be supported by a given container runtime implementation.
+ </dd>
+ <dt>
+ os <code>string</code>
+ </dt>
+ <dd>
+ The name of the operating system which the image is built to run on.
+ Possible values include:
+ <ul>
+ <li>darwin</li>
+ <li>freebsd</li>
+ <li>linux</li>
+ </ul>
+ More values may be supported in the future and any of these may or may
+ not be supported by a given container runtime implementation.
+ </dd>
+ <dt>
+ config <code>struct</code>
+ </dt>
+ <dd>
+ The execution parameters which should be used as a base when running a
+ container using the image. This field can be <code>null</code>, in
+ which case any execution parameters should be specified at creation of
+ the container.
+
+ <h4>Container RunConfig Field Descriptions</h4>
+
+ <dl>
+ <dt>
+ User <code>string</code>
+ </dt>
+ <dd>
+ <p>The username or UID which the process in the container should
+ run as. This acts as a default value to use when the value is
+ not specified when creating a container.</p>
+
+ <p>All of the following are valid:</p>
+
+ <ul>
+ <li><code>user</code></li>
+ <li><code>uid</code></li>
+ <li><code>user:group</code></li>
+ <li><code>uid:gid</code></li>
+ <li><code>uid:group</code></li>
+ <li><code>user:gid</code></li>
+ </ul>
+
+ <p>If <code>group</code>/<code>gid</code> is not specified, the
+ default group and supplementary groups of the given
+ <code>user</code>/<code>uid</code> in <code>/etc/passwd</code>
+ from the container are applied.</p>
+ </dd>
+ <dt>
+ Memory <code>integer</code>
+ </dt>
+ <dd>
+ Memory limit (in bytes). This acts as a default value to use
+ when the value is not specified when creating a container.
+ </dd>
+ <dt>
+ MemorySwap <code>integer</code>
+ </dt>
+ <dd>
+ Total memory usage (memory + swap); set to <code>-1</code> to
+ disable swap. This acts as a default value to use when the
+ value is not specified when creating a container.
+ </dd>
+ <dt>
+ CpuShares <code>integer</code>
+ </dt>
+ <dd>
+ CPU shares (relative weight vs. other containers). This acts as
+ a default value to use when the value is not specified when
+ creating a container.
+ </dd>
+ <dt>
+ ExposedPorts <code>struct</code>
+ </dt>
+ <dd>
+ A set of ports to expose from a container running this image.
+ This JSON structure value is unusual because it is a direct
+ JSON serialization of the Go type
+ <code>map[string]struct{}</code> and is represented in JSON as
+ an object mapping its keys to an empty object. Here is an
+ example:
+
+<pre>{
+ "8080": {},
+ "53/udp": {},
+ "2356/tcp": {}
+}</pre>
+
+ Its keys can be in the format of:
+ <ul>
+ <li>
+ <code>"port/tcp"</code>
+ </li>
+ <li>
+ <code>"port/udp"</code>
+ </li>
+ <li>
+ <code>"port"</code>
+ </li>
+ </ul>
+ with the default protocol being <code>"tcp"</code> if not
+ specified.
+
+ These values act as defaults and are merged with any specified
+ when creating a container.
+ </dd>
+ <dt>
+ Env <code>array of strings</code>
+ </dt>
+ <dd>
+ Entries are in the format of <code>VARNAME="var value"</code>.
+ These values act as defaults and are merged with any specified
+ when creating a container.
+ </dd>
+ <dt>
+ Entrypoint <code>array of strings</code>
+ </dt>
+ <dd>
+ A list of arguments to use as the command to execute when the
+ container starts. This value acts as a default and is replaced
+ by an entrypoint specified when creating a container.
+ </dd>
+ <dt>
+ Cmd <code>array of strings</code>
+ </dt>
+ <dd>
+ Default arguments to the entry point of the container. These
+ values act as defaults and are replaced with any specified when
+ creating a container. If an <code>Entrypoint</code> value is
+ not specified, then the first entry of the <code>Cmd</code>
+ array should be interpreted as the executable to run.
+ </dd>
+ <dt>
+ Healthcheck <code>struct</code>
+ </dt>
+ <dd>
+ A test to perform to determine whether the container is healthy.
+ Here is an example:
+<pre>{
+ "Test": [
+ "CMD-SHELL",
+ "/usr/bin/check-health localhost"
+ ],
+ "Interval": 30000000000,
+ "Timeout": 10000000000,
+ "Retries": 3
+}</pre>
+ The object has the following fields.
+ <dl>
+ <dt>
+ Test <code>array of strings</code>
+ </dt>
+ <dd>
+ The test to perform to check that the container is healthy.
+ The options are:
+ <ul>
+ <li><code>[]</code> : inherit healthcheck from base image</li>
+ <li><code>["NONE"]</code> : disable healthcheck</li>
+ <li><code>["CMD", arg1, arg2, ...]</code> : exec arguments directly</li>
+ <li><code>["CMD-SHELL", command]</code> : run command with system's default shell</li>
+ </ul>
+
+ The test command should exit with a status of 0 if the container is healthy,
+ or with 1 if it is unhealthy.
+ </dd>
+ <dt>
+ Interval <code>integer</code>
+ </dt>
+ <dd>
+ Number of nanoseconds to wait between probe attempts.
+ </dd>
+ <dt>
+ Timeout <code>integer</code>
+ </dt>
+ <dd>
+ Number of nanoseconds to wait before considering the check to have hung.
+ </dd>
+ <dt>
+ Retries <code>integer</code>
+ </dt>
+ <dd>
+ The number of consecutive failures needed to consider a container as unhealthy.
+ </dd>
+ </dl>
+
+ In each case, the field can be omitted to indicate that the
+ value should be inherited from the base layer.
+
+ These values act as defaults and are merged with any specified
+ when creating a container.
+ </dd>
+ <dt>
+ Volumes <code>struct</code>
+ </dt>
+ <dd>
+ A set of directories which should be created as data volumes in
+ a container running this image. This JSON structure value is
+ unusual because it is a direct JSON serialization of the Go
+ type <code>map[string]struct{}</code> and is represented in
+ JSON as an object mapping its keys to an empty object. Here is
+ an example:
+<pre>{
+ "/var/my-app-data/": {},
+ "/etc/some-config.d/": {}
+}</pre>
+ </dd>
+ <dt>
+ WorkingDir <code>string</code>
+ </dt>
+ <dd>
+ Sets the current working directory of the entry point process
+ in the container. This value acts as a default and is replaced
+ by a working directory specified when creating a container.
+ </dd>
+ </dl>
+ </dd>
+ <dt>
+ rootfs <code>struct</code>
+ </dt>
+ <dd>
+ The rootfs key references the layer content addresses used by the
+ image. This makes the image config hash depend on the filesystem hash.
+ rootfs has two subkeys:
+
+ <ul>
+ <li>
+ <code>type</code> is usually set to <code>layers</code>.
+ </li>
+ <li>
+ <code>diff_ids</code> is an array of layer content hashes (<code>DiffIDs</code>), in order from bottom-most to top-most.
+ </li>
+ </ul>
+
+
+ Here is an example rootfs section:
+
+<pre>"rootfs": {
+ "diff_ids": [
+ "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
+ "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
+ "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"
+ ],
+ "type": "layers"
+}</pre>
+ </dd>
+ <dt>
+ history <code>struct</code>
+ </dt>
+ <dd>
+ <code>history</code> is an array of objects describing the history of
+ each layer. The array is ordered from bottom-most layer to top-most
+ layer. The object has the following fields.
+
+ <ul>
+ <li>
+ <code>created</code>: Creation time, expressed as a ISO-8601 formatted
+ combined date and time
+ </li>
+ <li>
+ <code>author</code>: The author of the build point
+ </li>
+ <li>
+ <code>created_by</code>: The command which created the layer
+ </li>
+ <li>
+ <code>comment</code>: A custom message set when creating the layer
+ </li>
+ <li>
+ <code>empty_layer</code>: This field is used to mark if the history
+ item created a filesystem diff. It is set to true if this history
+ item doesn't correspond to an actual layer in the rootfs section
+ (for example, a command like ENV which results in no change to the
+ filesystem).
+ </li>
+ </ul>
+
+Here is an example history section:
+
+<pre>"history": [
+ {
+ "created": "2015-10-31T22:22:54.690851953Z",
+ "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
+ },
+ {
+ "created": "2015-10-31T22:22:55.613815829Z",
+ "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
+ "empty_layer": true
+ }
+]</pre>
+ </dd>
+</dl>
+
+Any extra fields in the Image JSON struct are considered implementation
+specific and should be ignored by any implementations which are unable to
+interpret them.
+
+## Creating an Image Filesystem Changeset
+
+An example of creating an Image Filesystem Changeset follows.
+
+An image root filesystem is first created as an empty directory. Here is the
+initial empty directory structure for a changeset using the
+randomly-generated directory name `c3167915dc9d` ([actual layer DiffIDs are
+generated based on the content](#id_desc)).
+
+```
+c3167915dc9d/
+```
+
+Files and directories are then created:
+
+```
+c3167915dc9d/
+ etc/
+ my-app-config
+ bin/
+ my-app-binary
+ my-app-tools
+```
+
+The `c3167915dc9d` directory is then committed as a plain Tar archive with
+entries for the following files:
+
+```
+etc/my-app-config
+bin/my-app-binary
+bin/my-app-tools
+```
+
+To make changes to the filesystem of this container image, create a new
+directory, such as `f60c56784b83`, and initialize it with a snapshot of the
+parent image's root filesystem, so that the directory is identical to that
+of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem can make this very
+efficient:
+
+```
+f60c56784b83/
+ etc/
+ my-app-config
+ bin/
+ my-app-binary
+ my-app-tools
+```
+
+This example change is going to add a configuration directory at `/etc/my-app.d`
+which contains a default config file. There's also a change to the
+`my-app-tools` binary to handle the config layout change. The `f60c56784b83`
+directory then looks like this:
+
+```
+f60c56784b83/
+ etc/
+ my-app.d/
+ default.cfg
+ bin/
+ my-app-binary
+ my-app-tools
+```
+
+This reflects the removal of `/etc/my-app-config` and creation of a file and
+directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been
+replaced with an updated version. Before committing this directory to a
+changeset, because it has a parent image, it is first compared with the
+directory tree of the parent snapshot, `c3167915dc9d`, looking for files and
+directories that have been added, modified, or removed. The following changeset
+is found:
+
+```
+Added: /etc/my-app.d/default.cfg
+Modified: /bin/my-app-tools
+Deleted: /etc/my-app-config
+```
+
+A Tar Archive is then created which contains *only* this changeset: The added
+and modified files and directories in their entirety, and for each deleted item
+an entry for an empty file at the same location but with the basename of the
+deleted file or directory prefixed with `.wh.`. The filenames prefixed with
+`.wh.` are known as "whiteout" files. NOTE: For this reason, it is not possible
+to create an image root filesystem which contains a file or directory with a
+name beginning with `.wh.`. The resulting Tar archive for `f60c56784b83` has
+the following entries:
+
+```
+/etc/my-app.d/default.cfg
+/bin/my-app-tools
+/etc/.wh.my-app-config
+```
+
+Any given image is likely to be composed of several of these Image Filesystem
+Changeset tar archives.
+
+## Combined Image JSON + Filesystem Changeset Format
+
+There is also a format for a single archive which contains complete information
+about an image, including:
+
+ - repository names/tags
+ - image configuration JSON file
+ - all tar archives of each layer filesystem changesets
+
+For example, here's what the full archive of `library/busybox` is (displayed in
+`tree` format):
+
+```
+.
+├── 47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json
+├── 5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a
+│   ├── VERSION
+│   ├── json
+│   └── layer.tar
+├── a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198
+│   ├── VERSION
+│   ├── json
+│   └── layer.tar
+├── manifest.json
+└── repositories
+```
+
+There is a directory for each layer in the image. Each directory is named with
+a 64 character hex name that is deterministically generated from the layer
+information. These names are not necessarily layer DiffIDs or ChainIDs. Each of
+these directories contains 3 files:
+
+ * `VERSION` - The schema version of the `json` file
+ * `json` - The legacy JSON metadata for an image layer. In this version of
+ the image specification, layers don't have JSON metadata, but in
+ [version 1](v1.md), they did. A file is created for each layer in the
+ v1 format for backward compatibility.
+ * `layer.tar` - The Tar archive of the filesystem changeset for an image
+ layer.
+
+Note that this directory layout is only important for backward compatibility.
+Current implementations use the paths specified in `manifest.json`.
+
+The content of the `VERSION` files is simply the semantic version of the JSON
+metadata schema:
+
+```
+1.0
+```
+
+The `repositories` file is another JSON file which describes names/tags:
+
+```
+{
+ "busybox":{
+ "latest":"5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a"
+ }
+}
+```
+
+Every key in this object is the name of a repository, and maps to a collection
+of tag suffixes. Each tag maps to the ID of the image represented by that tag.
+This file is only used for backwards compatibility. Current implementations use
+the `manifest.json` file instead.
+
+The `manifest.json` file provides the image JSON for the top-level image, and
+optionally for parent images that this image was derived from. It consists of
+an array of metadata entries:
+
+```
+[
+ {
+ "Config": "47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json",
+ "RepoTags": ["busybox:latest"],
+ "Layers": [
+ "a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198/layer.tar",
+ "5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a/layer.tar"
+ ]
+ }
+]
+```
+
+There is an entry in the array for each image.
+
+The `Config` field references another file in the tar which includes the image
+JSON for this image.
+
+The `RepoTags` field lists references pointing to this image.
+
+The `Layers` field points to the filesystem changeset tars.
+
+An optional `Parent` field references the imageID of the parent image. This
+parent must be part of the same `manifest.json` file.
+
+This file shouldn't be confused with the distribution manifest, used to push
+and pull images.
+
+Generally, implementations that support this version of the spec will use
+the `manifest.json` file if available, and older implementations will use the
+legacy `*/json` files and `repositories`.
diff --git a/image/spec/v1.md b/image/spec/v1.md
new file mode 100644
index 0000000..fce3a06
--- /dev/null
+++ b/image/spec/v1.md
@@ -0,0 +1,573 @@
+# Docker Image Specification v1.0.0
+
+An *Image* is an ordered collection of root filesystem changes and the
+corresponding execution parameters for use within a container runtime. This
+specification outlines the format of these filesystem changes and corresponding
+parameters and describes how to create and use them for use with a container
+runtime and execution tool.
+
+## Terminology
+
+This specification uses the following terms:
+
+<dl>
+ <dt>
+ Layer
+ </dt>
+ <dd>
+ Images are composed of <i>layers</i>. <i>Image layer</i> is a general
+ term which may be used to refer to one or both of the following:
+
+ <ol>
+ <li>The metadata for the layer, described in the JSON format.</li>
+ <li>The filesystem changes described by a layer.</li>
+ </ol>
+
+ To refer to the former you may use the term <i>Layer JSON</i> or
+ <i>Layer Metadata</i>. To refer to the latter you may use the term
+ <i>Image Filesystem Changeset</i> or <i>Image Diff</i>.
+ </dd>
+ <dt>
+ Image JSON
+ </dt>
+ <dd>
+ Each layer has an associated JSON structure which describes some
+ basic information about the image such as date created, author, and the
+ ID of its parent image as well as execution/runtime configuration like
+ its entry point, default arguments, CPU/memory shares, networking, and
+ volumes.
+ </dd>
+ <dt>
+ Image Filesystem Changeset
+ </dt>
+ <dd>
+ Each layer has an archive of the files which have been added, changed,
+ or deleted relative to its parent layer. Using a layer-based or union
+ filesystem such as AUFS, or by computing the diff from filesystem
+ snapshots, the filesystem changeset can be used to present a series of
+ image layers as if they were one cohesive filesystem.
+ </dd>
+ <dt>
+ Image ID <a name="id_desc"></a>
+ </dt>
+ <dd>
+ Each layer is given an ID upon its creation. It is
+ represented as a hexadecimal encoding of 256 bits, e.g.,
+ <code>a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9</code>.
+ Image IDs should be sufficiently random so as to be globally unique.
+ 32 bytes read from <code>/dev/urandom</code> is sufficient for all
+ practical purposes. Alternatively, an image ID may be derived as a
+ cryptographic hash of image contents as the result is considered
+ indistinguishable from random. The choice is left up to implementors.
+ </dd>
+ <dt>
+ Image Parent
+ </dt>
+ <dd>
+ Most layer metadata structs contain a <code>parent</code> field which
+ refers to the Image from which another directly descends. An image
+ contains a separate JSON metadata file and set of changes relative to
+ the filesystem of its parent image. <i>Image Ancestor</i> and
+ <i>Image Descendant</i> are also common terms.
+ </dd>
+ <dt>
+ Image Checksum
+ </dt>
+ <dd>
+ Layer metadata structs contain a cryptographic hash of the contents of
+ the layer's filesystem changeset. Though the set of changes exists as a
+ simple Tar archive, two archives with identical filenames and content
+ will have different SHA digests if the last-access or last-modified
+ times of any entries differ. For this reason, image checksums are
+ generated using the TarSum algorithm which produces a cryptographic
+ hash of file contents and selected headers only. Details of this
+ algorithm are described in the separate <a href="https://github.com/docker/docker/blob/master/pkg/tarsum/tarsum_spec.md">TarSum specification</a>.
+ </dd>
+ <dt>
+ Tag
+ </dt>
+ <dd>
+ A tag serves to map a descriptive, user-given name to any single image
+ ID. An image name suffix (the name component after <code>:</code>) is
+ often referred to as a tag as well, though it strictly refers to the
+ full name of an image. Acceptable values for a tag suffix are
+ implementation specific, but they SHOULD be limited to the set of
+ alphanumeric characters <code>[a-zA-Z0-9]</code>, punctuation
+ characters <code>[._-]</code>, and MUST NOT contain a <code>:</code>
+ character.
+ </dd>
+ <dt>
+ Repository
+ </dt>
+ <dd>
+ A collection of tags grouped under a common prefix (the name component
+ before <code>:</code>). For example, in an image tagged with the name
+ <code>my-app:3.1.4</code>, <code>my-app</code> is the <i>Repository</i>
+ component of the name. Acceptable values for repository name are
+ implementation specific, but they SHOULD be limited to the set of
+ alphanumeric characters <code>[a-zA-Z0-9]</code>, and punctuation
+ characters <code>[._-]</code>, however it MAY contain additional
+ <code>/</code> and <code>:</code> characters for organizational
+ purposes, with the last <code>:</code> character being interpreted
+ dividing the repository component of the name from the tag suffix
+ component.
+ </dd>
+</dl>
+
+## Image JSON Description
+
+Here is an example image JSON file:
+
+```
+{
+ "id": "a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9",
+ "parent": "c6e3cedcda2e3982a1a6760e178355e8e65f7b80e4e5248743fa3549d284e024",
+ "checksum": "tarsum.v1+sha256:e58fcf7418d2390dec8e8fb69d88c06ec07039d651fedc3aa72af9972e7d046b",
+ "created": "2014-10-13T21:19:18.674353812Z",
+ "author": "Alyssa P. Hacker &lt;alyspdev@example.com&gt;",
+ "architecture": "amd64",
+ "os": "linux",
+ "Size": 271828,
+ "config": {
+ "User": "alice",
+ "Memory": 2048,
+ "MemorySwap": 4096,
+ "CpuShares": 8,
+ "ExposedPorts": {
+ "8080/tcp": {}
+ },
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "FOO=docker_is_a_really",
+ "BAR=great_tool_you_know"
+ ],
+ "Entrypoint": [
+ "/bin/my-app-binary"
+ ],
+ "Cmd": [
+ "--foreground",
+ "--config",
+ "/etc/my-app.d/default.cfg"
+ ],
+ "Volumes": {
+ "/var/job-result-data": {},
+ "/var/log/my-app-logs": {}
+ },
+ "WorkingDir": "/home/alice"
+ }
+}
+```
+
+### Image JSON Field Descriptions
+
+<dl>
+ <dt>
+ id <code>string</code>
+ </dt>
+ <dd>
+ Randomly generated, 256-bit, hexadecimal encoded. Uniquely identifies
+ the image.
+ </dd>
+ <dt>
+ parent <code>string</code>
+ </dt>
+ <dd>
+ ID of the parent image. If there is no parent image then this field
+ should be omitted. A collection of images may share many of the same
+ ancestor layers. This organizational structure is strictly a tree with
+ any one layer having either no parent or a single parent and zero or
+ more descendant layers. Cycles are not allowed and implementations
+ should be careful to avoid creating them or iterating through a cycle
+ indefinitely.
+ </dd>
+ <dt>
+ created <code>string</code>
+ </dt>
+ <dd>
+ ISO-8601 formatted combined date and time at which the image was
+ created.
+ </dd>
+ <dt>
+ author <code>string</code>
+ </dt>
+ <dd>
+ Gives the name and/or email address of the person or entity which
+ created and is responsible for maintaining the image.
+ </dd>
+ <dt>
+ architecture <code>string</code>
+ </dt>
+ <dd>
+ The CPU architecture which the binaries in this image are built to run
+ on. Possible values include:
+ <ul>
+ <li>386</li>
+ <li>amd64</li>
+ <li>arm</li>
+ </ul>
+ More values may be supported in the future and any of these may or may
+ not be supported by a given container runtime implementation.
+ </dd>
+ <dt>
+ os <code>string</code>
+ </dt>
+ <dd>
+ The name of the operating system which the image is built to run on.
+ Possible values include:
+ <ul>
+ <li>darwin</li>
+ <li>freebsd</li>
+ <li>linux</li>
+ </ul>
+ More values may be supported in the future and any of these may or may
+ not be supported by a given container runtime implementation.
+ </dd>
+ <dt>
+ checksum <code>string</code>
+ </dt>
+ <dd>
+ Image Checksum of the filesystem changeset associated with the image
+ layer.
+ </dd>
+ <dt>
+ Size <code>integer</code>
+ </dt>
+ <dd>
+ The size in bytes of the filesystem changeset associated with the image
+ layer.
+ </dd>
+ <dt>
+ config <code>struct</code>
+ </dt>
+ <dd>
+ The execution parameters which should be used as a base when running a
+ container using the image. This field can be <code>null</code>, in
+ which case any execution parameters should be specified at creation of
+ the container.
+
+ <h4>Container RunConfig Field Descriptions</h4>
+
+ <dl>
+ <dt>
+ User <code>string</code>
+ </dt>
+ <dd>
+ <p>The username or UID which the process in the container should
+ run as. This acts as a default value to use when the value is
+ not specified when creating a container.</p>
+
+ <p>All of the following are valid:</p>
+
+ <ul>
+ <li><code>user</code></li>
+ <li><code>uid</code></li>
+ <li><code>user:group</code></li>
+ <li><code>uid:gid</code></li>
+ <li><code>uid:group</code></li>
+ <li><code>user:gid</code></li>
+ </ul>
+
+ <p>If <code>group</code>/<code>gid</code> is not specified, the
+ default group and supplementary groups of the given
+ <code>user</code>/<code>uid</code> in <code>/etc/passwd</code>
+ from the container are applied.</p>
+ </dd>
+ <dt>
+ Memory <code>integer</code>
+ </dt>
+ <dd>
+ Memory limit (in bytes). This acts as a default value to use
+ when the value is not specified when creating a container.
+ </dd>
+ <dt>
+ MemorySwap <code>integer</code>
+ </dt>
+ <dd>
+ Total memory usage (memory + swap); set to <code>-1</code> to
+ disable swap. This acts as a default value to use when the
+ value is not specified when creating a container.
+ </dd>
+ <dt>
+ CpuShares <code>integer</code>
+ </dt>
+ <dd>
+ CPU shares (relative weight vs. other containers). This acts as
+ a default value to use when the value is not specified when
+ creating a container.
+ </dd>
+ <dt>
+ ExposedPorts <code>struct</code>
+ </dt>
+ <dd>
+ A set of ports to expose from a container running this image.
+ This JSON structure value is unusual because it is a direct
+ JSON serialization of the Go type
+ <code>map[string]struct{}</code> and is represented in JSON as
+ an object mapping its keys to an empty object. Here is an
+ example:
+
+<pre>{
+ "8080": {},
+ "53/udp": {},
+ "2356/tcp": {}
+}</pre>
+
+ Its keys can be in the format of:
+ <ul>
+ <li>
+ <code>"port/tcp"</code>
+ </li>
+ <li>
+ <code>"port/udp"</code>
+ </li>
+ <li>
+ <code>"port"</code>
+ </li>
+ </ul>
+ with the default protocol being <code>"tcp"</code> if not
+ specified.
+
+ These values act as defaults and are merged with any specified
+ when creating a container.
+ </dd>
+ <dt>
+ Env <code>array of strings</code>
+ </dt>
+ <dd>
+ Entries are in the format of <code>VARNAME="var value"</code>.
+ These values act as defaults and are merged with any specified
+ when creating a container.
+ </dd>
+ <dt>
+ Entrypoint <code>array of strings</code>
+ </dt>
+ <dd>
+ A list of arguments to use as the command to execute when the
+ container starts. This value acts as a default and is replaced
+ by an entrypoint specified when creating a container.
+ </dd>
+ <dt>
+ Cmd <code>array of strings</code>
+ </dt>
+ <dd>
+ Default arguments to the entry point of the container. These
+ values act as defaults and are replaced with any specified when
+ creating a container. If an <code>Entrypoint</code> value is
+ not specified, then the first entry of the <code>Cmd</code>
+ array should be interpreted as the executable to run.
+ </dd>
+ <dt>
+ Volumes <code>struct</code>
+ </dt>
+ <dd>
+ A set of directories which should be created as data volumes in
+ a container running this image. This JSON structure value is
+ unusual because it is a direct JSON serialization of the Go
+ type <code>map[string]struct{}</code> and is represented in
+ JSON as an object mapping its keys to an empty object. Here is
+ an example:
+<pre>{
+ "/var/my-app-data/": {},
+ "/etc/some-config.d/": {}
+}</pre>
+ </dd>
+ <dt>
+ WorkingDir <code>string</code>
+ </dt>
+ <dd>
+ Sets the current working directory of the entry point process
+ in the container. This value acts as a default and is replaced
+ by a working directory specified when creating a container.
+ </dd>
+ </dl>
+ </dd>
+</dl>
+
+Any extra fields in the Image JSON struct are considered implementation
+specific and should be ignored by any implementations which are unable to
+interpret them.
+
+## Creating an Image Filesystem Changeset
+
+An example of creating an Image Filesystem Changeset follows.
+
+An image root filesystem is first created as an empty directory named with the
+ID of the image being created. Here is the initial empty directory structure
+for the changeset for an image with ID `c3167915dc9d` ([real IDs are much
+longer](#id_desc), but this example uses a truncated one here for brevity.
+Implementations need not name the rootfs directory in this way but it may be
+convenient for keeping record of a large number of image layers.):
+
+```
+c3167915dc9d/
+```
+
+Files and directories are then created:
+
+```
+c3167915dc9d/
+ etc/
+ my-app-config
+ bin/
+ my-app-binary
+ my-app-tools
+```
+
+The `c3167915dc9d` directory is then committed as a plain Tar archive with
+entries for the following files:
+
+```
+etc/my-app-config
+bin/my-app-binary
+bin/my-app-tools
+```
+
+The TarSum checksum for the archive file is then computed and placed in the
+JSON metadata along with the execution parameters.
+
+To make changes to the filesystem of this container image, create a new
+directory named with a new ID, such as `f60c56784b83`, and initialize it with
+a snapshot of the parent image's root filesystem, so that the directory is
+identical to that of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem
+can make this very efficient:
+
+```
+f60c56784b83/
+ etc/
+ my-app-config
+ bin/
+ my-app-binary
+ my-app-tools
+```
+
+This example change is going to add a configuration directory at `/etc/my-app.d`
+which contains a default config file. There's also a change to the
+`my-app-tools` binary to handle the config layout change. The `f60c56784b83`
+directory then looks like this:
+
+```
+f60c56784b83/
+ etc/
+ my-app.d/
+ default.cfg
+ bin/
+ my-app-binary
+ my-app-tools
+```
+
+This reflects the removal of `/etc/my-app-config` and creation of a file and
+directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been
+replaced with an updated version. Before committing this directory to a
+changeset, because it has a parent image, it is first compared with the
+directory tree of the parent snapshot, `c3167915dc9d`, looking for files and
+directories that have been added, modified, or removed. The following changeset
+is found:
+
+```
+Added: /etc/my-app.d/default.cfg
+Modified: /bin/my-app-tools
+Deleted: /etc/my-app-config
+```
+
+A Tar Archive is then created which contains *only* this changeset: The added
+and modified files and directories in their entirety, and for each deleted item
+an entry for an empty file at the same location but with the basename of the
+deleted file or directory prefixed with `.wh.`. The filenames prefixed with
+`.wh.` are known as "whiteout" files. NOTE: For this reason, it is not possible
+to create an image root filesystem which contains a file or directory with a
+name beginning with `.wh.`. The resulting Tar archive for `f60c56784b83` has
+the following entries:
+
+```
+/etc/my-app.d/default.cfg
+/bin/my-app-tools
+/etc/.wh.my-app-config
+```
+
+Any given image is likely to be composed of several of these Image Filesystem
+Changeset tar archives.
+
+## Combined Image JSON + Filesystem Changeset Format
+
+There is also a format for a single archive which contains complete information
+about an image, including:
+
+ - repository names/tags
+ - all image layer JSON files
+ - all tar archives of each layer filesystem changesets
+
+For example, here's what the full archive of `library/busybox` is (displayed in
+`tree` format):
+
+```
+.
+├── 5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e
+│   ├── VERSION
+│   ├── json
+│   └── layer.tar
+├── a7b8b41220991bfc754d7ad445ad27b7f272ab8b4a2c175b9512b97471d02a8a
+│   ├── VERSION
+│   ├── json
+│   └── layer.tar
+├── a936027c5ca8bf8f517923169a233e391cbb38469a75de8383b5228dc2d26ceb
+│   ├── VERSION
+│   ├── json
+│   └── layer.tar
+├── f60c56784b832dd990022afc120b8136ab3da9528094752ae13fe63a2d28dc8c
+│   ├── VERSION
+│   ├── json
+│   └── layer.tar
+└── repositories
+```
+
+There are one or more directories named with the ID for each layer in a full
+image. Each of these directories contains 3 files:
+
+ * `VERSION` - The schema version of the `json` file
+ * `json` - The JSON metadata for an image layer
+ * `layer.tar` - The Tar archive of the filesystem changeset for an image
+ layer.
+
+The content of the `VERSION` files is simply the semantic version of the JSON
+metadata schema:
+
+```
+1.0
+```
+
+And the `repositories` file is another JSON file which describes names/tags:
+
+```
+{
+ "busybox":{
+ "latest":"5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e"
+ }
+}
+```
+
+Every key in this object is the name of a repository, and maps to a collection
+of tag suffixes. Each tag maps to the ID of the image represented by that tag.
+
+## Loading an Image Filesystem Changeset
+
+Unpacking a bundle of image layer JSON files and their corresponding filesystem
+changesets can be done using a series of steps:
+
+1. Follow the parent IDs of image layers to find the root ancestor (an image
+with no parent ID specified).
+2. For every image layer, in order from root ancestor and descending down,
+extract the contents of that layer's filesystem changeset archive into a
+directory which will be used as the root of a container filesystem.
+
+ - Extract all contents of each archive.
+ - Walk the directory tree once more, removing any files with the prefix
+ `.wh.` and the corresponding file or directory named without this prefix.
+
+
+## Implementations
+
+This specification is an admittedly imperfect description of an
+imperfectly-understood problem. The Docker project is, in turn, an attempt to
+implement this specification. Our goal and our execution toward it will evolve
+over time, but our primary concern in this specification and in our
+implementation is compatibility and interoperability.
diff --git a/image/store.go b/image/store.go
new file mode 100644
index 0000000..3ab5487
--- /dev/null
+++ b/image/store.go
@@ -0,0 +1,367 @@
+package image
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/distribution/digestset"
+ "github.com/docker/docker/layer"
+ "github.com/docker/docker/pkg/ioutils"
+ "github.com/docker/docker/pkg/system"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+// Store is an interface for creating and accessing images
+type Store interface {
+ // Create registers an image from its raw JSON config and returns its
+ // content-addressed ID.
+ Create(config []byte) (ID, error)
+ // Get resolves an exact ID; Search resolves a partial ID/digest prefix.
+ Get(id ID) (*Image, error)
+ GetTarSeekStream(id ID) (ioutils.ReadSeekCloser, error)
+ Delete(id ID) ([]layer.Metadata, error)
+ Search(partialID string) (ID, error)
+ // Parent links and lastUpdated timestamps are persisted as metadata
+ // entries in the store backend.
+ SetParent(id ID, parent ID) error
+ GetParent(id ID) (ID, error)
+ SetLastUpdated(id ID) error
+ GetLastUpdated(id ID) (time.Time, error)
+ Children(id ID) []ID
+ // Map returns all images; Heads returns only images with no children.
+ Map() map[ID]*Image
+ Heads() map[ID]*Image
+}
+
+// LayerGetReleaser is a minimal interface for getting and releasing layers
+// (a subset of the layer store used to retain an image's top layer).
+type LayerGetReleaser interface {
+ Get(layer.ChainID) (layer.Layer, error)
+ Release(layer.Layer) ([]layer.Metadata, error)
+}
+
+// imageMeta is the in-memory bookkeeping for one image: the retained top
+// layer (nil for images without layers) and the set of child image IDs.
+type imageMeta struct {
+ layer layer.Layer
+ children map[ID]struct{}
+}
+
+// store is the default Store implementation. The embedded RWMutex guards
+// the images map; config blobs and metadata live in the fs StoreBackend,
+// and digestSet indexes image digests for prefix search.
+type store struct {
+ sync.RWMutex
+ ls LayerGetReleaser
+ images map[ID]*imageMeta
+ fs StoreBackend
+ digestSet *digestset.Set
+ platform string
+}
+
+// NewImageStore returns new store object for given layer store
+// It immediately restores all images already present in the backend,
+// retaining their layers; a restore failure fails construction.
+func NewImageStore(fs StoreBackend, platform string, ls LayerGetReleaser) (Store, error) {
+ is := &store{
+ ls: ls,
+ images: make(map[ID]*imageMeta),
+ fs: fs,
+ digestSet: digestset.NewSet(),
+ platform: platform,
+ }
+
+ // load all current images and retain layers
+ if err := is.restore(); err != nil {
+ return nil, err
+ }
+
+ return is, nil
+}
+
+// restore rebuilds the in-memory image map from the on-disk backend:
+// it walks every stored config, retains each image's top layer, and
+// registers the digest for prefix search. Unparseable configs are
+// logged and skipped so one bad entry cannot prevent startup.
+func (is *store) restore() error {
+ err := is.fs.Walk(func(dgst digest.Digest) error {
+ img, err := is.Get(IDFromDigest(dgst))
+ if err != nil {
+ logrus.Errorf("invalid image %v, %v", dgst, err)
+ return nil
+ }
+ var l layer.Layer
+ if chainID := img.RootFS.ChainID(); chainID != "" {
+ l, err = is.ls.Get(chainID)
+ if err != nil {
+ return err
+ }
+ }
+ if err := is.digestSet.Add(dgst); err != nil {
+ return err
+ }
+
+ imageMeta := &imageMeta{
+ layer: l,
+ children: make(map[ID]struct{}),
+ }
+
+ is.images[IDFromDigest(dgst)] = imageMeta
+
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ // Second pass to fill in children maps
+ for id := range is.images {
+ if parent, err := is.GetParent(id); err == nil {
+ if parentMeta := is.images[parent]; parentMeta != nil {
+ parentMeta.children[id] = struct{}{}
+ }
+ }
+ }
+
+ return nil
+}
+
+// Create stores the raw config in the backend and registers the resulting
+// content-addressed image in memory, retaining its top layer. It rejects
+// configs whose History section claims more non-empty layers than
+// RootFS.DiffIDs provides. Creating an already-existing image is a no-op
+// that returns the existing ID.
+func (is *store) Create(config []byte) (ID, error) {
+ var img Image
+ err := json.Unmarshal(config, &img)
+ if err != nil {
+ return "", err
+ }
+
+ // TODO @jhowardmsft - LCOW Support. This will need revisiting.
+ // Integrity check - ensure we are creating something for the correct platform
+ if system.LCOWSupported() {
+ // NOTE(review): strings.EqualFold would express this case-insensitive
+ // comparison without allocating two lowered strings.
+ if strings.ToLower(img.Platform()) != strings.ToLower(is.platform) {
+ return "", fmt.Errorf("cannot create entry for platform %q in image store for platform %q", img.Platform(), is.platform)
+ }
+ }
+
+ // Must reject any config that references diffIDs from the history
+ // which aren't among the rootfs layers.
+ rootFSLayers := make(map[layer.DiffID]struct{})
+ for _, diffID := range img.RootFS.DiffIDs {
+ rootFSLayers[diffID] = struct{}{}
+ }
+
+ layerCounter := 0
+ for _, h := range img.History {
+ if !h.EmptyLayer {
+ layerCounter++
+ }
+ }
+ if layerCounter > len(img.RootFS.DiffIDs) {
+ return "", errors.New("too many non-empty layers in History section")
+ }
+
+ dgst, err := is.fs.Set(config)
+ if err != nil {
+ return "", err
+ }
+ imageID := IDFromDigest(dgst)
+
+ is.Lock()
+ defer is.Unlock()
+
+ if _, exists := is.images[imageID]; exists {
+ return imageID, nil
+ }
+
+ layerID := img.RootFS.ChainID()
+
+ var l layer.Layer
+ if layerID != "" {
+ l, err = is.ls.Get(layerID)
+ if err != nil {
+ return "", errors.Wrapf(err, "failed to get layer %s", layerID)
+ }
+ }
+
+ imageMeta := &imageMeta{
+ layer: l,
+ children: make(map[ID]struct{}),
+ }
+
+ // Roll back the in-memory registration if digest indexing fails.
+ is.images[imageID] = imageMeta
+ if err := is.digestSet.Add(imageID.Digest()); err != nil {
+ delete(is.images, imageID)
+ return "", err
+ }
+
+ return imageID, nil
+}
+
+// Search resolves term (a full digest or an unambiguous hex prefix) to an
+// image ID via the digest set, mapping "not found" to a user-facing error.
+func (is *store) Search(term string) (ID, error) {
+ dgst, err := is.digestSet.Lookup(term)
+ if err != nil {
+ if err == digestset.ErrDigestNotFound {
+ err = fmt.Errorf("No such image: %s", term)
+ }
+ return "", err
+ }
+ return IDFromDigest(dgst), nil
+}
+
+// Get loads the config for id from the backend and decodes it into an
+// Image. A missing/unreadable parent link is not an error: the Parent
+// field is simply left empty.
+func (is *store) Get(id ID) (*Image, error) {
+ // todo: Check if image is in images
+ // todo: Detect manual insertions and start using them
+ config, err := is.fs.Get(id.Digest())
+ if err != nil {
+ return nil, err
+ }
+
+ img, err := NewFromJSON(config)
+ if err != nil {
+ return nil, err
+ }
+ img.computedID = id
+
+ img.Parent, err = is.GetParent(id)
+ if err != nil {
+ img.Parent = ""
+ }
+
+ return img, nil
+}
+
+// GetTarSeekStream returns a concatenation of Tar streams
+// One stream is produced per rootfs layer (chain IDs of increasing
+// length); each wrapped stream releases its layer reference on Close.
+// Returns nil for an image with no layers.
+func (is *store) GetTarSeekStream(id ID) (ioutils.ReadSeekCloser, error) {
+ img, err := is.Get(id)
+ if err != nil {
+ return nil, err
+ }
+
+ var result ioutils.ReadSeekCloser
+
+ for i := range img.RootFS.DiffIDs {
+ // Copy the rootfs and truncate to the first i+1 diff IDs to
+ // compute the chain ID of this intermediate layer.
+ rootFS := *img.RootFS
+ rootFS.DiffIDs = rootFS.DiffIDs[:i+1]
+
+ l, err := is.ls.Get(rootFS.ChainID())
+ if err != nil {
+ return nil, err
+ }
+
+ arch, err := l.TarSeekStream()
+ if err != nil {
+ return nil, err
+ }
+
+ stream := ioutils.NewReadSeekCloserWrapper(arch, func() error {
+ _, err := is.ls.Release(l)
+ return err
+ })
+
+ if result == nil {
+ result = stream
+ } else {
+ result, err = ioutils.ConcatReadSeekClosers(result, stream)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ return result, nil
+}
+
+// Delete removes the image from the in-memory map, digest set and backend,
+// clears its children's parent links, detaches it from its own parent, and
+// releases its retained top layer (returning the layer metadata).
+func (is *store) Delete(id ID) ([]layer.Metadata, error) {
+ is.Lock()
+ defer is.Unlock()
+
+ imageMeta := is.images[id]
+ if imageMeta == nil {
+ return nil, fmt.Errorf("unrecognized image ID %s", id.String())
+ }
+ // NOTE(review): DeleteMetadata/Delete errors below are intentionally
+ // ignored; removal proceeds best-effort.
+ for id := range imageMeta.children {
+ is.fs.DeleteMetadata(id.Digest(), "parent")
+ }
+ if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil {
+ delete(is.images[parent].children, id)
+ }
+
+ if err := is.digestSet.Remove(id.Digest()); err != nil {
+ logrus.Errorf("error removing %s from digest set: %q", id, err)
+ }
+ delete(is.images, id)
+ is.fs.Delete(id.Digest())
+
+ if imageMeta.layer != nil {
+ return is.ls.Release(imageMeta.layer)
+ }
+ return nil, nil
+}
+
+// SetParent links id to parent, first unlinking id from any previous
+// parent, and persists the link as "parent" metadata in the backend.
+func (is *store) SetParent(id, parent ID) error {
+ is.Lock()
+ defer is.Unlock()
+ parentMeta := is.images[parent]
+ if parentMeta == nil {
+ return fmt.Errorf("unknown parent image ID %s", parent.String())
+ }
+ // Detach from the existing parent, if any, so children maps stay consistent.
+ if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil {
+ delete(is.images[parent].children, id)
+ }
+ parentMeta.children[id] = struct{}{}
+ return is.fs.SetMetadata(id.Digest(), "parent", []byte(parent))
+}
+
+// GetParent reads the persisted "parent" metadata for id and returns it
+// as an image ID; errors mean no parent link is stored (or unreadable).
+func (is *store) GetParent(id ID) (ID, error) {
+ d, err := is.fs.GetMetadata(id.Digest(), "parent")
+ if err != nil {
+ return "", err
+ }
+ return ID(d), nil // todo: validate?
+}
+
+// SetLastUpdated time for the image ID to the current time
+// (persisted as RFC3339Nano text in "lastUpdated" metadata).
+func (is *store) SetLastUpdated(id ID) error {
+ lastUpdated := []byte(time.Now().Format(time.RFC3339Nano))
+ return is.fs.SetMetadata(id.Digest(), "lastUpdated", lastUpdated)
+}
+
+// GetLastUpdated time for the image ID
+// A missing or unreadable record yields the zero time with no error;
+// only a malformed timestamp produces a parse error.
+func (is *store) GetLastUpdated(id ID) (time.Time, error) {
+ bytes, err := is.fs.GetMetadata(id.Digest(), "lastUpdated")
+ if err != nil || len(bytes) == 0 {
+ // No lastUpdated time
+ return time.Time{}, nil
+ }
+ return time.Parse(time.RFC3339Nano, string(bytes))
+}
+
+// Children returns the IDs of id's direct children under the read lock.
+func (is *store) Children(id ID) []ID {
+ is.RLock()
+ defer is.RUnlock()
+
+ return is.children(id)
+}
+
+// children is the lock-free variant of Children; callers must hold the
+// store lock. Returns nil for an unknown or childless image.
+func (is *store) children(id ID) []ID {
+ var ids []ID
+ if is.images[id] != nil {
+ for id := range is.images[id].children {
+ ids = append(ids, id)
+ }
+ }
+ return ids
+}
+
+// Heads returns only the images that have no children (leaf images).
+func (is *store) Heads() map[ID]*Image {
+ return is.imagesMap(false)
+}
+
+// Map returns every image in the store keyed by ID.
+func (is *store) Map() map[ID]*Image {
+ return is.imagesMap(true)
+}
+
+// imagesMap builds an ID->Image map under the read lock; when all is
+// false, images that have children are skipped. Images whose config can
+// no longer be loaded are logged and omitted.
+func (is *store) imagesMap(all bool) map[ID]*Image {
+ is.RLock()
+ defer is.RUnlock()
+
+ images := make(map[ID]*Image)
+
+ for id := range is.images {
+ if !all && len(is.children(id)) > 0 {
+ continue
+ }
+ img, err := is.Get(id)
+ if err != nil {
+ logrus.Errorf("invalid image access: %q, error: %q", id, err)
+ continue
+ }
+ images[id] = img
+ }
+ return images
+}
diff --git a/image/store_test.go b/image/store_test.go
new file mode 100644
index 0000000..fc6d461
--- /dev/null
+++ b/image/store_test.go
@@ -0,0 +1,178 @@
+package image
+
+import (
+ "runtime"
+ "testing"
+
+ "github.com/docker/docker/layer"
+ "github.com/docker/docker/pkg/testutil"
+ "github.com/opencontainers/go-digest"
+ "github.com/stretchr/testify/assert"
+)
+
+// TestRestore verifies that NewImageStore rebuilds state from a pre-seeded
+// backend: invalid configs are skipped, parent links and children maps are
+// restored, heads are computed, and prefix search works (a mid-string
+// pattern must not match).
+func TestRestore(t *testing.T) {
+ fs, cleanup := defaultFSStoreBackend(t)
+ defer cleanup()
+
+ id1, err := fs.Set([]byte(`{"comment": "abc", "rootfs": {"type": "layers"}}`))
+ assert.NoError(t, err)
+
+ _, err = fs.Set([]byte(`invalid`))
+ assert.NoError(t, err)
+
+ id2, err := fs.Set([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`))
+ assert.NoError(t, err)
+
+ err = fs.SetMetadata(id2, "parent", []byte(id1))
+ assert.NoError(t, err)
+
+ is, err := NewImageStore(fs, runtime.GOOS, &mockLayerGetReleaser{})
+ assert.NoError(t, err)
+
+ assert.Len(t, is.Map(), 2)
+
+ img1, err := is.Get(ID(id1))
+ assert.NoError(t, err)
+ assert.Equal(t, ID(id1), img1.computedID)
+ assert.Equal(t, string(id1), img1.computedID.String())
+
+ img2, err := is.Get(ID(id2))
+ assert.NoError(t, err)
+ assert.Equal(t, "abc", img1.Comment)
+ assert.Equal(t, "def", img2.Comment)
+
+ p, err := is.GetParent(ID(id1))
+ testutil.ErrorContains(t, err, "failed to read metadata")
+
+ p, err = is.GetParent(ID(id2))
+ assert.NoError(t, err)
+ assert.Equal(t, ID(id1), p)
+
+ children := is.Children(ID(id1))
+ assert.Len(t, children, 1)
+ assert.Equal(t, ID(id2), children[0])
+ assert.Len(t, is.Heads(), 1)
+
+ sid1, err := is.Search(string(id1)[:10])
+ assert.NoError(t, err)
+ assert.Equal(t, ID(id1), sid1)
+
+ sid1, err = is.Search(digest.Digest(id1).Hex()[:6])
+ assert.NoError(t, err)
+ assert.Equal(t, ID(id1), sid1)
+
+ invalidPattern := digest.Digest(id1).Hex()[1:6]
+ _, err = is.Search(invalidPattern)
+ testutil.ErrorContains(t, err, "No such image")
+}
+
+// TestAddDelete checks Create's deterministic content-addressed ID, parent
+// linking, and that Delete removes the image, its config and its parent
+// metadata while leaving the child image itself intact.
+func TestAddDelete(t *testing.T) {
+ is, cleanup := defaultImageStore(t)
+ defer cleanup()
+
+ id1, err := is.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`))
+ assert.NoError(t, err)
+ assert.Equal(t, ID("sha256:8d25a9c45df515f9d0fe8e4a6b1c64dd3b965a84790ddbcc7954bb9bc89eb993"), id1)
+
+ img, err := is.Get(id1)
+ assert.NoError(t, err)
+ assert.Equal(t, "abc", img.Comment)
+
+ id2, err := is.Create([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`))
+ assert.NoError(t, err)
+
+ err = is.SetParent(id2, id1)
+ assert.NoError(t, err)
+
+ pid1, err := is.GetParent(id2)
+ assert.NoError(t, err)
+ assert.Equal(t, pid1, id1)
+
+ _, err = is.Delete(id1)
+ assert.NoError(t, err)
+
+ _, err = is.Get(id1)
+ testutil.ErrorContains(t, err, "failed to get digest")
+
+ _, err = is.Get(id2)
+ assert.NoError(t, err)
+
+ _, err = is.GetParent(id2)
+ testutil.ErrorContains(t, err, "failed to read metadata")
+}
+
+// TestSearchAfterDelete ensures a deleted image's digest is also removed
+// from the search index, so prefix lookups stop resolving it.
+func TestSearchAfterDelete(t *testing.T) {
+ is, cleanup := defaultImageStore(t)
+ defer cleanup()
+
+ id, err := is.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers"}}`))
+ assert.NoError(t, err)
+
+ id1, err := is.Search(string(id)[:15])
+ assert.NoError(t, err)
+ assert.Equal(t, id1, id)
+
+ _, err = is.Delete(id)
+ assert.NoError(t, err)
+
+ _, err = is.Search(string(id)[:15])
+ testutil.ErrorContains(t, err, "No such image")
+}
+
+// TestParentReset verifies that re-parenting an image removes it from the
+// previous parent's children set and adds it to the new parent's.
+func TestParentReset(t *testing.T) {
+ is, cleanup := defaultImageStore(t)
+ defer cleanup()
+
+ id, err := is.Create([]byte(`{"comment": "abc1", "rootfs": {"type": "layers"}}`))
+ assert.NoError(t, err)
+
+ id2, err := is.Create([]byte(`{"comment": "abc2", "rootfs": {"type": "layers"}}`))
+ assert.NoError(t, err)
+
+ id3, err := is.Create([]byte(`{"comment": "abc3", "rootfs": {"type": "layers"}}`))
+ assert.NoError(t, err)
+
+ assert.NoError(t, is.SetParent(id, id2))
+ assert.Len(t, is.Children(id2), 1)
+
+ assert.NoError(t, is.SetParent(id, id3))
+ assert.Len(t, is.Children(id2), 0)
+ assert.Len(t, is.Children(id3), 1)
+}
+
+// defaultImageStore builds a Store over a fresh filesystem backend with a
+// no-op layer store; the returned func cleans up the backend.
+func defaultImageStore(t *testing.T) (Store, func()) {
+ fsBackend, cleanup := defaultFSStoreBackend(t)
+
+ store, err := NewImageStore(fsBackend, runtime.GOOS, &mockLayerGetReleaser{})
+ assert.NoError(t, err)
+
+ return store, cleanup
+}
+
+// TestGetAndSetLastUpdated checks the lastUpdated round trip: zero time
+// before any Set, non-zero after.
+func TestGetAndSetLastUpdated(t *testing.T) {
+ store, cleanup := defaultImageStore(t)
+ defer cleanup()
+
+ id, err := store.Create([]byte(`{"comment": "abc1", "rootfs": {"type": "layers"}}`))
+ assert.NoError(t, err)
+
+ updated, err := store.GetLastUpdated(id)
+ assert.NoError(t, err)
+ assert.Equal(t, updated.IsZero(), true)
+
+ assert.NoError(t, store.SetLastUpdated(id))
+
+ updated, err = store.GetLastUpdated(id)
+ assert.NoError(t, err)
+ assert.Equal(t, updated.IsZero(), false)
+}
+
+// mockLayerGetReleaser is a no-op LayerGetReleaser: Get returns a nil
+// layer so images are tracked without any real layer references.
+type mockLayerGetReleaser struct{}
+
+func (ls *mockLayerGetReleaser) Get(layer.ChainID) (layer.Layer, error) {
+ return nil, nil
+}
+
+func (ls *mockLayerGetReleaser) Release(layer.Layer) ([]layer.Metadata, error) {
+ return nil, nil
+}
diff --git a/image/tarexport/load.go b/image/tarexport/load.go
new file mode 100644
index 0000000..af8cefc
--- /dev/null
+++ b/image/tarexport/load.go
@@ -0,0 +1,431 @@
+package tarexport
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/distribution"
+ "github.com/docker/distribution/reference"
+ "github.com/docker/docker/image"
+ "github.com/docker/docker/image/v1"
+ "github.com/docker/docker/layer"
+ "github.com/docker/docker/pkg/archive"
+ "github.com/docker/docker/pkg/chrootarchive"
+ "github.com/docker/docker/pkg/progress"
+ "github.com/docker/docker/pkg/streamformatter"
+ "github.com/docker/docker/pkg/stringid"
+ "github.com/docker/docker/pkg/symlink"
+ "github.com/docker/docker/pkg/system"
+ "github.com/opencontainers/go-digest"
+)
+
+// Load imports images from the tar stream inTar. If the archive has no
+// manifest.json it falls back to legacy (pre-1.10) loading. For each
+// manifest entry it registers every layer, creates the image, applies the
+// repo tags, and finally wires up validated parent links. quiet disables
+// per-layer progress reporting on outStream.
+func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool) error {
+ var progressOutput progress.Output
+ if !quiet {
+ progressOutput = streamformatter.NewJSONProgressOutput(outStream, false)
+ }
+ outStream = streamformatter.NewStdoutWriter(outStream)
+
+ tmpDir, err := ioutil.TempDir("", "docker-import-")
+ if err != nil {
+ return err
+ }
+ defer os.RemoveAll(tmpDir)
+
+ if err := chrootarchive.Untar(inTar, tmpDir, nil); err != nil {
+ return err
+ }
+ // read manifest, if no file then load in legacy mode
+ manifestPath, err := safePath(tmpDir, manifestFileName)
+ if err != nil {
+ return err
+ }
+ manifestFile, err := os.Open(manifestPath)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return l.legacyLoad(tmpDir, outStream, progressOutput)
+ }
+ return err
+ }
+ defer manifestFile.Close()
+
+ var manifest []manifestItem
+ if err := json.NewDecoder(manifestFile).Decode(&manifest); err != nil {
+ return err
+ }
+
+ var parentLinks []parentLink
+ var imageIDsStr string
+ var imageRefCount int
+
+ for _, m := range manifest {
+ configPath, err := safePath(tmpDir, m.Config)
+ if err != nil {
+ return err
+ }
+ config, err := ioutil.ReadFile(configPath)
+ if err != nil {
+ return err
+ }
+ img, err := image.NewFromJSON(config)
+ if err != nil {
+ return err
+ }
+ if err := checkCompatibleOS(img.OS); err != nil {
+ return err
+ }
+ var rootFS image.RootFS
+ rootFS = *img.RootFS
+ rootFS.DiffIDs = nil
+
+ if expected, actual := len(m.Layers), len(img.RootFS.DiffIDs); expected != actual {
+ return fmt.Errorf("invalid manifest, layers length mismatch: expected %d, got %d", expected, actual)
+ }
+
+ // On Windows, validate the platform, defaulting to windows if not present.
+ platform := layer.Platform(img.OS)
+ if runtime.GOOS == "windows" {
+ if platform == "" {
+ platform = "windows"
+ }
+ if (platform != "windows") && (platform != "linux") {
+ return fmt.Errorf("configuration for this image has an unsupported platform: %s", platform)
+ }
+ }
+
+ for i, diffID := range img.RootFS.DiffIDs {
+ layerPath, err := safePath(tmpDir, m.Layers[i])
+ if err != nil {
+ return err
+ }
+ r := rootFS
+ r.Append(diffID)
+ // Reuse an already-registered layer when present; otherwise load
+ // it from the archive.
+ newLayer, err := l.ls.Get(r.ChainID())
+ if err != nil {
+ newLayer, err = l.loadLayer(layerPath, rootFS, diffID.String(), platform, m.LayerSources[diffID], progressOutput)
+ if err != nil {
+ return err
+ }
+ }
+ // NOTE(review): defer inside a loop — every layer reference is
+ // held until Load returns, not until the iteration ends.
+ defer layer.ReleaseAndLog(l.ls, newLayer)
+ if expected, actual := diffID, newLayer.DiffID(); expected != actual {
+ return fmt.Errorf("invalid diffID for layer %d: expected %q, got %q", i, expected, actual)
+ }
+ rootFS.Append(diffID)
+ }
+
+ imgID, err := l.is.Create(config)
+ if err != nil {
+ return err
+ }
+ imageIDsStr += fmt.Sprintf("Loaded image ID: %s\n", imgID)
+
+ imageRefCount = 0
+ for _, repoTag := range m.RepoTags {
+ named, err := reference.ParseNormalizedNamed(repoTag)
+ if err != nil {
+ return err
+ }
+ ref, ok := named.(reference.NamedTagged)
+ if !ok {
+ return fmt.Errorf("invalid tag %q", repoTag)
+ }
+ // NOTE(review): setLoadedTag's error return is discarded here.
+ l.setLoadedTag(ref, imgID.Digest(), outStream)
+ outStream.Write([]byte(fmt.Sprintf("Loaded image: %s\n", reference.FamiliarString(ref))))
+ imageRefCount++
+ }
+
+ parentLinks = append(parentLinks, parentLink{imgID, m.Parent})
+ l.loggerImgEvent.LogImageEvent(imgID.String(), imgID.String(), "load")
+ }
+
+ for _, p := range validatedParentLinks(parentLinks) {
+ if p.parentID != "" {
+ if err := l.setParentID(p.id, p.parentID); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Only print raw image IDs when the last manifest entry had no tags.
+ if imageRefCount == 0 {
+ outStream.Write([]byte(imageIDsStr))
+ }
+
+ return nil
+}
+
+// setParentID links id to parentID in the image store after verifying the
+// candidate parent's history is a valid prefix of the child's.
+func (l *tarexporter) setParentID(id, parentID image.ID) error {
+ img, err := l.is.Get(id)
+ if err != nil {
+ return err
+ }
+ parent, err := l.is.Get(parentID)
+ if err != nil {
+ return err
+ }
+ if !checkValidParent(img, parent) {
+ return fmt.Errorf("image %v is not a valid parent for %v", parent.ID(), img.ID())
+ }
+ return l.is.SetParent(id, parentID)
+}
+
+// loadLayer registers the (possibly compressed) layer tar at filename on
+// top of rootFS, emitting progress under the truncated id when
+// progressOutput is non-nil. foreignSrc is forwarded when the layer store
+// supports descriptors (foreign/urls-backed layers).
+func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, platform layer.Platform, foreignSrc distribution.Descriptor, progressOutput progress.Output) (layer.Layer, error) {
+ // We use system.OpenSequential to use sequential file access on Windows, avoiding
+ // depleting the standby list. On Linux, this equates to a regular os.Open.
+ rawTar, err := system.OpenSequential(filename)
+ if err != nil {
+ logrus.Debugf("Error reading embedded tar: %v", err)
+ return nil, err
+ }
+ defer rawTar.Close()
+
+ var r io.Reader
+ if progressOutput != nil {
+ fileInfo, err := rawTar.Stat()
+ if err != nil {
+ logrus.Debugf("Error statting file: %v", err)
+ return nil, err
+ }
+
+ r = progress.NewProgressReader(rawTar, progressOutput, fileInfo.Size(), stringid.TruncateID(id), "Loading layer")
+ } else {
+ r = rawTar
+ }
+
+ inflatedLayerData, err := archive.DecompressStream(r)
+ if err != nil {
+ return nil, err
+ }
+ defer inflatedLayerData.Close()
+
+ if ds, ok := l.ls.(layer.DescribableStore); ok {
+ return ds.RegisterWithDescriptor(inflatedLayerData, rootFS.ChainID(), platform, foreignSrc)
+ }
+ return l.ls.Register(inflatedLayerData, rootFS.ChainID(), platform)
+}
+
+// setLoadedTag force-points ref at imgID, warning on outStream when the
+// tag previously referenced a different image.
+func (l *tarexporter) setLoadedTag(ref reference.NamedTagged, imgID digest.Digest, outStream io.Writer) error {
+ if prevID, err := l.rs.Get(ref); err == nil && prevID != imgID {
+ fmt.Fprintf(outStream, "The image %s already exists, renaming the old one with ID %s to empty string\n", reference.FamiliarString(ref), string(prevID)) // todo: this message is wrong in case of multiple tags
+ }
+
+ if err := l.rs.AddTag(ref, imgID, true); err != nil {
+ return err
+ }
+ return nil
+}
+
+// legacyLoad imports a pre-1.10 archive layout from tmpDir: one directory
+// per image (loaded recursively via legacyLoadImage) plus a top-level
+// "repositories" file that maps names/tags to old-style image IDs.
+// Unsupported on Windows.
+func (l *tarexporter) legacyLoad(tmpDir string, outStream io.Writer, progressOutput progress.Output) error {
+ if runtime.GOOS == "windows" {
+ return errors.New("Windows does not support legacy loading of images")
+ }
+
+ legacyLoadedMap := make(map[string]image.ID)
+
+ dirs, err := ioutil.ReadDir(tmpDir)
+ if err != nil {
+ return err
+ }
+
+ // every dir represents an image
+ for _, d := range dirs {
+ if d.IsDir() {
+ if err := l.legacyLoadImage(d.Name(), tmpDir, legacyLoadedMap, progressOutput); err != nil {
+ return err
+ }
+ }
+ }
+
+ // load tags from repositories file
+ repositoriesPath, err := safePath(tmpDir, legacyRepositoriesFileName)
+ if err != nil {
+ return err
+ }
+ repositoriesFile, err := os.Open(repositoriesPath)
+ if err != nil {
+ return err
+ }
+ defer repositoriesFile.Close()
+
+ repositories := make(map[string]map[string]string)
+ if err := json.NewDecoder(repositoriesFile).Decode(&repositories); err != nil {
+ return err
+ }
+
+ for name, tagMap := range repositories {
+ for tag, oldID := range tagMap {
+ imgID, ok := legacyLoadedMap[oldID]
+ if !ok {
+ return fmt.Errorf("invalid target ID: %v", oldID)
+ }
+ named, err := reference.ParseNormalizedNamed(name)
+ if err != nil {
+ return err
+ }
+ ref, err := reference.WithTag(named, tag)
+ if err != nil {
+ return err
+ }
+ // NOTE(review): setLoadedTag's error return is discarded here.
+ l.setLoadedTag(ref, imgID.Digest(), outStream)
+ }
+ }
+
+ return nil
+}
+
+// legacyLoadImage imports one legacy image directory (oldID) from
+// sourceDir, first recursing into its parent chain so ancestors are loaded
+// before descendants. The old-ID -> new content-addressed ID mapping is
+// recorded in loadedMap; already-loaded images are skipped.
+func (l *tarexporter) legacyLoadImage(oldID, sourceDir string, loadedMap map[string]image.ID, progressOutput progress.Output) error {
+ if _, loaded := loadedMap[oldID]; loaded {
+ return nil
+ }
+ configPath, err := safePath(sourceDir, filepath.Join(oldID, legacyConfigFileName))
+ if err != nil {
+ return err
+ }
+ imageJSON, err := ioutil.ReadFile(configPath)
+ if err != nil {
+ logrus.Debugf("Error reading json: %v", err)
+ return err
+ }
+
+ var img struct {
+ OS string
+ Parent string
+ }
+ if err := json.Unmarshal(imageJSON, &img); err != nil {
+ return err
+ }
+
+ if err := checkCompatibleOS(img.OS); err != nil {
+ return err
+ }
+
+ // Load the parent (recursively) before this image so its rootfs and
+ // history can be extended below. The loop re-checks loadedMap until
+ // the parent's new ID is available.
+ var parentID image.ID
+ if img.Parent != "" {
+ for {
+ var loaded bool
+ if parentID, loaded = loadedMap[img.Parent]; !loaded {
+ if err := l.legacyLoadImage(img.Parent, sourceDir, loadedMap, progressOutput); err != nil {
+ return err
+ }
+ } else {
+ break
+ }
+ }
+ }
+
+ // todo: try to connect with migrate code
+ rootFS := image.NewRootFS()
+ var history []image.History
+
+ if parentID != "" {
+ parentImg, err := l.is.Get(parentID)
+ if err != nil {
+ return err
+ }
+
+ rootFS = parentImg.RootFS
+ history = parentImg.History
+ }
+
+ layerPath, err := safePath(sourceDir, filepath.Join(oldID, legacyLayerFileName))
+ if err != nil {
+ return err
+ }
+ newLayer, err := l.loadLayer(layerPath, *rootFS, oldID, "", distribution.Descriptor{}, progressOutput)
+ if err != nil {
+ return err
+ }
+ rootFS.Append(newLayer.DiffID())
+
+ h, err := v1.HistoryFromConfig(imageJSON, false)
+ if err != nil {
+ return err
+ }
+ history = append(history, h)
+
+ config, err := v1.MakeConfigFromV1Config(imageJSON, rootFS, history)
+ if err != nil {
+ return err
+ }
+ imgID, err := l.is.Create(config)
+ if err != nil {
+ return err
+ }
+
+ metadata, err := l.ls.Release(newLayer)
+ layer.LogReleaseMetadata(metadata)
+ if err != nil {
+ return err
+ }
+
+ if parentID != "" {
+ if err := l.is.SetParent(imgID, parentID); err != nil {
+ return err
+ }
+ }
+
+ loadedMap[oldID] = imgID
+ return nil
+}
+
+// safePath joins path under base and resolves symlinks scoped to base,
+// preventing archive entries from escaping the extraction directory.
+func safePath(base, path string) (string, error) {
+ return symlink.FollowSymlinkInScope(filepath.Join(base, path), base)
+}
+
+// parentLink pairs a loaded/saved image with its claimed parent ID.
+type parentLink struct {
+ id, parentID image.ID
+}
+
+// validatedParentLinks returns pl with any parentID cleared when it does
+// not refer to another (distinct) image in the same set, so dangling or
+// self-referential links are dropped.
+func validatedParentLinks(pl []parentLink) (ret []parentLink) {
+mainloop:
+ for i, p := range pl {
+ ret = append(ret, p)
+ for _, p2 := range pl {
+ if p2.id == p.parentID && p2.id != p.id {
+ continue mainloop
+ }
+ }
+ ret[i].parentID = ""
+ }
+ return
+}
+
+// checkValidParent reports whether parent is a plausible parent of img:
+// either both have no history, or parent's history is exactly img's
+// history minus its last entry.
+func checkValidParent(img, parent *image.Image) bool {
+ if len(img.History) == 0 && len(parent.History) == 0 {
+ return true // having history is not mandatory
+ }
+ if len(img.History)-len(parent.History) != 1 {
+ return false
+ }
+ for i, h := range parent.History {
+ if !reflect.DeepEqual(h, img.History[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// checkCompatibleOS returns an error only when loading would cross a
+// Windows boundary (image or runtime is Windows and they differ); any
+// other mismatch, or an empty OS, is accepted for compatibility.
+func checkCompatibleOS(os string) error {
+ // TODO @jhowardmsft LCOW - revisit for simultaneous platforms
+ platform := runtime.GOOS
+ if system.LCOWSupported() {
+ platform = "linux"
+ }
+ // always compatible if the OS matches; also match an empty OS
+ if os == platform || os == "" {
+ return nil
+ }
+ // for compatibility, only fail if the image or runtime OS is Windows
+ if os == "windows" || platform == "windows" {
+ return fmt.Errorf("cannot load %s image on %s", os, platform)
+ }
+ return nil
+}
diff --git a/image/tarexport/save.go b/image/tarexport/save.go
new file mode 100644
index 0000000..d304a54
--- /dev/null
+++ b/image/tarexport/save.go
@@ -0,0 +1,409 @@
+package tarexport
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "time"
+
+ "github.com/docker/distribution"
+ "github.com/docker/distribution/reference"
+ "github.com/docker/docker/image"
+ "github.com/docker/docker/image/v1"
+ "github.com/docker/docker/layer"
+ "github.com/docker/docker/pkg/archive"
+ "github.com/docker/docker/pkg/system"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+// imageDescriptor collects everything needed to save one image: its tag
+// references, the legacy layer directory names written for it, the decoded
+// image, and a retained reference to its top layer.
+type imageDescriptor struct {
+ refs []reference.NamedTagged
+ layers []string
+ image *image.Image
+ layerRef layer.Layer
+}
+
+// saveSession holds per-Save state: the temp output directory, the images
+// being exported, and dedup maps for already-written layers.
+type saveSession struct {
+ *tarexporter
+ outDir string
+ images map[image.ID]*imageDescriptor
+ savedLayers map[string]struct{}
+ diffIDPaths map[layer.DiffID]string // cache every diffID blob to avoid duplicates
+}
+
+// Save exports the images referenced by names as a tar archive to
+// outStream. Layer references taken during name parsing are released when
+// the save completes.
+func (l *tarexporter) Save(names []string, outStream io.Writer) error {
+ images, err := l.parseNames(names)
+ if err != nil {
+ return err
+ }
+
+ // Release all the image top layer references
+ defer l.releaseLayerReferences(images)
+ return (&saveSession{tarexporter: l, images: images}).save(outStream)
+}
+
+// parseNames will parse the image names to a map which contains image.ID to *imageDescriptor.
+// Each imageDescriptor holds an image top layer reference named 'layerRef'. It is taken here, should be released later.
+// Names may be digests, digest prefixes, repository names (expanding to all
+// tags), or tagged references; on any error all references taken so far are
+// released before returning.
+func (l *tarexporter) parseNames(names []string) (desc map[image.ID]*imageDescriptor, rErr error) {
+ imgDescr := make(map[image.ID]*imageDescriptor)
+ defer func() {
+ if rErr != nil {
+ l.releaseLayerReferences(imgDescr)
+ }
+ }()
+
+ // addAssoc registers an image (taking its layer reference once) and
+ // appends a deduplicated tag reference when ref is tagged.
+ addAssoc := func(id image.ID, ref reference.Named) error {
+ if _, ok := imgDescr[id]; !ok {
+ descr := &imageDescriptor{}
+ if err := l.takeLayerReference(id, descr); err != nil {
+ return err
+ }
+ imgDescr[id] = descr
+ }
+
+ if ref != nil {
+ if _, ok := ref.(reference.Canonical); ok {
+ return nil
+ }
+ tagged, ok := reference.TagNameOnly(ref).(reference.NamedTagged)
+ if !ok {
+ return nil
+ }
+
+ for _, t := range imgDescr[id].refs {
+ if tagged.String() == t.String() {
+ return nil
+ }
+ }
+ imgDescr[id].refs = append(imgDescr[id].refs, tagged)
+ }
+ return nil
+ }
+
+ for _, name := range names {
+ ref, err := reference.ParseAnyReference(name)
+ if err != nil {
+ return nil, err
+ }
+ namedRef, ok := ref.(reference.Named)
+ if !ok {
+ // Check if digest ID reference
+ if digested, ok := ref.(reference.Digested); ok {
+ id := image.IDFromDigest(digested.Digest())
+ if err := addAssoc(id, nil); err != nil {
+ return nil, err
+ }
+ continue
+ }
+ return nil, errors.Errorf("invalid reference: %v", name)
+ }
+
+ if reference.FamiliarName(namedRef) == string(digest.Canonical) {
+ imgID, err := l.is.Search(name)
+ if err != nil {
+ return nil, err
+ }
+ if err := addAssoc(imgID, nil); err != nil {
+ return nil, err
+ }
+ continue
+ }
+ if reference.IsNameOnly(namedRef) {
+ assocs := l.rs.ReferencesByName(namedRef)
+ for _, assoc := range assocs {
+ if err := addAssoc(image.IDFromDigest(assoc.ID), assoc.Ref); err != nil {
+ return nil, err
+ }
+ }
+ // Fall back to treating the bare name as an ID prefix when no
+ // repository references match.
+ if len(assocs) == 0 {
+ imgID, err := l.is.Search(name)
+ if err != nil {
+ return nil, err
+ }
+ if err := addAssoc(imgID, nil); err != nil {
+ return nil, err
+ }
+ }
+ continue
+ }
+ id, err := l.rs.Get(namedRef)
+ if err != nil {
+ return nil, err
+ }
+ if err := addAssoc(image.IDFromDigest(id), namedRef); err != nil {
+ return nil, err
+ }
+
+ }
+ return imgDescr, nil
+}
+
+// takeLayerReference will take/Get the image top layer reference
+// and record the decoded image on imgDescr. Images with no layers leave
+// layerRef nil.
+func (l *tarexporter) takeLayerReference(id image.ID, imgDescr *imageDescriptor) error {
+ img, err := l.is.Get(id)
+ if err != nil {
+ return err
+ }
+ imgDescr.image = img
+ topLayerID := img.RootFS.ChainID()
+ if topLayerID == "" {
+ return nil
+ }
+ // NOTE(review): the local name shadows the imported "layer" package
+ // within this scope.
+ layer, err := l.ls.Get(topLayerID)
+ if err != nil {
+ return err
+ }
+ imgDescr.layerRef = layer
+ return nil
+}
+
+// releaseLayerReferences will release all the image top layer references
+// Release errors are ignored; the function always returns nil.
+func (l *tarexporter) releaseLayerReferences(imgDescr map[image.ID]*imageDescriptor) error {
+ for _, descr := range imgDescr {
+ if descr.layerRef != nil {
+ l.ls.Release(descr.layerRef)
+ }
+ }
+ return nil
+}
+
+// save writes every image in the session to a temp directory — per-image
+// configs and legacy layer dirs, a legacy "repositories" file when tags
+// exist, and manifest.json with validated parent links — then streams the
+// directory as an uncompressed tar to outStream. File mtimes are pinned
+// (epoch for manifest/repositories) for reproducible output.
+func (s *saveSession) save(outStream io.Writer) error {
+ s.savedLayers = make(map[string]struct{})
+ s.diffIDPaths = make(map[layer.DiffID]string)
+
+ // get image json
+ tempDir, err := ioutil.TempDir("", "docker-export-")
+ if err != nil {
+ return err
+ }
+ defer os.RemoveAll(tempDir)
+
+ s.outDir = tempDir
+ reposLegacy := make(map[string]map[string]string)
+
+ var manifest []manifestItem
+ var parentLinks []parentLink
+
+ for id, imageDescr := range s.images {
+ foreignSrcs, err := s.saveImage(id)
+ if err != nil {
+ return err
+ }
+
+ var repoTags []string
+ var layers []string
+
+ for _, ref := range imageDescr.refs {
+ familiarName := reference.FamiliarName(ref)
+ if _, ok := reposLegacy[familiarName]; !ok {
+ reposLegacy[familiarName] = make(map[string]string)
+ }
+ // Legacy format tags point at the topmost (last) legacy layer ID.
+ reposLegacy[familiarName][ref.Tag()] = imageDescr.layers[len(imageDescr.layers)-1]
+ repoTags = append(repoTags, reference.FamiliarString(ref))
+ }
+
+ for _, l := range imageDescr.layers {
+ layers = append(layers, filepath.Join(l, legacyLayerFileName))
+ }
+
+ manifest = append(manifest, manifestItem{
+ Config: id.Digest().Hex() + ".json",
+ RepoTags: repoTags,
+ Layers: layers,
+ LayerSources: foreignSrcs,
+ })
+
+ parentID, _ := s.is.GetParent(id)
+ parentLinks = append(parentLinks, parentLink{id, parentID})
+ s.tarexporter.loggerImgEvent.LogImageEvent(id.String(), id.String(), "save")
+ }
+
+ for i, p := range validatedParentLinks(parentLinks) {
+ if p.parentID != "" {
+ manifest[i].Parent = p.parentID
+ }
+ }
+
+ if len(reposLegacy) > 0 {
+ reposFile := filepath.Join(tempDir, legacyRepositoriesFileName)
+ rf, err := os.OpenFile(reposFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
+ if err != nil {
+ return err
+ }
+
+ if err := json.NewEncoder(rf).Encode(reposLegacy); err != nil {
+ rf.Close()
+ return err
+ }
+
+ rf.Close()
+
+ if err := system.Chtimes(reposFile, time.Unix(0, 0), time.Unix(0, 0)); err != nil {
+ return err
+ }
+ }
+
+ manifestFileName := filepath.Join(tempDir, manifestFileName)
+ f, err := os.OpenFile(manifestFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
+ if err != nil {
+ return err
+ }
+
+ if err := json.NewEncoder(f).Encode(manifest); err != nil {
+ f.Close()
+ return err
+ }
+
+ f.Close()
+
+ if err := system.Chtimes(manifestFileName, time.Unix(0, 0), time.Unix(0, 0)); err != nil {
+ return err
+ }
+
+ fs, err := archive.Tar(tempDir, archive.Uncompressed)
+ if err != nil {
+ return err
+ }
+ defer fs.Close()
+
+ _, err = io.Copy(outStream, fs)
+ return err
+}
+
+// saveImage writes one image: a v1-style directory per rootfs layer (each
+// with a deterministic v1 ID chained through parent) plus the image config
+// JSON. It records the legacy layer IDs on the descriptor and returns the
+// foreign-layer descriptors, keyed by diff ID, for the manifest.
+func (s *saveSession) saveImage(id image.ID) (map[layer.DiffID]distribution.Descriptor, error) {
+ img := s.images[id].image
+ if len(img.RootFS.DiffIDs) == 0 {
+ return nil, fmt.Errorf("empty export - not implemented")
+ }
+
+ var parent digest.Digest
+ var layers []string
+ var foreignSrcs map[layer.DiffID]distribution.Descriptor
+ for i := range img.RootFS.DiffIDs {
+ v1Img := image.V1Image{
+ // This is for backward compatibility used for
+ // pre v1.9 docker.
+ Created: time.Unix(0, 0),
+ }
+ // Only the topmost layer carries the full V1Image config.
+ if i == len(img.RootFS.DiffIDs)-1 {
+ v1Img = img.V1Image
+ }
+ rootFS := *img.RootFS
+ rootFS.DiffIDs = rootFS.DiffIDs[:i+1]
+ v1ID, err := v1.CreateID(v1Img, rootFS.ChainID(), parent)
+ if err != nil {
+ return nil, err
+ }
+
+ v1Img.ID = v1ID.Hex()
+ if parent != "" {
+ v1Img.Parent = parent.Hex()
+ }
+
+ src, err := s.saveLayer(rootFS.ChainID(), v1Img, img.Created)
+ if err != nil {
+ return nil, err
+ }
+ layers = append(layers, v1Img.ID)
+ parent = v1ID
+ if src.Digest != "" {
+ if foreignSrcs == nil {
+ foreignSrcs = make(map[layer.DiffID]distribution.Descriptor)
+ }
+ foreignSrcs[img.RootFS.DiffIDs[i]] = src
+ }
+ }
+
+ configFile := filepath.Join(s.outDir, id.Digest().Hex()+".json")
+ if err := ioutil.WriteFile(configFile, img.RawJSON(), 0644); err != nil {
+ return nil, err
+ }
+ if err := system.Chtimes(configFile, img.Created, img.Created); err != nil {
+ return nil, err
+ }
+
+ s.images[id].layers = layers
+ return foreignSrcs, nil
+}
+
+// saveLayer writes one legacy layer directory (VERSION, json config,
+// layer.tar). A layer whose diff ID was already written is emitted as a
+// relative symlink to the earlier blob instead of duplicating it. Returns
+// the layer's foreign-source descriptor when the layer provides one.
+func (s *saveSession) saveLayer(id layer.ChainID, legacyImg image.V1Image, createdTime time.Time) (distribution.Descriptor, error) {
+ if _, exists := s.savedLayers[legacyImg.ID]; exists {
+ return distribution.Descriptor{}, nil
+ }
+
+ outDir := filepath.Join(s.outDir, legacyImg.ID)
+ if err := os.Mkdir(outDir, 0755); err != nil {
+ return distribution.Descriptor{}, err
+ }
+
+ // todo: why is this version file here?
+ if err := ioutil.WriteFile(filepath.Join(outDir, legacyVersionFileName), []byte("1.0"), 0644); err != nil {
+ return distribution.Descriptor{}, err
+ }
+
+ imageConfig, err := json.Marshal(legacyImg)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+
+ if err := ioutil.WriteFile(filepath.Join(outDir, legacyConfigFileName), imageConfig, 0644); err != nil {
+ return distribution.Descriptor{}, err
+ }
+
+ // serialize filesystem
+ layerPath := filepath.Join(outDir, legacyLayerFileName)
+ l, err := s.ls.Get(id)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ defer layer.ReleaseAndLog(s.ls, l)
+
+ if oldPath, exists := s.diffIDPaths[l.DiffID()]; exists {
+ relPath, err := filepath.Rel(outDir, oldPath)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ if err := os.Symlink(relPath, layerPath); err != nil {
+ return distribution.Descriptor{}, errors.Wrap(err, "error creating symlink while saving layer")
+ }
+ } else {
+ // Use system.CreateSequential rather than os.Create. This ensures sequential
+ // file access on Windows to avoid eating into MM standby list.
+ // On Linux, this equates to a regular os.Create.
+ tarFile, err := system.CreateSequential(layerPath)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ defer tarFile.Close()
+
+ arch, err := l.TarStream()
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ defer arch.Close()
+
+ if _, err := io.Copy(tarFile, arch); err != nil {
+ return distribution.Descriptor{}, err
+ }
+
+ // Pin the directory ("" entry) and all three files to createdTime.
+ for _, fname := range []string{"", legacyVersionFileName, legacyConfigFileName, legacyLayerFileName} {
+ // todo: maybe save layer created timestamp?
+ if err := system.Chtimes(filepath.Join(outDir, fname), createdTime, createdTime); err != nil {
+ return distribution.Descriptor{}, err
+ }
+ }
+
+ s.diffIDPaths[l.DiffID()] = layerPath
+ }
+ s.savedLayers[legacyImg.ID] = struct{}{}
+
+ var src distribution.Descriptor
+ if fs, ok := l.(distribution.Describable); ok {
+ src = fs.Descriptor()
+ }
+ return src, nil
+}
diff --git a/image/tarexport/tarexport.go b/image/tarexport/tarexport.go
new file mode 100644
index 0000000..f7fab74
--- /dev/null
+++ b/image/tarexport/tarexport.go
@@ -0,0 +1,47 @@
+package tarexport
+
+import (
+ "github.com/docker/distribution"
+ "github.com/docker/docker/image"
+ "github.com/docker/docker/layer"
+ refstore "github.com/docker/docker/reference"
+)
+
+// File names used inside a saved image tar archive. The "legacy" names
+// follow the pre-content-addressable (docker v1) on-disk layout so that
+// archives remain loadable by older engines.
+const (
+ manifestFileName = "manifest.json"
+ legacyLayerFileName = "layer.tar"
+ legacyConfigFileName = "json"
+ legacyVersionFileName = "VERSION"
+ legacyRepositoriesFileName = "repositories"
+)
+
+// manifestItem is a single entry of manifest.json in a saved image tar,
+// describing one image: its config file, tags and ordered layer tars.
+type manifestItem struct {
+ Config string // path of the image config JSON within the archive
+ RepoTags []string // repository:tag references pointing at this image
+ Layers []string // paths of the layer tars, base layer first
+ Parent image.ID `json:",omitempty"` // parent image ID, when present
+ LayerSources map[layer.DiffID]distribution.Descriptor `json:",omitempty"` // external descriptors per layer diff ID, when present
+}
+
+// tarexporter bundles the stores needed to load and save image tar
+// archives; it is the concrete type behind NewTarExporter.
+type tarexporter struct {
+ is image.Store // image config store
+ ls layer.Store // layer (filesystem diff) store
+ rs refstore.Store // reference (tag/digest) store
+ loggerImgEvent LogImageEvent // sink for load/save image events
+}
+
+// LogImageEvent defines interface for event generation related to image tar(load and save) operations
+type LogImageEvent interface {
+ // LogImageEvent generates an event related to an image operation,
+ // identified by image ID, reference name and the action performed.
+ LogImageEvent(imageID, refName, action string)
+}
+
+// NewTarExporter returns new Exporter for tar packages, wiring together
+// the image, layer and reference stores plus an event logger.
+func NewTarExporter(is image.Store, ls layer.Store, rs refstore.Store, loggerImgEvent LogImageEvent) image.Exporter {
+ return &tarexporter{
+  is: is,
+  ls: ls,
+  rs: rs,
+  loggerImgEvent: loggerImgEvent,
+ }
+}
diff --git a/image/v1/imagev1.go b/image/v1/imagev1.go
new file mode 100644
index 0000000..0e8a23c
--- /dev/null
+++ b/image/v1/imagev1.go
@@ -0,0 +1,150 @@
+package v1
+
+import (
+ "encoding/json"
+ "reflect"
+ "strings"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/api/types/versions"
+ "github.com/docker/docker/image"
+ "github.com/docker/docker/layer"
+ "github.com/docker/docker/pkg/stringid"
+ "github.com/opencontainers/go-digest"
+)
+
+// noFallbackMinVersion is the minimum docker_version of a v1 config for
+// which the JSON is used as-is; configs produced by older versions are
+// first round-tripped through the V1Image struct (the "fallback") to
+// strip blank fields before further processing.
+var noFallbackMinVersion = "1.8.3"
+
+// HistoryFromConfig creates a History struct from v1 configuration JSON.
+// emptyLayer is supplied by the caller because it is not recorded in the
+// v1 config itself; CreatedBy is reconstructed from the container
+// command that produced the layer.
+func HistoryFromConfig(imageJSON []byte, emptyLayer bool) (image.History, error) {
+ h := image.History{} // zero value, returned on unmarshal failure
+ var v1Image image.V1Image
+ if err := json.Unmarshal(imageJSON, &v1Image); err != nil {
+  return h, err
+ }
+
+ return image.History{
+  Author: v1Image.Author,
+  Created: v1Image.Created,
+  CreatedBy: strings.Join(v1Image.ContainerConfig.Cmd, " "),
+  Comment: v1Image.Comment,
+  EmptyLayer: emptyLayer,
+ }, nil
+}
+
+// CreateID creates an ID from v1 image, layerID and parent ID.
+// Used for backwards compatibility with old clients.
+// The ID is the digest of the v1 config JSON with its own "id" field
+// cleared and "layer_id"/"parent" injected, so equal inputs always hash
+// to the same v1 ID.
+func CreateID(v1Image image.V1Image, layerID layer.ChainID, parent digest.Digest) (digest.Digest, error) {
+ v1Image.ID = "" // the ID must not feed into its own digest
+ v1JSON, err := json.Marshal(v1Image)
+ if err != nil {
+  return "", err
+ }
+
+ // Re-parse into a raw map so extra fields can be injected without
+ // defining a dedicated struct.
+ var config map[string]*json.RawMessage
+ if err := json.Unmarshal(v1JSON, &config); err != nil {
+  return "", err
+ }
+
+ // FIXME: note that this is slightly incompatible with RootFS logic
+ config["layer_id"] = rawJSON(layerID)
+ if parent != "" {
+  config["parent"] = rawJSON(parent)
+ }
+
+ configJSON, err := json.Marshal(config)
+ if err != nil {
+  return "", err
+ }
+ logrus.Debugf("CreateV1ID %s", configJSON)
+
+ return digest.FromBytes(configJSON), nil
+}
+
+// MakeConfigFromV1Config creates an image config from the legacy V1 config format.
+// It strips the v1-only bookkeeping fields and grafts the caller-supplied
+// rootfs and history onto the result.
+func MakeConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []image.History) ([]byte, error) {
+ // Only docker_version is needed up front, to pick the parsing path.
+ var dver struct {
+  DockerVersion string `json:"docker_version"`
+ }
+
+ if err := json.Unmarshal(imageJSON, &dver); err != nil {
+  return nil, err
+ }
+
+ useFallback := versions.LessThan(dver.DockerVersion, noFallbackMinVersion)
+
+ if useFallback {
+  // Configs from engines older than noFallbackMinVersion are
+  // round-tripped through V1Image to normalize/strip blank fields.
+  var v1Image image.V1Image
+  err := json.Unmarshal(imageJSON, &v1Image)
+  if err != nil {
+   return nil, err
+  }
+  imageJSON, err = json.Marshal(v1Image)
+  if err != nil {
+   return nil, err
+  }
+ }
+
+ var c map[string]*json.RawMessage
+ if err := json.Unmarshal(imageJSON, &c); err != nil {
+  return nil, err
+ }
+
+ // Drop v1-only fields that have no place in the modern config.
+ delete(c, "id")
+ delete(c, "parent")
+ delete(c, "Size") // Size is calculated from data on disk and is inconsistent
+ delete(c, "parent_id")
+ delete(c, "layer_id")
+ delete(c, "throwaway")
+
+ c["rootfs"] = rawJSON(rootfs)
+ c["history"] = rawJSON(history)
+
+ return json.Marshal(c)
+}
+
+// MakeV1ConfigFromConfig creates a legacy V1 image config from an Image struct.
+// v1ID/parentV1ID replace the content-addressable identifiers, and
+// throwaway marks configs that exist only to carry an empty layer.
+func MakeV1ConfigFromConfig(img *image.Image, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
+ // Top-level v1compatibility string should be a modified version of the
+ // image config.
+ var configAsMap map[string]*json.RawMessage
+ if err := json.Unmarshal(img.RawJSON(), &configAsMap); err != nil {
+  return nil, err
+ }
+
+ // Delete fields that didn't exist in old manifest
+ // (every JSON-tagged field declared directly on image.Image).
+ imageType := reflect.TypeOf(img).Elem()
+ for i := 0; i < imageType.NumField(); i++ {
+  f := imageType.Field(i)
+  jsonName := strings.Split(f.Tag.Get("json"), ",")[0]
+  // Parent is handled specially below.
+  if jsonName != "" && jsonName != "parent" {
+   delete(configAsMap, jsonName)
+  }
+ }
+ configAsMap["id"] = rawJSON(v1ID)
+ if parentV1ID != "" {
+  configAsMap["parent"] = rawJSON(parentV1ID)
+ }
+ if throwaway {
+  configAsMap["throwaway"] = rawJSON(true)
+ }
+
+ return json.Marshal(configAsMap)
+}
+
+// rawJSON marshals value into a *json.RawMessage for embedding in a
+// map[string]*json.RawMessage document.
+// NOTE(review): a marshal failure is silently reported as nil, which a
+// nil *json.RawMessage then encodes as JSON null in the enclosing
+// document — callers here pass types that should never fail to marshal,
+// but worth confirming.
+func rawJSON(value interface{}) *json.RawMessage {
+ jsonval, err := json.Marshal(value)
+ if err != nil {
+  return nil
+ }
+ return (*json.RawMessage)(&jsonval)
+}
+
+// ValidateID checks whether an ID string is a valid image ID.
+// It delegates entirely to stringid.ValidateID.
+func ValidateID(id string) error {
+ return stringid.ValidateID(id)
+}
diff --git a/image/v1/imagev1_test.go b/image/v1/imagev1_test.go
new file mode 100644
index 0000000..936c55e
--- /dev/null
+++ b/image/v1/imagev1_test.go
@@ -0,0 +1,55 @@
+package v1
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/docker/docker/image"
+)
+
+// TestMakeV1ConfigFromConfig verifies that converting a modern image
+// config to the legacy v1 format rewrites the IDs, strips the rootfs
+// field, and preserves unrelated fields such as OS.
+func TestMakeV1ConfigFromConfig(t *testing.T) {
+ img := &image.Image{
+  V1Image: image.V1Image{
+   ID: "v2id",
+   Parent: "v2parent",
+   OS: "os",
+  },
+  OSVersion: "osversion",
+  RootFS: &image.RootFS{
+   Type: "layers",
+  },
+ }
+ v2js, err := json.Marshal(img)
+ if err != nil {
+  t.Fatal(err)
+ }
+
+ // Convert the image back in order to get RawJSON() support.
+ img, err = image.NewFromJSON(v2js)
+ if err != nil {
+  t.Fatal(err)
+ }
+
+ js, err := MakeV1ConfigFromConfig(img, "v1id", "v1parent", false)
+ if err != nil {
+  t.Fatal(err)
+ }
+
+ // Round-trip the v1 JSON back into an Image to inspect the result.
+ newimg := &image.Image{}
+ err = json.Unmarshal(js, newimg)
+ if err != nil {
+  t.Fatal(err)
+ }
+
+ // The v2 IDs must have been replaced by the supplied v1 IDs.
+ if newimg.V1Image.ID != "v1id" || newimg.Parent != "v1parent" {
+  t.Error("ids should have changed", newimg.V1Image.ID, newimg.V1Image.Parent)
+ }
+
+ // rootfs is a modern-config-only field and must be stripped.
+ if newimg.RootFS != nil {
+  t.Error("rootfs should have been removed")
+ }
+
+ // Fields without special handling must survive the conversion.
+ if newimg.V1Image.OS != "os" {
+  t.Error("os should have been preserved")
+ }
+}