1package layer // import "github.com/docker/docker/layer"
2
3import (
4	"compress/gzip"
5	"errors"
6	"io"
7	"os"
8
9	digest "github.com/opencontainers/go-digest"
10	"github.com/sirupsen/logrus"
11	"github.com/vbatts/tar-split/tar/asm"
12	"github.com/vbatts/tar-split/tar/storage"
13)
14
15func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID DiffID, size int64, err error) {
16	defer func() {
17		if err != nil {
18			diffID, size, err = ls.checksumForGraphIDNoTarsplit(id, parent, newTarDataPath)
19		}
20	}()
21
22	if oldTarDataPath == "" {
23		err = errors.New("no tar-split file")
24		return
25	}
26
27	tarDataFile, err := os.Open(oldTarDataPath)
28	if err != nil {
29		return
30	}
31	defer tarDataFile.Close()
32	uncompressed, err := gzip.NewReader(tarDataFile)
33	if err != nil {
34		return
35	}
36
37	dgst := digest.Canonical.Digester()
38	err = ls.assembleTarTo(id, uncompressed, &size, dgst.Hash())
39	if err != nil {
40		return
41	}
42
43	diffID = DiffID(dgst.Digest())
44	err = os.RemoveAll(newTarDataPath)
45	if err != nil {
46		return
47	}
48	err = os.Link(oldTarDataPath, newTarDataPath)
49
50	return
51}
52
53func (ls *layerStore) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID DiffID, size int64, err error) {
54	rawarchive, err := ls.driver.Diff(id, parent)
55	if err != nil {
56		return
57	}
58	defer rawarchive.Close()
59
60	f, err := os.Create(newTarDataPath)
61	if err != nil {
62		return
63	}
64	defer f.Close()
65	mfz := gzip.NewWriter(f)
66	defer mfz.Close()
67	metaPacker := storage.NewJSONPacker(mfz)
68
69	packerCounter := &packSizeCounter{metaPacker, &size}
70
71	archive, err := asm.NewInputTarStream(rawarchive, packerCounter, nil)
72	if err != nil {
73		return
74	}
75	dgst, err := digest.FromReader(archive)
76	if err != nil {
77		return
78	}
79	diffID = DiffID(dgst)
80	return
81}
82
// RegisterByGraphID registers a layer that already exists in the graph
// driver under graphID, recording its metadata (diffID, size, tar-split
// data from tarDataFile) in the layer store and returning a reference to
// the resulting layer. If a layer with the same chain ID already exists,
// a reference to the existing layer is returned instead and all partially
// created state is rolled back via the deferred cleanups.
func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, diffID DiffID, tarDataFile string, size int64) (Layer, error) {
	// err is used to hold the error which will always trigger
	// cleanup of creates sources but may not be an error returned
	// to the caller (already exists).
	var err error
	var p *roLayer
	if string(parent) != "" {
		p = ls.get(parent)
		if p == nil {
			return nil, ErrLayerDoesNotExist
		}

		// Release parent chain if error
		// NOTE: this deferred closure reads the named err variable, so every
		// failure path below must assign to err (not shadow it) for the
		// parent reference taken by ls.get above to be released.
		defer func() {
			if err != nil {
				ls.layerL.Lock()
				ls.releaseLayer(p)
				ls.layerL.Unlock()
			}
		}()
	}

	// Create new roLayer
	layer := &roLayer{
		parent:         p,
		cacheID:        graphID,
		referenceCount: 1,
		layerStore:     ls,
		references:     map[Layer]struct{}{},
		diffID:         diffID,
		size:           size,
		chainID:        createChainIDFromParent(parent, diffID),
	}

	// Hold the store lock for the duplicate check and the remainder of
	// registration so no concurrent caller can race on the same chain ID.
	ls.layerL.Lock()
	defer ls.layerL.Unlock()

	if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil {
		// Set error for cleanup, but do not return
		// (the deferred closure above releases the parent reference while a
		// reference to the already-registered layer is handed back).
		err = errors.New("layer already exists")
		return existingLayer.getReference(), nil
	}

	tx, err := ls.store.StartTransaction()
	if err != nil {
		return nil, err
	}

	// Cancel the metadata transaction on any failure after this point.
	defer func() {
		if err != nil {
			logrus.Debugf("Cleaning up transaction after failed migration for %s: %v", graphID, err)
			if err := tx.Cancel(); err != nil {
				logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err)
			}
		}
	}()

	// Copy the pre-computed tar-split data into the transaction's writer
	// (false: the input is already gzip-compressed, so do not re-compress).
	tsw, err := tx.TarSplitWriter(false)
	if err != nil {
		return nil, err
	}
	defer tsw.Close()
	tdf, err := os.Open(tarDataFile)
	if err != nil {
		return nil, err
	}
	defer tdf.Close()
	_, err = io.Copy(tsw, tdf)
	if err != nil {
		return nil, err
	}

	if err = storeLayer(tx, layer); err != nil {
		return nil, err
	}

	// Committing makes the layer metadata visible under its chain ID.
	if err = tx.Commit(layer.chainID); err != nil {
		return nil, err
	}

	ls.layerMap[layer.chainID] = layer

	return layer.getReference(), nil
}
167
168type unpackSizeCounter struct {
169	unpacker storage.Unpacker
170	size     *int64
171}
172
173func (u *unpackSizeCounter) Next() (*storage.Entry, error) {
174	e, err := u.unpacker.Next()
175	if err == nil && u.size != nil {
176		*u.size += e.Size
177	}
178	return e, err
179}
180
181type packSizeCounter struct {
182	packer storage.Packer
183	size   *int64
184}
185
186func (p *packSizeCounter) AddEntry(e storage.Entry) (int, error) {
187	n, err := p.packer.AddEntry(e)
188	if err == nil && p.size != nil {
189		*p.size += e.Size
190	}
191	return n, err
192}
193