package deb

import (
	"bufio"
	"fmt"
	"os"
	"path"
	"path/filepath"
	"strings"

	"github.com/aptly-dev/aptly/aptly"
	"github.com/aptly-dev/aptly/pgp"
	"github.com/aptly-dev/aptly/utils"
)

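// indexFiles keeps track of all index files generated while publishing a
// repository: where they are staged, the checksums of published variants and
// any pending renames from suffixed to final names.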
type indexFiles struct {
	publishedStorage aptly.PublishedStorage
	basePath         string
	renameMap        map[string]string
	generatedFiles   map[string]utils.ChecksumInfo
	tempDir          string
	suffix           string
	indexes          map[string]*indexFile
	acquireByHash    bool
}

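// indexFile represents a single index (Packages, Sources, Release, Contents,
// ...) written to a temporary file before being published.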
type indexFile struct {
	parent        *indexFiles
	discardable   bool
	compressable  bool
	onlyGzip      bool
	clearSign     bool
	detachedSign  bool
	acquireByHash bool
	relativePath  string
	tempFilename  string
	tempFile      *os.File
	w             *bufio.Writer
}

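// BufWriter returns a buffered writer for the index, lazily creating the
// backing temporary file on first use.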
func (file *indexFile) BufWriter() (*bufio.Writer, error) {
	if file.w == nil {
		var err error
		file.tempFilename = filepath.Join(file.parent.tempDir, strings.Replace(file.relativePath, "/", "_", -1))
		file.tempFile, err = os.Create(file.tempFilename)
		if err != nil {
			return nil, fmt.Errorf("unable to create temporary index file: %s", err)
		}

		file.w = bufio.NewWriter(file.tempFile)
	}

	return file.w, nil
}

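// Finalize flushes the index to disk, optionally compresses and signs it,
// records its checksums and uploads all variants to the published storage.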
func (file *indexFile) Finalize(signer pgp.Signer) error {
	if file.w == nil {
		if file.discardable {
			return nil
		}
		if _, err := file.BufWriter(); err != nil {
			return err
		}
	}

	err := file.w.Flush()
	if err != nil {
		file.tempFile.Close()
		return fmt.Errorf("unable to write to index file: %s", err)
	}

	if file.compressable {
		err = utils.CompressFile(file.tempFile, file.onlyGzip)
		if err != nil {
			file.tempFile.Close()
			return fmt.Errorf("unable to compress index file: %s", err)
		}
	}

	file.tempFile.Close()

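	// Decide which variants get published (exts) and which variants get their
	// checksums recorded (cksumExts); with onlyGzip the uncompressed checksums
	// are still recorded even though only the .gz file is uploaded.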
	exts := []string{""}
	cksumExts := exts
	if file.compressable {
		exts = append(exts, ".gz", ".bz2")
		cksumExts = exts
		if file.onlyGzip {
			exts = []string{".gz"}
			cksumExts = []string{"", ".gz"}
		}
	}

	for _, ext := range cksumExts {
		var checksumInfo utils.ChecksumInfo

		checksumInfo, err = utils.ChecksumsForFile(file.tempFilename + ext)
		if err != nil {
			return fmt.Errorf("unable to collect checksums: %s", err)
		}
		file.parent.generatedFiles[file.relativePath+ext] = checksumInfo
	}

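	// Make sure the destination directory exists in the published storage,
	// including by-hash subdirectories when Acquire-By-Hash is enabled.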
	filedir := filepath.Dir(filepath.Join(file.parent.basePath, file.relativePath))

	err = file.parent.publishedStorage.MkDir(filedir)
	if err != nil {
		return fmt.Errorf("unable to create dir: %s", err)
	}

	if file.acquireByHash {
		for _, hash := range []string{"MD5Sum", "SHA1", "SHA256", "SHA512"} {
			err = file.parent.publishedStorage.MkDir(filepath.Join(filedir, "by-hash", hash))
			if err != nil {
				return fmt.Errorf("unable to create dir: %s", err)
			}
		}
	}

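	// Upload every published variant; when a temporary suffix is in use,
	// schedule a rename to the final name, and create by-hash links if enabled.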
	for _, ext := range exts {
		err = file.parent.publishedStorage.PutFile(filepath.Join(file.parent.basePath, file.relativePath+file.parent.suffix+ext),
			file.tempFilename+ext)
		if err != nil {
			return fmt.Errorf("unable to publish file: %s", err)
		}

		if file.parent.suffix != "" {
			file.parent.renameMap[filepath.Join(file.parent.basePath, file.relativePath+file.parent.suffix+ext)] =
				filepath.Join(file.parent.basePath, file.relativePath+ext)
		}

		if file.acquireByHash {
			sums := file.parent.generatedFiles[file.relativePath+ext]
			for hash, sum := range map[string]string{"SHA512": sums.SHA512, "SHA256": sums.SHA256, "SHA1": sums.SHA1, "MD5Sum": sums.MD5} {
				err = packageIndexByHash(file, ext, hash, sum)
				if err != nil {
					return fmt.Errorf("unable to build hash file: %s", err)
				}
			}
		}
	}

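	// Sign the index if a signer is configured: a detached .gpg signature
	// and/or an inline clear-signed copy (the "In"-prefixed file, e.g. InRelease).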
	if signer != nil {
		if file.detachedSign {
			err = signer.DetachedSign(file.tempFilename, file.tempFilename+".gpg")
			if err != nil {
				return fmt.Errorf("unable to create detached signature: %s", err)
			}

			if file.parent.suffix != "" {
				file.parent.renameMap[filepath.Join(file.parent.basePath, file.relativePath+file.parent.suffix+".gpg")] =
					filepath.Join(file.parent.basePath, file.relativePath+".gpg")
			}

			err = file.parent.publishedStorage.PutFile(filepath.Join(file.parent.basePath, file.relativePath+file.parent.suffix+".gpg"),
				file.tempFilename+".gpg")
			if err != nil {
				return fmt.Errorf("unable to publish file: %s", err)
			}
		}

		if file.clearSign {
			err = signer.ClearSign(file.tempFilename, filepath.Join(filepath.Dir(file.tempFilename), "In"+filepath.Base(file.tempFilename)))
			if err != nil {
				return fmt.Errorf("unable to clearsign file: %s", err)
			}

			if file.parent.suffix != "" {
				file.parent.renameMap[filepath.Join(file.parent.basePath, "In"+file.relativePath+file.parent.suffix)] =
					filepath.Join(file.parent.basePath, "In"+file.relativePath)
			}

			err = file.parent.publishedStorage.PutFile(filepath.Join(file.parent.basePath, "In"+file.relativePath+file.parent.suffix),
				filepath.Join(filepath.Dir(file.tempFilename), "In"+filepath.Base(file.tempFilename)))
			if err != nil {
				return fmt.Errorf("unable to publish file: %s", err)
			}
		}
	}

	return nil
}

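// packageIndexByHash publishes the index under by-hash/<hash>/<sum> and keeps
// a symlink (plus one backup) pointing at the current index file.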
func packageIndexByHash(file *indexFile, ext string, hash string, sum string) error {
	src := filepath.Join(file.parent.basePath, file.relativePath)
	indexfile := path.Base(src + ext)
	src = src + file.parent.suffix + ext
	filedir := filepath.Dir(filepath.Join(file.parent.basePath, file.relativePath))
	dst := filepath.Join(filedir, "by-hash", hash)
	sumfilePath := filepath.Join(dst, sum)

	// link already exists? do nothing
	exists, err := file.parent.publishedStorage.FileExists(sumfilePath)
	if err != nil {
		return fmt.Errorf("Acquire-By-Hash: error checking existence of file %s: %s", sumfilePath, err)
	}
	if exists {
		return nil
	}

	// create the link
	err = file.parent.publishedStorage.HardLink(src, sumfilePath)
	if err != nil {
		return fmt.Errorf("Acquire-By-Hash: error creating hardlink %s: %s", sumfilePath, err)
	}

	// if a previous index file already exists, keep it around as a backup symlink
	indexPath := filepath.Join(dst, indexfile)
	oldIndexPath := filepath.Join(dst, indexfile+".old")
	if exists, _ = file.parent.publishedStorage.FileExists(indexPath); exists {
		// if a backup already exists, remove it together with its target
		if exists, _ = file.parent.publishedStorage.FileExists(oldIndexPath); exists {
			var linkTarget string
			linkTarget, err = file.parent.publishedStorage.ReadLink(oldIndexPath)
			if err == nil {
				// the link target is the oldest physical index file; once the old
				// symlink is dropped nothing references it anymore, so delete it
				file.parent.publishedStorage.Remove(linkTarget)
			}
			file.parent.publishedStorage.Remove(oldIndexPath)
		}
		file.parent.publishedStorage.RenameFile(indexPath, oldIndexPath)
	}

	// create symlink
	err = file.parent.publishedStorage.SymLink(filepath.Join(dst, sum), filepath.Join(dst, indexfile))
	if err != nil {
		return fmt.Errorf("Acquire-By-Hash: error creating symlink %s: %s", filepath.Join(dst, indexfile), err)
	}
	return nil
}

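// newIndexFiles creates an empty indexFiles collection publishing under
// basePath of publishedStorage, staging generated files in tempDir.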
func newIndexFiles(publishedStorage aptly.PublishedStorage, basePath, tempDir, suffix string, acquireByHash bool) *indexFiles {
	return &indexFiles{
		publishedStorage: publishedStorage,
		basePath:         basePath,
		renameMap:        make(map[string]string),
		generatedFiles:   make(map[string]utils.ChecksumInfo),
		tempDir:          tempDir,
		suffix:           suffix,
		indexes:          make(map[string]*indexFile),
		acquireByHash:    acquireByHash,
	}
}

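// PackageIndex returns the Packages/Sources index (or the installer SHA256SUMS
// file) for the given component and architecture, creating it on first use.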
func (files *indexFiles) PackageIndex(component, arch string, udeb, installer bool) *indexFile {
	if arch == ArchitectureSource {
		udeb = false
	}
	key := fmt.Sprintf("pi-%s-%s-%v-%v", component, arch, udeb, installer)
	file, ok := files.indexes[key]
	if !ok {
		var relativePath string

		if arch == ArchitectureSource {
			relativePath = filepath.Join(component, "source", "Sources")
		} else {
			if udeb {
				relativePath = filepath.Join(component, "debian-installer", fmt.Sprintf("binary-%s", arch), "Packages")
			} else if installer {
				relativePath = filepath.Join(component, fmt.Sprintf("installer-%s", arch), "current", "images", "SHA256SUMS")
			} else {
				relativePath = filepath.Join(component, fmt.Sprintf("binary-%s", arch), "Packages")
			}
		}

		file = &indexFile{
			parent:        files,
			discardable:   false,
			compressable:  !installer,
			detachedSign:  installer,
			clearSign:     false,
			acquireByHash: files.acquireByHash,
			relativePath:  relativePath,
		}

		files.indexes[key] = file
	}

	return file
}

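// ReleaseIndex returns the per-architecture Release index for the given
// component, creating it on first use.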
func (files *indexFiles) ReleaseIndex(component, arch string, udeb bool) *indexFile {
	if arch == ArchitectureSource {
		udeb = false
	}
	key := fmt.Sprintf("ri-%s-%s-%v", component, arch, udeb)
	file, ok := files.indexes[key]
	if !ok {
		var relativePath string

		if arch == ArchitectureSource {
			relativePath = filepath.Join(component, "source", "Release")
		} else {
			if udeb {
				relativePath = filepath.Join(component, "debian-installer", fmt.Sprintf("binary-%s", arch), "Release")
			} else {
				relativePath = filepath.Join(component, fmt.Sprintf("binary-%s", arch), "Release")
			}
		}

		file = &indexFile{
			parent:        files,
			discardable:   udeb,
			compressable:  false,
			detachedSign:  false,
			clearSign:     false,
			acquireByHash: files.acquireByHash,
			relativePath:  relativePath,
		}

		files.indexes[key] = file
	}

	return file
}

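// ContentsIndex returns the per-component Contents index for the given
// architecture, creating it on first use.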
func (files *indexFiles) ContentsIndex(component, arch string, udeb bool) *indexFile {
	if arch == ArchitectureSource {
		udeb = false
	}
	key := fmt.Sprintf("ci-%s-%s-%v", component, arch, udeb)
	file, ok := files.indexes[key]
	if !ok {
		var relativePath string

		if udeb {
			relativePath = filepath.Join(component, fmt.Sprintf("Contents-udeb-%s", arch))
		} else {
			relativePath = filepath.Join(component, fmt.Sprintf("Contents-%s", arch))
		}

		file = &indexFile{
			parent:        files,
			discardable:   true,
			compressable:  true,
			onlyGzip:      true,
			detachedSign:  false,
			clearSign:     false,
			acquireByHash: files.acquireByHash,
			relativePath:  relativePath,
		}

		files.indexes[key] = file
	}

	return file
}

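// LegacyContentsIndex returns the legacy top-level (pre-component) Contents
// index for the given architecture, creating it on first use.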
func (files *indexFiles) LegacyContentsIndex(arch string, udeb bool) *indexFile {
	if arch == ArchitectureSource {
		udeb = false
	}
	key := fmt.Sprintf("lci-%s-%v", arch, udeb)
	file, ok := files.indexes[key]
	if !ok {
		var relativePath string

		if udeb {
			relativePath = fmt.Sprintf("Contents-udeb-%s", arch)
		} else {
			relativePath = fmt.Sprintf("Contents-%s", arch)
		}

		file = &indexFile{
			parent:        files,
			discardable:   true,
			compressable:  true,
			onlyGzip:      true,
			detachedSign:  false,
			clearSign:     false,
			acquireByHash: files.acquireByHash,
			relativePath:  relativePath,
		}

		files.indexes[key] = file
	}

	return file
}

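// ReleaseFile returns the top-level Release index, which is both clear-signed
// and detached-signed when a signer is configured.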
func (files *indexFiles) ReleaseFile() *indexFile {
	return &indexFile{
		parent:       files,
		discardable:  false,
		compressable: false,
		detachedSign: true,
		clearSign:    true,
		relativePath: "Release",
	}
}

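// FinalizeAll finalizes every generated index, reporting progress along the
// way, and resets the collection.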
func (files *indexFiles) FinalizeAll(progress aptly.Progress, signer pgp.Signer) (err error) {
	if progress != nil {
		progress.InitBar(int64(len(files.indexes)), false)
		defer progress.ShutdownBar()
	}

	for _, file := range files.indexes {
		err = file.Finalize(signer)
		if err != nil {
			return
		}
		if progress != nil {
			progress.AddBar(1)
		}
	}

	files.indexes = make(map[string]*indexFile)

	return
}

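// RenameFiles moves files published under the temporary suffix to their final
// names.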
func (files *indexFiles) RenameFiles() error {
	var err error

	for oldName, newName := range files.renameMap {
		err = files.publishedStorage.RenameFile(oldName, newName)
		if err != nil {
			return fmt.Errorf("unable to rename: %s", err)
		}
	}

	return nil
}