1package deb
2
3import (
4	"bytes"
5	"fmt"
6	"io"
7	"io/ioutil"
8	"os"
9	"path/filepath"
10	"sort"
11	"strings"
12	"text/template"
13
14	"github.com/aptly-dev/aptly/aptly"
15	"github.com/aptly-dev/aptly/pgp"
16	"github.com/aptly-dev/aptly/utils"
17)
18
19// Changes is a result of .changes file parsing
// Changes is a result of .changes file parsing
type Changes struct {
	Changes               string       // "Changes" field of the stanza (changelog excerpt)
	Distribution          string       // target distribution from the "Distribution" field
	Files                 PackageFiles // files referenced by the .changes file, with checksums
	BasePath, ChangesName string       // directory and base name of the original .changes file
	TempDir               string       // temporary directory files are copied into (see NewChanges/Cleanup)
	Source                string       // source package name from the "Source" field
	Binary                []string     // binary package names from the "Binary" field
	Architectures         []string     // architectures from the "Architecture" field
	Stanza                Stanza       // full parsed control stanza
	SignatureKeys         []pgp.Key    // keys that produced good signatures (set by VerifyAndParse)
}
32
33// NewChanges moves .changes file into temporary directory and creates Changes structure
34func NewChanges(path string) (*Changes, error) {
35	var err error
36
37	c := &Changes{
38		BasePath:    filepath.Dir(path),
39		ChangesName: filepath.Base(path),
40	}
41
42	c.TempDir, err = ioutil.TempDir(os.TempDir(), "aptly")
43	if err != nil {
44		return nil, err
45	}
46
47	// copy .changes file into temporary directory
48	err = utils.CopyFile(filepath.Join(c.BasePath, c.ChangesName), filepath.Join(c.TempDir, c.ChangesName))
49	if err != nil {
50		return nil, err
51	}
52
53	return c, nil
54}
55
56// VerifyAndParse does optional signature verification and parses changes files
57func (c *Changes) VerifyAndParse(acceptUnsigned, ignoreSignature bool, verifier pgp.Verifier) error {
58	input, err := os.Open(filepath.Join(c.TempDir, c.ChangesName))
59	if err != nil {
60		return err
61	}
62	defer input.Close()
63
64	isClearSigned, err := verifier.IsClearSigned(input)
65	if err != nil {
66		return err
67	}
68
69	input.Seek(0, 0)
70
71	if !isClearSigned && !acceptUnsigned {
72		return fmt.Errorf(".changes file is not signed and unsigned processing hasn't been enabled")
73	}
74
75	if isClearSigned && !ignoreSignature {
76		var keyInfo *pgp.KeyInfo
77		keyInfo, err = verifier.VerifyClearsigned(input, false)
78		if err != nil {
79			return err
80		}
81		input.Seek(0, 0)
82
83		c.SignatureKeys = keyInfo.GoodKeys
84	}
85
86	var text io.ReadCloser
87
88	if isClearSigned {
89		text, err = verifier.ExtractClearsigned(input)
90		if err != nil {
91			return err
92		}
93		defer text.Close()
94	} else {
95		text = input
96	}
97
98	reader := NewControlFileReader(text, false, false)
99	c.Stanza, err = reader.ReadStanza()
100	if err != nil {
101		return err
102	}
103
104	c.Distribution = c.Stanza["Distribution"]
105	c.Changes = c.Stanza["Changes"]
106	c.Source = c.Stanza["Source"]
107	c.Binary = strings.Fields(c.Stanza["Binary"])
108	c.Architectures = strings.Fields(c.Stanza["Architecture"])
109
110	c.Files, err = c.Files.ParseSumFields(c.Stanza)
111	return err
112}
113
114// Prepare creates temporary directory, copies file there and verifies checksums
115func (c *Changes) Prepare() error {
116	var err error
117
118	for _, file := range c.Files {
119		if filepath.Dir(file.Filename) != "." {
120			return fmt.Errorf("file is not in the same folder as .changes file: %s", file.Filename)
121		}
122
123		file.Filename = filepath.Base(file.Filename)
124
125		err = utils.CopyFile(filepath.Join(c.BasePath, file.Filename), filepath.Join(c.TempDir, file.Filename))
126		if err != nil {
127			return err
128		}
129	}
130
131	for _, file := range c.Files {
132		var info utils.ChecksumInfo
133
134		info, err = utils.ChecksumsForFile(filepath.Join(c.TempDir, file.Filename))
135		if err != nil {
136			return err
137		}
138
139		if info.Size != file.Checksums.Size {
140			return fmt.Errorf("size mismatch: expected %v != obtained %v", file.Checksums.Size, info.Size)
141		}
142
143		if info.MD5 != file.Checksums.MD5 {
144			return fmt.Errorf("checksum mismatch MD5: expected %v != obtained %v", file.Checksums.MD5, info.MD5)
145		}
146
147		if info.SHA1 != file.Checksums.SHA1 {
148			return fmt.Errorf("checksum mismatch SHA1: expected %v != obtained %v", file.Checksums.SHA1, info.SHA1)
149		}
150
151		if info.SHA256 != file.Checksums.SHA256 {
152			return fmt.Errorf("checksum mismatch SHA256 expected %v != obtained %v", file.Checksums.SHA256, info.SHA256)
153		}
154	}
155
156	return nil
157}
158
159// Cleanup removes all temporary files
160func (c *Changes) Cleanup() error {
161	if c.TempDir == "" {
162		return nil
163	}
164
165	return os.RemoveAll(c.TempDir)
166}
167
168// PackageQuery returns query that every package should match to be included
// PackageQuery returns query that every package should match to be included
func (c *Changes) PackageQuery() PackageQuery {
	// Build an OR-chain over the listed architectures. The chain is seeded
	// with a match against the empty architecture, so with no architectures
	// the resulting query matches nothing.
	var archQuery PackageQuery = &FieldQuery{Field: "$Architecture", Relation: VersionEqual, Value: ""}
	for _, arch := range c.Architectures {
		archQuery = &OrQuery{L: &FieldQuery{Field: "$Architecture", Relation: VersionEqual, Value: arch}, R: archQuery}
	}

	// if c.Source is empty, this would never match
	sourceQuery := &AndQuery{
		L: &FieldQuery{Field: "$PackageType", Relation: VersionEqual, Value: ArchitectureSource},
		R: &FieldQuery{Field: "Name", Relation: VersionEqual, Value: c.Source},
	}

	// Binary packages: match any name listed in the Binary field, plus the
	// corresponding -dbgsym debug packages (which are not listed there).
	var binaryQuery PackageQuery
	if len(c.Binary) > 0 {
		binaryQuery = &FieldQuery{Field: "Name", Relation: VersionEqual, Value: c.Binary[0]}
		// matching debug ddeb packages, they're not present in the Binary field
		var ddebQuery PackageQuery = &FieldQuery{Field: "Name", Relation: VersionEqual, Value: fmt.Sprintf("%s-dbgsym", c.Binary[0])}

		for _, binary := range c.Binary[1:] {
			binaryQuery = &OrQuery{
				L: &FieldQuery{Field: "Name", Relation: VersionEqual, Value: binary},
				R: binaryQuery,
			}
			ddebQuery = &OrQuery{
				L: &FieldQuery{Field: "Name", Relation: VersionEqual, Value: fmt.Sprintf("%s-dbgsym", binary)},
				R: ddebQuery,
			}
		}

		// dbgsym packages must additionally come from the same source package
		ddebQuery = &AndQuery{
			L: &FieldQuery{Field: "Source", Relation: VersionEqual, Value: c.Source},
			R: ddebQuery,
		}

		binaryQuery = &OrQuery{
			L: binaryQuery,
			R: ddebQuery,
		}

		// exclude source packages from the binary part of the query
		binaryQuery = &AndQuery{
			L: &NotQuery{Q: &FieldQuery{Field: "$PackageType", Relation: VersionEqual, Value: ArchitectureSource}},
			R: binaryQuery}
	}

	// Combine: (source match OR binary match) AND architecture match.
	var nameQuery PackageQuery
	if binaryQuery == nil {
		nameQuery = sourceQuery
	} else {
		nameQuery = &OrQuery{L: sourceQuery, R: binaryQuery}
	}

	return &AndQuery{L: archQuery, R: nameQuery}
}
222
223// GetField implements PackageLike interface
224func (c *Changes) GetField(field string) string {
225	return c.Stanza[field]
226}
227
228// MatchesDependency implements PackageLike interface
229func (c *Changes) MatchesDependency(d Dependency) bool {
230	return false
231}
232
233// MatchesArchitecture implements PackageLike interface
234func (c *Changes) MatchesArchitecture(arch string) bool {
235	return false
236}
237
238// GetName implements PackageLike interface
// GetName implements PackageLike interface
func (c *Changes) GetName() string {
	// a .changes file has no single package name
	return ""
}
242
243// GetVersion implements PackageLike interface
244func (c *Changes) GetVersion() string {
245	return ""
246
247}
248
249// GetArchitecture implements PackageLike interface
// GetArchitecture implements PackageLike interface
func (c *Changes) GetArchitecture() string {
	// a .changes file has no single architecture
	return ""
}
253
254// CollectChangesFiles walks filesystem collecting all .changes files
255func CollectChangesFiles(locations []string, reporter aptly.ResultReporter) (changesFiles, failedFiles []string) {
256	for _, location := range locations {
257		info, err2 := os.Stat(location)
258		if err2 != nil {
259			reporter.Warning("Unable to process %s: %s", location, err2)
260			failedFiles = append(failedFiles, location)
261			continue
262		}
263		if info.IsDir() {
264			err2 = filepath.Walk(location, func(path string, info os.FileInfo, err3 error) error {
265				if err3 != nil {
266					return err3
267				}
268				if info.IsDir() {
269					return nil
270				}
271
272				if strings.HasSuffix(info.Name(), ".changes") {
273					changesFiles = append(changesFiles, path)
274				}
275
276				return nil
277			})
278
279			if err2 != nil {
280				reporter.Warning("Unable to process %s: %s", location, err2)
281				failedFiles = append(failedFiles, location)
282				continue
283			}
284		} else if strings.HasSuffix(info.Name(), ".changes") {
285			changesFiles = append(changesFiles, location)
286		}
287	}
288
289	sort.Strings(changesFiles)
290
291	return
292}
293
294// ImportChangesFiles imports referenced files in changes files into local repository
295func ImportChangesFiles(changesFiles []string, reporter aptly.ResultReporter, acceptUnsigned, ignoreSignatures, forceReplace, noRemoveFiles bool,
296	verifier pgp.Verifier, repoTemplateString string, progress aptly.Progress, localRepoCollection *LocalRepoCollection, packageCollection *PackageCollection,
297	pool aptly.PackagePool, checksumStorage aptly.ChecksumStorage, uploaders *Uploaders, parseQuery parseQuery) (processedFiles []string, failedFiles []string, err error) {
298
299	var repoTemplate *template.Template
300	repoTemplate, err = template.New("repo").Parse(repoTemplateString)
301	if err != nil {
302		return nil, nil, fmt.Errorf("error parsing -repo template: %s", err)
303	}
304	for _, path := range changesFiles {
305		var changes *Changes
306
307		changes, err = NewChanges(path)
308		if err != nil {
309			failedFiles = append(failedFiles, path)
310			reporter.Warning("unable to process file %s: %s", path, err)
311			continue
312		}
313
314		err = changes.VerifyAndParse(acceptUnsigned, ignoreSignatures, verifier)
315		if err != nil {
316			failedFiles = append(failedFiles, path)
317			reporter.Warning("unable to process file %s: %s", changes.ChangesName, err)
318			changes.Cleanup()
319			continue
320		}
321
322		err = changes.Prepare()
323		if err != nil {
324			failedFiles = append(failedFiles, path)
325			reporter.Warning("unable to process file %s: %s", changes.ChangesName, err)
326			changes.Cleanup()
327			continue
328		}
329
330		repoName := &bytes.Buffer{}
331		err = repoTemplate.Execute(repoName, changes.Stanza)
332		if err != nil {
333			return nil, nil, fmt.Errorf("error applying template to repo: %s", err)
334		}
335
336		if progress != nil {
337			progress.Printf("Loading repository %s for changes file %s...\n", repoName.String(), changes.ChangesName)
338		}
339
340		var repo *LocalRepo
341		repo, err = localRepoCollection.ByName(repoName.String())
342		if err != nil {
343			failedFiles = append(failedFiles, path)
344			reporter.Warning("unable to process file %s: %s", changes.ChangesName, err)
345			changes.Cleanup()
346			continue
347		}
348
349		currentUploaders := uploaders
350		if repo.Uploaders != nil {
351			currentUploaders = repo.Uploaders
352			for i := range currentUploaders.Rules {
353				currentUploaders.Rules[i].CompiledCondition, err = parseQuery(currentUploaders.Rules[i].Condition)
354				if err != nil {
355					return nil, nil, fmt.Errorf("error parsing query %s: %s", currentUploaders.Rules[i].Condition, err)
356				}
357			}
358		}
359
360		if currentUploaders != nil {
361			if err = currentUploaders.IsAllowed(changes); err != nil {
362				failedFiles = append(failedFiles, path)
363				reporter.Warning("changes file skipped due to uploaders config: %s, keys %#v: %s",
364					changes.ChangesName, changes.SignatureKeys, err)
365				changes.Cleanup()
366				continue
367			}
368		}
369
370		err = localRepoCollection.LoadComplete(repo)
371		if err != nil {
372			return nil, nil, fmt.Errorf("unable to load repo: %s", err)
373		}
374
375		var list *PackageList
376		list, err = NewPackageListFromRefList(repo.RefList(), packageCollection, progress)
377		if err != nil {
378			return nil, nil, fmt.Errorf("unable to load packages: %s", err)
379		}
380
381		packageFiles, otherFiles, _ := CollectPackageFiles([]string{changes.TempDir}, reporter)
382
383		restriction := changes.PackageQuery()
384		var processedFiles2, failedFiles2 []string
385
386		processedFiles2, failedFiles2, err = ImportPackageFiles(list, packageFiles, forceReplace, verifier, pool,
387			packageCollection, reporter, restriction, checksumStorage)
388
389		if err != nil {
390			return nil, nil, fmt.Errorf("unable to import package files: %s", err)
391		}
392
393		repo.UpdateRefList(NewPackageRefListFromPackageList(list))
394
395		err = localRepoCollection.Update(repo)
396		if err != nil {
397			return nil, nil, fmt.Errorf("unable to save: %s", err)
398		}
399
400		err = changes.Cleanup()
401		if err != nil {
402			return nil, nil, err
403		}
404
405		for _, file := range failedFiles2 {
406			failedFiles = append(failedFiles, filepath.Join(changes.BasePath, filepath.Base(file)))
407		}
408
409		for _, file := range processedFiles2 {
410			processedFiles = append(processedFiles, filepath.Join(changes.BasePath, filepath.Base(file)))
411		}
412
413		for _, file := range otherFiles {
414			processedFiles = append(processedFiles, filepath.Join(changes.BasePath, filepath.Base(file)))
415		}
416
417		processedFiles = append(processedFiles, path)
418	}
419
420	if !noRemoveFiles {
421		processedFiles = utils.StrSliceDeduplicate(processedFiles)
422
423		for _, file := range processedFiles {
424			err = os.Remove(file)
425			if err != nil {
426				return nil, nil, fmt.Errorf("unable to remove file: %s", err)
427			}
428		}
429	}
430
431	return processedFiles, failedFiles, nil
432}
433