1package deb
2
3import (
4	"bufio"
5	"bytes"
6	"encoding/json"
7	"fmt"
8	"io/ioutil"
9	"log"
10	"os"
11	"path/filepath"
12	"sort"
13	"strings"
14	"sync"
15	"time"
16
17	"github.com/pborman/uuid"
18	"github.com/ugorji/go/codec"
19
20	"github.com/aptly-dev/aptly/aptly"
21	"github.com/aptly-dev/aptly/database"
22	"github.com/aptly-dev/aptly/pgp"
23	"github.com/aptly-dev/aptly/utils"
24)
25
// repoSourceItem holds the in-memory, resolved source behind a single
// published component; exactly one of snapshot/localRepo is set,
// depending on the owning PublishedRepo's SourceKind.
type repoSourceItem struct {
	// Pointer to snapshot if SourceKind == "snapshot"
	snapshot *Snapshot
	// Pointer to local repo if SourceKind == "local"
	localRepo *LocalRepo
	// Package references if SourceKind == "local"
	packageRefs *PackageRefList
}
34
// PublishedRepo is a published for http/ftp representation of snapshot as Debian repository
type PublishedRepo struct {
	// Internal unique ID
	UUID string
	// Storage & Prefix & distribution should be unique across all published repositories
	Storage              string
	Prefix               string
	Distribution         string
	Origin               string
	NotAutomatic         string
	ButAutomaticUpgrades string
	Label                string
	// Architectures is a list of all architectures published
	Architectures []string
	// SourceKind is SourceLocalRepo or SourceSnapshot
	SourceKind string

	// Map of sources by each component: component name -> source UUID
	Sources map[string]string

	// Legacy fields for compatibility with old published repositories (< 0.6)
	Component string
	// SourceUUID is UUID of either snapshot or local repo
	// (persisted under the historical msgpack name "SnapshotUUID")
	SourceUUID string `codec:"SnapshotUUID"`
	// Map of component to source items; built in memory by
	// NewPublishedRepo/LoadComplete, never persisted
	sourceItems map[string]repoSourceItem

	// Skip contents generation
	SkipContents bool

	// True if repo is being re-published
	rePublishing bool

	// Provide index files per hash also
	AcquireByHash bool
}
71
// ParsePrefix splits [storage:]prefix into components.
// An empty prefix after the colon becomes "."; a single leading and
// trailing slash are stripped from the prefix.
func ParsePrefix(param string) (storage, prefix string) {
	if idx := strings.LastIndex(param, ":"); idx >= 0 {
		storage, prefix = param[:idx], param[idx+1:]
		if prefix == "" {
			prefix = "."
		}
	} else {
		prefix = param
	}
	prefix = strings.TrimPrefix(strings.TrimSuffix(prefix, "/"), "/")
	return storage, prefix
}
87
88// walkUpTree goes from source in the tree of source snapshots/mirrors/local repos
89// gathering information about declared components and distributions
90func walkUpTree(source interface{}, collectionFactory *CollectionFactory) (rootDistributions []string, rootComponents []string) {
91	var (
92		head    interface{}
93		current = []interface{}{source}
94	)
95
96	rootComponents = []string{}
97	rootDistributions = []string{}
98
99	// walk up the tree from current source up to roots (local or remote repos)
100	// and collect information about distribution and components
101	for len(current) > 0 {
102		head, current = current[0], current[1:]
103
104		if snapshot, ok := head.(*Snapshot); ok {
105			for _, uuid := range snapshot.SourceIDs {
106				if snapshot.SourceKind == SourceRemoteRepo {
107					remoteRepo, err := collectionFactory.RemoteRepoCollection().ByUUID(uuid)
108					if err != nil {
109						continue
110					}
111					current = append(current, remoteRepo)
112				} else if snapshot.SourceKind == SourceLocalRepo {
113					localRepo, err := collectionFactory.LocalRepoCollection().ByUUID(uuid)
114					if err != nil {
115						continue
116					}
117					current = append(current, localRepo)
118				} else if snapshot.SourceKind == SourceSnapshot {
119					snap, err := collectionFactory.SnapshotCollection().ByUUID(uuid)
120					if err != nil {
121						continue
122					}
123					current = append(current, snap)
124				}
125			}
126		} else if localRepo, ok := head.(*LocalRepo); ok {
127			if localRepo.DefaultDistribution != "" {
128				rootDistributions = append(rootDistributions, localRepo.DefaultDistribution)
129			}
130			if localRepo.DefaultComponent != "" {
131				rootComponents = append(rootComponents, localRepo.DefaultComponent)
132			}
133		} else if remoteRepo, ok := head.(*RemoteRepo); ok {
134			if remoteRepo.Distribution != "" {
135				rootDistributions = append(rootDistributions, remoteRepo.Distribution)
136			}
137			rootComponents = append(rootComponents, remoteRepo.Components...)
138		} else {
139			panic("unknown type")
140		}
141	}
142
143	return
144}
145
// NewPublishedRepo creates new published repository
//
// storage is PublishedStorage name
// prefix specifies publishing prefix
// distribution and architectures are user-defined properties
// components & sources are lists of component to source mapping (*Snapshot or *LocalRepo)
//
// When distribution or a component name is empty, it is deduced by walking
// the source tree (walkUpTree); the deduced value is accepted only when all
// roots agree on a single candidate. Returns an error on ambiguous
// deductions, duplicate components, or an invalid prefix/distribution.
func NewPublishedRepo(storage, prefix, distribution string, architectures []string,
	components []string, sources []interface{}, collectionFactory *CollectionFactory) (*PublishedRepo, error) {
	result := &PublishedRepo{
		UUID:          uuid.New(),
		Storage:       storage,
		Architectures: architectures,
		Sources:       make(map[string]string),
		sourceItems:   make(map[string]repoSourceItem),
	}

	if len(sources) == 0 {
		panic("publish with empty sources")
	}

	if len(sources) != len(components) {
		panic("sources and components should be equal in size")
	}

	var (
		discoveredDistributions = []string{}
		source                  interface{}
		component               string
		snapshot                *Snapshot
		localRepo               *LocalRepo
		// fields collects distinct Origin/NotAutomatic/ButAutomaticUpgrades
		// values across all snapshot sources; only unanimous values are
		// copied onto the published repo (see bottom of this function)
		fields = make(map[string][]string)
	)

	// get first source
	source = sources[0]

	// figure out source kind (all sources are assumed to share it)
	switch source.(type) {
	case *Snapshot:
		result.SourceKind = SourceSnapshot
	case *LocalRepo:
		result.SourceKind = SourceLocalRepo
	default:
		panic("unknown source kind")
	}

	for i := range sources {
		component, source = components[i], sources[i]
		if distribution == "" || component == "" {
			// deduce missing distribution/component from the source tree
			rootDistributions, rootComponents := walkUpTree(source, collectionFactory)
			if distribution == "" {
				for i := range rootDistributions {
					rootDistributions[i] = strings.Replace(rootDistributions[i], "/", "-", -1)
				}
				discoveredDistributions = append(discoveredDistributions, rootDistributions...)
			}
			if component == "" {
				sort.Strings(rootComponents)
				// sorted: first == last means all roots agree on one component
				if len(rootComponents) > 0 && rootComponents[0] == rootComponents[len(rootComponents)-1] {
					component = rootComponents[0]
				} else if len(sources) == 1 {
					// only if going from one source, assume default component "main"
					component = "main"
				} else {
					return nil, fmt.Errorf("unable to figure out component name for %s", source)
				}
			}
		}

		_, exists := result.Sources[component]
		if exists {
			return nil, fmt.Errorf("duplicate component name: %s", component)
		}

		if result.SourceKind == SourceSnapshot {
			snapshot = source.(*Snapshot)
			result.Sources[component] = snapshot.UUID
			result.sourceItems[component] = repoSourceItem{snapshot: snapshot}

			if !utils.StrSliceHasItem(fields["Origin"], snapshot.Origin) {
				fields["Origin"] = append(fields["Origin"], snapshot.Origin)
			}
			if !utils.StrSliceHasItem(fields["NotAutomatic"], snapshot.NotAutomatic) {
				fields["NotAutomatic"] = append(fields["NotAutomatic"], snapshot.NotAutomatic)
			}
			if !utils.StrSliceHasItem(fields["ButAutomaticUpgrades"], snapshot.ButAutomaticUpgrades) {
				fields["ButAutomaticUpgrades"] = append(fields["ButAutomaticUpgrades"], snapshot.ButAutomaticUpgrades)
			}
		} else if result.SourceKind == SourceLocalRepo {
			localRepo = source.(*LocalRepo)
			result.Sources[component] = localRepo.UUID
			result.sourceItems[component] = repoSourceItem{localRepo: localRepo, packageRefs: localRepo.RefList()}
		}
	}

	// clean & verify prefix
	prefix = filepath.Clean(prefix)
	prefix = strings.TrimPrefix(strings.TrimSuffix(prefix, "/"), "/")
	prefix = filepath.Clean(prefix)

	// reserved path elements would collide with published layout
	for _, part := range strings.Split(prefix, "/") {
		if part == ".." || part == "dists" || part == "pool" {
			return nil, fmt.Errorf("invalid prefix %s", prefix)
		}
	}

	result.Prefix = prefix

	// guessing distribution: accept only if all sources yielded the same one
	if distribution == "" {
		sort.Strings(discoveredDistributions)
		if len(discoveredDistributions) > 0 && discoveredDistributions[0] == discoveredDistributions[len(discoveredDistributions)-1] {
			distribution = discoveredDistributions[0]
		} else {
			return nil, fmt.Errorf("unable to guess distribution name, please specify explicitly")
		}
	}

	if strings.Contains(distribution, "/") {
		return nil, fmt.Errorf("invalid distribution %s, '/' is not allowed", distribution)
	}

	result.Distribution = distribution

	// only fields which are unique by all given snapshots are set on published
	if len(fields["Origin"]) == 1 {
		result.Origin = fields["Origin"][0]
	}
	if len(fields["NotAutomatic"]) == 1 {
		result.NotAutomatic = fields["NotAutomatic"][0]
	}
	if len(fields["ButAutomaticUpgrades"]) == 1 {
		result.ButAutomaticUpgrades = fields["ButAutomaticUpgrades"][0]
	}

	return result, nil
}
283
284// MarshalJSON requires object to be "loeaded completely"
285func (p *PublishedRepo) MarshalJSON() ([]byte, error) {
286	type sourceInfo struct {
287		Component, Name string
288	}
289
290	sources := []sourceInfo{}
291	for component, item := range p.sourceItems {
292		name := ""
293		if item.snapshot != nil {
294			name = item.snapshot.Name
295		} else if item.localRepo != nil {
296			name = item.localRepo.Name
297		} else {
298			panic("no snapshot/local repo")
299		}
300		sources = append(sources, sourceInfo{
301			Component: component,
302			Name:      name,
303		})
304	}
305
306	return json.Marshal(map[string]interface{}{
307		"Architectures":        p.Architectures,
308		"Distribution":         p.Distribution,
309		"Label":                p.Label,
310		"Origin":               p.Origin,
311		"NotAutomatic":         p.NotAutomatic,
312		"ButAutomaticUpgrades": p.ButAutomaticUpgrades,
313		"Prefix":               p.Prefix,
314		"SourceKind":           p.SourceKind,
315		"Sources":              sources,
316		"Storage":              p.Storage,
317		"SkipContents":         p.SkipContents,
318		"AcquireByHash":        p.AcquireByHash,
319	})
320}
321
322// String returns human-readable representation of PublishedRepo
323func (p *PublishedRepo) String() string {
324	var sources = []string{}
325
326	for _, component := range p.Components() {
327		var source string
328
329		item := p.sourceItems[component]
330		if item.snapshot != nil {
331			source = item.snapshot.String()
332		} else if item.localRepo != nil {
333			source = item.localRepo.String()
334		} else {
335			panic("no snapshot/localRepo")
336		}
337
338		sources = append(sources, fmt.Sprintf("{%s: %s}", component, source))
339	}
340
341	var extras []string
342	var extra string
343
344	if p.Origin != "" {
345		extras = append(extras, fmt.Sprintf("origin: %s", p.Origin))
346	}
347
348	if p.NotAutomatic != "" {
349		extras = append(extras, fmt.Sprintf("notautomatic: %s", p.NotAutomatic))
350	}
351
352	if p.ButAutomaticUpgrades != "" {
353		extras = append(extras, fmt.Sprintf("butautomaticupgrades: %s", p.ButAutomaticUpgrades))
354	}
355
356	if p.Label != "" {
357		extras = append(extras, fmt.Sprintf("label: %s", p.Label))
358	}
359
360	extra = strings.Join(extras, ", ")
361
362	if extra != "" {
363		extra = " (" + extra + ")"
364	}
365
366	return fmt.Sprintf("%s/%s%s [%s] publishes %s", p.StoragePrefix(), p.Distribution, extra, strings.Join(p.Architectures, ", "),
367		strings.Join(sources, ", "))
368}
369
370// StoragePrefix returns combined storage & prefix for the repo
371func (p *PublishedRepo) StoragePrefix() string {
372	result := p.Prefix
373	if p.Storage != "" {
374		result = p.Storage + ":" + p.Prefix
375	}
376	return result
377}
378
379// Key returns unique key identifying PublishedRepo
380func (p *PublishedRepo) Key() []byte {
381	return []byte("U" + p.StoragePrefix() + ">>" + p.Distribution)
382}
383
384// RefKey is a unique id for package reference list
385func (p *PublishedRepo) RefKey(component string) []byte {
386	return []byte("E" + p.UUID + component)
387}
388
389// RefList returns list of package refs in local repo
390func (p *PublishedRepo) RefList(component string) *PackageRefList {
391	item := p.sourceItems[component]
392	if p.SourceKind == SourceLocalRepo {
393		return item.packageRefs
394	}
395	if p.SourceKind == SourceSnapshot {
396		return item.snapshot.RefList()
397	}
398	panic("unknown source")
399}
400
401// Components returns sorted list of published repo components
402func (p *PublishedRepo) Components() []string {
403	result := make([]string, 0, len(p.Sources))
404	for component := range p.Sources {
405		result = append(result, component)
406	}
407
408	sort.Strings(result)
409	return result
410}
411
412// UpdateLocalRepo updates content from local repo in component
413func (p *PublishedRepo) UpdateLocalRepo(component string) {
414	if p.SourceKind != SourceLocalRepo {
415		panic("not local repo publish")
416	}
417
418	item := p.sourceItems[component]
419	item.packageRefs = item.localRepo.RefList()
420	p.sourceItems[component] = item
421
422	p.rePublishing = true
423}
424
425// UpdateSnapshot switches snapshot for component
426func (p *PublishedRepo) UpdateSnapshot(component string, snapshot *Snapshot) {
427	if p.SourceKind != SourceSnapshot {
428		panic("not snapshot publish")
429	}
430
431	item := p.sourceItems[component]
432	item.snapshot = snapshot
433	p.sourceItems[component] = item
434
435	p.Sources[component] = snapshot.UUID
436	p.rePublishing = true
437}
438
439// Encode does msgpack encoding of PublishedRepo
440func (p *PublishedRepo) Encode() []byte {
441	var buf bytes.Buffer
442
443	encoder := codec.NewEncoder(&buf, &codec.MsgpackHandle{})
444	encoder.Encode(p)
445
446	return buf.Bytes()
447}
448
449// Decode decodes msgpack representation into PublishedRepo
450func (p *PublishedRepo) Decode(input []byte) error {
451	decoder := codec.NewDecoderBytes(input, &codec.MsgpackHandle{})
452	err := decoder.Decode(p)
453	if err != nil {
454		return err
455	}
456
457	// old PublishedRepo were publishing only snapshots
458	if p.SourceKind == "" {
459		p.SourceKind = SourceSnapshot
460	}
461
462	// <0.6 aptly used single SourceUUID + Component instead of Sources
463	if p.Component != "" && p.SourceUUID != "" && len(p.Sources) == 0 {
464		p.Sources = map[string]string{p.Component: p.SourceUUID}
465		p.Component = ""
466		p.SourceUUID = ""
467	}
468
469	return nil
470}
471
472// GetOrigin returns default or manual Origin:
473func (p *PublishedRepo) GetOrigin() string {
474	if p.Origin == "" {
475		return p.Prefix + " " + p.Distribution
476	}
477	return p.Origin
478}
479
480// GetLabel returns default or manual Label:
481func (p *PublishedRepo) GetLabel() string {
482	if p.Label == "" {
483		return p.Prefix + " " + p.Distribution
484	}
485	return p.Label
486}
487
488// Publish publishes snapshot (repository) contents, links package files, generates Packages & Release files, signs them
489func (p *PublishedRepo) Publish(packagePool aptly.PackagePool, publishedStorageProvider aptly.PublishedStorageProvider,
490	collectionFactory *CollectionFactory, signer pgp.Signer, progress aptly.Progress, forceOverwrite bool) error {
491	publishedStorage := publishedStorageProvider.GetPublishedStorage(p.Storage)
492
493	err := publishedStorage.MkDir(filepath.Join(p.Prefix, "pool"))
494	if err != nil {
495		return err
496	}
497	basePath := filepath.Join(p.Prefix, "dists", p.Distribution)
498	err = publishedStorage.MkDir(basePath)
499	if err != nil {
500		return err
501	}
502
503	tempDB, err := collectionFactory.TemporaryDB()
504	if err != nil {
505		return err
506	}
507	defer func() {
508		e := tempDB.Close()
509		if e != nil && progress != nil {
510			progress.Printf("failed to close temp DB: %s", err)
511		}
512		e = tempDB.Drop()
513		if e != nil && progress != nil {
514			progress.Printf("failed to drop temp DB: %s", err)
515		}
516	}()
517
518	if progress != nil {
519		progress.Printf("Loading packages...\n")
520	}
521
522	lists := map[string]*PackageList{}
523
524	for component := range p.sourceItems {
525		// Load all packages
526		lists[component], err = NewPackageListFromRefList(p.RefList(component), collectionFactory.PackageCollection(), progress)
527		if err != nil {
528			return fmt.Errorf("unable to load packages: %s", err)
529		}
530	}
531
532	if !p.rePublishing {
533		if len(p.Architectures) == 0 {
534			for _, list := range lists {
535				p.Architectures = append(p.Architectures, list.Architectures(true)...)
536			}
537		}
538
539		if len(p.Architectures) == 0 {
540			return fmt.Errorf("unable to figure out list of architectures, please supply explicit list")
541		}
542
543		sort.Strings(p.Architectures)
544		p.Architectures = utils.StrSliceDeduplicate(p.Architectures)
545	}
546
547	var suffix string
548	if p.rePublishing {
549		suffix = ".tmp"
550	}
551
552	if progress != nil {
553		progress.Printf("Generating metadata files and linking package files...\n")
554	}
555
556	var tempDir string
557	tempDir, err = ioutil.TempDir(os.TempDir(), "aptly")
558	if err != nil {
559		return err
560	}
561	defer os.RemoveAll(tempDir)
562
563	indexes := newIndexFiles(publishedStorage, basePath, tempDir, suffix, p.AcquireByHash)
564
565	legacyContentIndexes := map[string]*ContentsIndex{}
566
567	for component, list := range lists {
568		hadUdebs := false
569
570		// For all architectures, pregenerate packages/sources files
571		for _, arch := range p.Architectures {
572			indexes.PackageIndex(component, arch, false, false)
573		}
574
575		if progress != nil {
576			progress.InitBar(int64(list.Len()), false)
577		}
578
579		list.PrepareIndex()
580
581		contentIndexes := map[string]*ContentsIndex{}
582
583		err = list.ForEachIndexed(func(pkg *Package) error {
584			if progress != nil {
585				progress.AddBar(1)
586			}
587
588			for _, arch := range p.Architectures {
589				if pkg.MatchesArchitecture(arch) {
590					hadUdebs = hadUdebs || pkg.IsUdeb
591
592					var relPath string
593					if !pkg.IsInstaller {
594						poolDir, err2 := pkg.PoolDirectory()
595						if err2 != nil {
596							return err2
597						}
598						relPath = filepath.Join("pool", component, poolDir)
599					} else {
600						relPath = filepath.Join("dists", p.Distribution, component, fmt.Sprintf("%s-%s", pkg.Name, arch), "current", "images")
601					}
602
603					err = pkg.LinkFromPool(publishedStorage, packagePool, p.Prefix, relPath, forceOverwrite)
604					if err != nil {
605						return err
606					}
607					break
608				}
609			}
610
611			// Start a db batch. If we fill contents data we'll need
612			// to push each path of the package into the database.
613			// We'll want this batched so as to avoid an excessive
614			// amount of write() calls.
615			tempDB.StartBatch()
616			defer tempDB.FinishBatch()
617
618			for _, arch := range p.Architectures {
619				if pkg.MatchesArchitecture(arch) {
620					var bufWriter *bufio.Writer
621
622					if !p.SkipContents && !pkg.IsInstaller {
623						key := fmt.Sprintf("%s-%v", arch, pkg.IsUdeb)
624						qualifiedName := []byte(pkg.QualifiedName())
625						contents := pkg.Contents(packagePool, progress)
626
627						for _, contentIndexesMap := range []map[string]*ContentsIndex{contentIndexes, legacyContentIndexes} {
628							contentIndex := contentIndexesMap[key]
629
630							if contentIndex == nil {
631								contentIndex = NewContentsIndex(tempDB)
632								contentIndexesMap[key] = contentIndex
633							}
634
635							contentIndex.Push(qualifiedName, contents)
636						}
637					}
638
639					bufWriter, err = indexes.PackageIndex(component, arch, pkg.IsUdeb, pkg.IsInstaller).BufWriter()
640					if err != nil {
641						return err
642					}
643
644					err = pkg.Stanza().WriteTo(bufWriter, pkg.IsSource, false, pkg.IsInstaller)
645					if err != nil {
646						return err
647					}
648					err = bufWriter.WriteByte('\n')
649					if err != nil {
650						return err
651					}
652				}
653			}
654
655			pkg.files = nil
656			pkg.deps = nil
657			pkg.extra = nil
658			pkg.contents = nil
659
660			return nil
661		})
662
663		if err != nil {
664			return fmt.Errorf("unable to process packages: %s", err)
665		}
666
667		for _, arch := range p.Architectures {
668			for _, udeb := range []bool{true, false} {
669				index := contentIndexes[fmt.Sprintf("%s-%v", arch, udeb)]
670				if index == nil || index.Empty() {
671					continue
672				}
673
674				var bufWriter *bufio.Writer
675				bufWriter, err = indexes.ContentsIndex(component, arch, udeb).BufWriter()
676				if err != nil {
677					return fmt.Errorf("unable to generate contents index: %v", err)
678				}
679
680				_, err = index.WriteTo(bufWriter)
681				if err != nil {
682					return fmt.Errorf("unable to generate contents index: %v", err)
683				}
684			}
685		}
686
687		if progress != nil {
688			progress.ShutdownBar()
689		}
690
691		udebs := []bool{false}
692		if hadUdebs {
693			udebs = append(udebs, true)
694
695			// For all architectures, pregenerate .udeb indexes
696			for _, arch := range p.Architectures {
697				indexes.PackageIndex(component, arch, true, false)
698			}
699		}
700
701		// For all architectures, generate Release files
702		for _, arch := range p.Architectures {
703			for _, udeb := range udebs {
704				release := make(Stanza)
705				release["Archive"] = p.Distribution
706				release["Architecture"] = arch
707				release["Component"] = component
708				release["Origin"] = p.GetOrigin()
709				release["Label"] = p.GetLabel()
710				if p.AcquireByHash {
711					release["Acquire-By-Hash"] = "yes"
712				}
713
714				var bufWriter *bufio.Writer
715				bufWriter, err = indexes.ReleaseIndex(component, arch, udeb).BufWriter()
716				if err != nil {
717					return fmt.Errorf("unable to get ReleaseIndex writer: %s", err)
718				}
719
720				err = release.WriteTo(bufWriter, false, true, false)
721				if err != nil {
722					return fmt.Errorf("unable to create Release file: %s", err)
723				}
724			}
725		}
726	}
727
728	for _, arch := range p.Architectures {
729		for _, udeb := range []bool{true, false} {
730			index := legacyContentIndexes[fmt.Sprintf("%s-%v", arch, udeb)]
731			if index == nil || index.Empty() {
732				continue
733			}
734
735			var bufWriter *bufio.Writer
736			bufWriter, err = indexes.LegacyContentsIndex(arch, udeb).BufWriter()
737			if err != nil {
738				return fmt.Errorf("unable to generate contents index: %v", err)
739			}
740
741			_, err = index.WriteTo(bufWriter)
742			if err != nil {
743				return fmt.Errorf("unable to generate contents index: %v", err)
744			}
745		}
746	}
747
748	if progress != nil {
749		progress.Printf("Finalizing metadata files...\n")
750	}
751
752	err = indexes.FinalizeAll(progress, signer)
753	if err != nil {
754		return err
755	}
756
757	release := make(Stanza)
758	release["Origin"] = p.GetOrigin()
759	if p.NotAutomatic != "" {
760		release["NotAutomatic"] = p.NotAutomatic
761	}
762	if p.ButAutomaticUpgrades != "" {
763		release["ButAutomaticUpgrades"] = p.ButAutomaticUpgrades
764	}
765	release["Label"] = p.GetLabel()
766	release["Suite"] = p.Distribution
767	release["Codename"] = p.Distribution
768	release["Date"] = time.Now().UTC().Format("Mon, 2 Jan 2006 15:04:05 MST")
769	release["Architectures"] = strings.Join(utils.StrSlicesSubstract(p.Architectures, []string{ArchitectureSource}), " ")
770	if p.AcquireByHash {
771		release["Acquire-By-Hash"] = "yes"
772	}
773	release["Description"] = " Generated by aptly\n"
774	release["MD5Sum"] = ""
775	release["SHA1"] = ""
776	release["SHA256"] = ""
777	release["SHA512"] = ""
778
779	release["Components"] = strings.Join(p.Components(), " ")
780
781	sortedPaths := make([]string, 0, len(indexes.generatedFiles))
782	for path := range indexes.generatedFiles {
783		sortedPaths = append(sortedPaths, path)
784	}
785	sort.Strings(sortedPaths)
786
787	for _, path := range sortedPaths {
788		info := indexes.generatedFiles[path]
789		release["MD5Sum"] += fmt.Sprintf(" %s %8d %s\n", info.MD5, info.Size, path)
790		release["SHA1"] += fmt.Sprintf(" %s %8d %s\n", info.SHA1, info.Size, path)
791		release["SHA256"] += fmt.Sprintf(" %s %8d %s\n", info.SHA256, info.Size, path)
792		release["SHA512"] += fmt.Sprintf(" %s %8d %s\n", info.SHA512, info.Size, path)
793	}
794
795	releaseFile := indexes.ReleaseFile()
796	bufWriter, err := releaseFile.BufWriter()
797	if err != nil {
798		return err
799	}
800
801	err = release.WriteTo(bufWriter, false, true, false)
802	if err != nil {
803		return fmt.Errorf("unable to create Release file: %s", err)
804	}
805
806	// Signing files might output to console, so flush progress writer first
807	if progress != nil {
808		progress.Flush()
809	}
810
811	err = releaseFile.Finalize(signer)
812	if err != nil {
813		return err
814	}
815
816	return indexes.RenameFiles()
817}
818
819// RemoveFiles removes files that were created by Publish
820//
821// It can remove prefix fully, and part of pool (for specific component)
822func (p *PublishedRepo) RemoveFiles(publishedStorageProvider aptly.PublishedStorageProvider, removePrefix bool,
823	removePoolComponents []string, progress aptly.Progress) error {
824	publishedStorage := publishedStorageProvider.GetPublishedStorage(p.Storage)
825
826	// I. Easy: remove whole prefix (meta+packages)
827	if removePrefix {
828		err := publishedStorage.RemoveDirs(filepath.Join(p.Prefix, "dists"), progress)
829		if err != nil {
830			return err
831		}
832
833		return publishedStorage.RemoveDirs(filepath.Join(p.Prefix, "pool"), progress)
834	}
835
836	// II. Medium: remove metadata, it can't be shared as prefix/distribution as unique
837	err := publishedStorage.RemoveDirs(filepath.Join(p.Prefix, "dists", p.Distribution), progress)
838	if err != nil {
839		return err
840	}
841
842	// III. Complex: there are no other publishes with the same prefix + component
843	for _, component := range removePoolComponents {
844		err = publishedStorage.RemoveDirs(filepath.Join(p.Prefix, "pool", component), progress)
845		if err != nil {
846			return err
847		}
848	}
849
850	return nil
851}
852
// PublishedRepoCollection does listing, updating/adding/deleting of PublishedRepos
type PublishedRepoCollection struct {
	*sync.RWMutex
	// db is the backing metadata store
	db database.Storage
	// list caches all published repos; nil until loadList() runs
	list []*PublishedRepo
}
859
860// NewPublishedRepoCollection loads PublishedRepos from DB and makes up collection
861func NewPublishedRepoCollection(db database.Storage) *PublishedRepoCollection {
862	return &PublishedRepoCollection{
863		RWMutex: &sync.RWMutex{},
864		db:      db,
865	}
866}
867
868func (collection *PublishedRepoCollection) loadList() {
869	if collection.list != nil {
870		return
871	}
872
873	blobs := collection.db.FetchByPrefix([]byte("U"))
874	collection.list = make([]*PublishedRepo, 0, len(blobs))
875
876	for _, blob := range blobs {
877		r := &PublishedRepo{}
878		if err := r.Decode(blob); err != nil {
879			log.Printf("Error decoding published repo: %s\n", err)
880		} else {
881			collection.list = append(collection.list, r)
882		}
883	}
884}
885
886// Add appends new repo to collection and saves it
887func (collection *PublishedRepoCollection) Add(repo *PublishedRepo) error {
888	collection.loadList()
889
890	if collection.CheckDuplicate(repo) != nil {
891		return fmt.Errorf("published repo with storage/prefix/distribution %s/%s/%s already exists", repo.Storage, repo.Prefix, repo.Distribution)
892	}
893
894	err := collection.Update(repo)
895	if err != nil {
896		return err
897	}
898
899	collection.list = append(collection.list, repo)
900	return nil
901}
902
903// CheckDuplicate verifies that there's no published repo with the same name
904func (collection *PublishedRepoCollection) CheckDuplicate(repo *PublishedRepo) *PublishedRepo {
905	collection.loadList()
906
907	for _, r := range collection.list {
908		if r.Prefix == repo.Prefix && r.Distribution == repo.Distribution && r.Storage == repo.Storage {
909			return r
910		}
911	}
912
913	return nil
914}
915
916// Update stores updated information about repo in DB
917func (collection *PublishedRepoCollection) Update(repo *PublishedRepo) (err error) {
918	err = collection.db.Put(repo.Key(), repo.Encode())
919	if err != nil {
920		return
921	}
922
923	if repo.SourceKind == SourceLocalRepo {
924		for component, item := range repo.sourceItems {
925			err = collection.db.Put(repo.RefKey(component), item.packageRefs.Encode())
926			if err != nil {
927				return
928			}
929		}
930	}
931	return
932}
933
// LoadComplete loads additional information (resolved sources and, for
// local-repo publishes, the stored package ref lists) for a published repo.
// Must be called before RefList/String/MarshalJSON can be used.
func (collection *PublishedRepoCollection) LoadComplete(repo *PublishedRepo, collectionFactory *CollectionFactory) (err error) {
	repo.sourceItems = make(map[string]repoSourceItem)

	if repo.SourceKind == SourceSnapshot {
		for component, sourceUUID := range repo.Sources {
			item := repoSourceItem{}

			item.snapshot, err = collectionFactory.SnapshotCollection().ByUUID(sourceUUID)
			if err != nil {
				return
			}
			err = collectionFactory.SnapshotCollection().LoadComplete(item.snapshot)
			if err != nil {
				return
			}

			repo.sourceItems[component] = item
		}
	} else if repo.SourceKind == SourceLocalRepo {
		for component, sourceUUID := range repo.Sources {
			item := repoSourceItem{}

			item.localRepo, err = collectionFactory.LocalRepoCollection().ByUUID(sourceUUID)
			if err != nil {
				return
			}
			err = collectionFactory.LocalRepoCollection().LoadComplete(item.localRepo)
			if err != nil {
				return
			}

			// ref list for the component is stored separately in the DB
			var encoded []byte
			encoded, err = collection.db.Get(repo.RefKey(component))
			if err != nil {
				// < 0.6 saving w/o component name
				if err == database.ErrNotFound && len(repo.Sources) == 1 {
					encoded, err = collection.db.Get(repo.RefKey(""))
				}

				if err != nil {
					return
				}
			}

			item.packageRefs = &PackageRefList{}
			err = item.packageRefs.Decode(encoded)
			if err != nil {
				return
			}

			repo.sourceItems[component] = item
		}
	} else {
		panic("unknown SourceKind")
	}

	return
}
993
994// ByStoragePrefixDistribution looks up repository by storage, prefix & distribution
995func (collection *PublishedRepoCollection) ByStoragePrefixDistribution(storage, prefix, distribution string) (*PublishedRepo, error) {
996	collection.loadList()
997
998	for _, r := range collection.list {
999		if r.Prefix == prefix && r.Distribution == distribution && r.Storage == storage {
1000			return r, nil
1001		}
1002	}
1003	if storage != "" {
1004		storage += ":"
1005	}
1006	return nil, fmt.Errorf("published repo with storage:prefix/distribution %s%s/%s not found", storage, prefix, distribution)
1007}
1008
1009// ByUUID looks up repository by uuid
1010func (collection *PublishedRepoCollection) ByUUID(uuid string) (*PublishedRepo, error) {
1011	collection.loadList()
1012
1013	for _, r := range collection.list {
1014		if r.UUID == uuid {
1015			return r, nil
1016		}
1017	}
1018	return nil, fmt.Errorf("published repo with uuid %s not found", uuid)
1019}
1020
1021// BySnapshot looks up repository by snapshot source
1022func (collection *PublishedRepoCollection) BySnapshot(snapshot *Snapshot) []*PublishedRepo {
1023	collection.loadList()
1024
1025	var result []*PublishedRepo
1026	for _, r := range collection.list {
1027		if r.SourceKind == SourceSnapshot {
1028			if r.SourceUUID == snapshot.UUID {
1029				result = append(result, r)
1030			}
1031
1032			for _, sourceUUID := range r.Sources {
1033				if sourceUUID == snapshot.UUID {
1034					result = append(result, r)
1035					break
1036				}
1037			}
1038		}
1039	}
1040	return result
1041}
1042
1043// ByLocalRepo looks up repository by local repo source
1044func (collection *PublishedRepoCollection) ByLocalRepo(repo *LocalRepo) []*PublishedRepo {
1045	collection.loadList()
1046
1047	var result []*PublishedRepo
1048	for _, r := range collection.list {
1049		if r.SourceKind == SourceLocalRepo {
1050			if r.SourceUUID == repo.UUID {
1051				result = append(result, r)
1052			}
1053
1054			for _, sourceUUID := range r.Sources {
1055				if sourceUUID == repo.UUID {
1056					result = append(result, r)
1057					break
1058				}
1059			}
1060		}
1061	}
1062	return result
1063}
1064
1065// ForEach runs method for each repository
1066func (collection *PublishedRepoCollection) ForEach(handler func(*PublishedRepo) error) error {
1067	return collection.db.ProcessByPrefix([]byte("U"), func(key, blob []byte) error {
1068		r := &PublishedRepo{}
1069		if err := r.Decode(blob); err != nil {
1070			log.Printf("Error decoding published repo: %s\n", err)
1071			return nil
1072		}
1073
1074		return handler(r)
1075	})
1076}
1077
1078// Len returns number of remote repos
1079func (collection *PublishedRepoCollection) Len() int {
1080	collection.loadList()
1081
1082	return len(collection.list)
1083}
1084
1085// CleanupPrefixComponentFiles removes all unreferenced files in published storage under prefix/component pair
1086func (collection *PublishedRepoCollection) CleanupPrefixComponentFiles(prefix string, components []string,
1087	publishedStorage aptly.PublishedStorage, collectionFactory *CollectionFactory, progress aptly.Progress) error {
1088
1089	collection.loadList()
1090
1091	var err error
1092	referencedFiles := map[string][]string{}
1093
1094	if progress != nil {
1095		progress.Printf("Cleaning up prefix %#v components %s...\n", prefix, strings.Join(components, ", "))
1096	}
1097
1098	for _, r := range collection.list {
1099		if r.Prefix == prefix {
1100			matches := false
1101
1102			repoComponents := r.Components()
1103
1104			for _, component := range components {
1105				if utils.StrSliceHasItem(repoComponents, component) {
1106					matches = true
1107					break
1108				}
1109			}
1110
1111			if !matches {
1112				continue
1113			}
1114
1115			err = collection.LoadComplete(r, collectionFactory)
1116			if err != nil {
1117				return err
1118			}
1119
1120			for _, component := range components {
1121				if utils.StrSliceHasItem(repoComponents, component) {
1122					packageList, err := NewPackageListFromRefList(r.RefList(component), collectionFactory.PackageCollection(), progress)
1123					if err != nil {
1124						return err
1125					}
1126
1127					packageList.ForEach(func(p *Package) error {
1128						poolDir, err := p.PoolDirectory()
1129						if err != nil {
1130							return err
1131						}
1132
1133						for _, f := range p.Files() {
1134							referencedFiles[component] = append(referencedFiles[component], filepath.Join(poolDir, f.Filename))
1135						}
1136
1137						return nil
1138					})
1139				}
1140			}
1141		}
1142	}
1143
1144	for _, component := range components {
1145		sort.Strings(referencedFiles[component])
1146
1147		rootPath := filepath.Join(prefix, "pool", component)
1148		existingFiles, err := publishedStorage.Filelist(rootPath)
1149		if err != nil {
1150			return err
1151		}
1152
1153		sort.Strings(existingFiles)
1154
1155		filesToDelete := utils.StrSlicesSubstract(existingFiles, referencedFiles[component])
1156
1157		for _, file := range filesToDelete {
1158			err = publishedStorage.Remove(filepath.Join(rootPath, file))
1159			if err != nil {
1160				return err
1161			}
1162		}
1163	}
1164
1165	return nil
1166}
1167
1168// Remove removes published repository, cleaning up directories, files
1169func (collection *PublishedRepoCollection) Remove(publishedStorageProvider aptly.PublishedStorageProvider,
1170	storage, prefix, distribution string, collectionFactory *CollectionFactory, progress aptly.Progress,
1171	force, skipCleanup bool) error {
1172
1173	collection.loadList()
1174
1175	repo, err := collection.ByStoragePrefixDistribution(storage, prefix, distribution)
1176	if err != nil {
1177		return err
1178	}
1179
1180	removePrefix := true
1181	removePoolComponents := repo.Components()
1182	cleanComponents := []string{}
1183	repoPosition := -1
1184
1185	for i, r := range collection.list {
1186		if r == repo {
1187			repoPosition = i
1188			continue
1189		}
1190		if r.Storage == repo.Storage && r.Prefix == repo.Prefix {
1191			removePrefix = false
1192
1193			rComponents := r.Components()
1194			for _, component := range rComponents {
1195				if utils.StrSliceHasItem(removePoolComponents, component) {
1196					removePoolComponents = utils.StrSlicesSubstract(removePoolComponents, []string{component})
1197					cleanComponents = append(cleanComponents, component)
1198				}
1199			}
1200		}
1201	}
1202
1203	err = repo.RemoveFiles(publishedStorageProvider, removePrefix, removePoolComponents, progress)
1204	if err != nil {
1205		if !force {
1206			return fmt.Errorf("published files removal failed, use -force-drop to override: %s", err)
1207		}
1208		// ignore error with -force-drop
1209	}
1210
1211	collection.list[len(collection.list)-1], collection.list[repoPosition], collection.list =
1212		nil, collection.list[len(collection.list)-1], collection.list[:len(collection.list)-1]
1213
1214	if !skipCleanup && len(cleanComponents) > 0 {
1215		err = collection.CleanupPrefixComponentFiles(repo.Prefix, cleanComponents,
1216			publishedStorageProvider.GetPublishedStorage(storage), collectionFactory, progress)
1217		if err != nil {
1218			if !force {
1219				return fmt.Errorf("cleanup failed, use -force-drop to override: %s", err)
1220			}
1221		}
1222	}
1223
1224	err = collection.db.Delete(repo.Key())
1225	if err != nil {
1226		return err
1227	}
1228
1229	for _, component := range repo.Components() {
1230		err = collection.db.Delete(repo.RefKey(component))
1231		if err != nil {
1232			return err
1233		}
1234	}
1235
1236	return nil
1237}
1238