// Copyright 2019 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.

package search

import (
	"context"
	"os"
	"path"
	"path/filepath"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/blevesearch/bleve"
	"github.com/blevesearch/bleve/index/store"
	"github.com/blevesearch/bleve/registry"
	"github.com/keybase/client/go/kbfs/data"
	"github.com/keybase/client/go/kbfs/idutil"
	"github.com/keybase/client/go/kbfs/ioutil"
	"github.com/keybase/client/go/kbfs/kbfsmd"
	"github.com/keybase/client/go/kbfs/kbfssync"
	"github.com/keybase/client/go/kbfs/libcontext"
	"github.com/keybase/client/go/kbfs/libfs"
	"github.com/keybase/client/go/kbfs/libkbfs"
	"github.com/keybase/client/go/kbfs/tlf"
	kbname "github.com/keybase/client/go/kbun"
	"github.com/keybase/client/go/logger"
	"github.com/keybase/client/go/protocol/keybase1"
	"github.com/pkg/errors"
	"github.com/shirou/gopsutil/mem"
	ldberrors "github.com/syndtr/goleveldb/leveldb/errors"
	billy "gopkg.in/src-d/go-billy.v4"
)

const (
	textFileType          = "kbfsTextFile"
	htmlFileType          = "kbfsHTMLFile"
	kvstoreNamePrefix     = "kbfs"
	bleveIndexType        = "upside_down"
	fsIndexStorageDir     = "kbfs_index"
	docDbDir              = "docdb"
	nameDocIDPrefix       = "name_"
	defaultIndexBatchSize = 10 * 1024 * 1024 // 10 MB
	indexBatchSizeFactor  = 500
	minIndexBatchSize     = 1 * 1024 * 1024   // 1 MB
	maxIndexBatchSize     = 100 * 1024 * 1024 // 100 MB
)

const (
	// ctxOpID is the display name for the unique operation index ID tag.
	ctxOpID = "IID"
)

// ctxTagKey is the type used for unique context tags.
type ctxTagKey int

const (
	// ctxIDKey is the tag key for unique operation IDs.
	ctxIDKey ctxTagKey = iota
)

type tlfMessage struct {
	tlfID tlf.ID
	rev   kbfsmd.Revision
	mode  keybase1.FolderSyncMode
}

type initFn func(
	context.Context, libkbfs.Config, idutil.SessionInfo, logger.Logger) (
	context.Context, libkbfs.Config, func(context.Context) error, error)

// Indexer can index and search KBFS TLFs.
type Indexer struct {
	config       libkbfs.Config
	log          logger.Logger
	cancelLoop   context.CancelFunc
	remoteStatus libfs.RemoteStatus
	configInitFn initFn
	once         sync.Once
	indexWG      kbfssync.RepeatedWaitGroup
	loopWG       kbfssync.RepeatedWaitGroup
	kvstoreName  string
	fullIndexCB  func() error // helpful for testing
	progress     *Progress

	userChangedCh chan struct{}
	tlfCh         chan tlfMessage
	shutdownCh    chan struct{}

	lock           sync.RWMutex
	index          bleve.Index
	indexConfig    libkbfs.Config
	configShutdown func(context.Context) error
	blocksDb       *IndexedBlockDb
	tlfDb          *IndexedTlfDb
	docDb          *DocDb
	indexReadyCh   chan struct{}
	cancelCtx      context.CancelFunc
	fs             billy.Filesystem
	currBatch      *bleve.Batch
	currBatchSize  uint64
	batchFns       []func() error
}

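// newIndexerWithConfigInit constructs an Indexer that uses the given
// function to initialize the config for the index storage, and the
// given name for its registered bleve KV store.  It starts the
// indexing loop immediately.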
func newIndexerWithConfigInit(config libkbfs.Config, configInitFn initFn,
	kvstoreName string) (
	*Indexer, error) {
	log := config.MakeLogger("search")
	i := &Indexer{
		config:        config,
		log:           log,
		configInitFn:  configInitFn,
		kvstoreName:   kvstoreName,
		progress:      NewProgress(config.Clock()),
		userChangedCh: make(chan struct{}, 1),
		tlfCh:         make(chan tlfMessage, 1000),
		shutdownCh:    make(chan struct{}),
		indexReadyCh:  make(chan struct{}),
	}

	i.startLoop()
	return i, nil
}

// NewIndexer creates a new instance of an Indexer.
func NewIndexer(config libkbfs.Config) (*Indexer, error) {
	return newIndexerWithConfigInit(
		config, defaultInitConfig, kvstoreNamePrefix)
}

func (i *Indexer) startLoop() {
	ctx, cancel := context.WithCancel(i.makeContext(context.Background()))
	i.cancelLoop = cancel
	i.loopWG.Add(1)
	go i.loop(ctx)
}

func (i *Indexer) makeContext(ctx context.Context) context.Context {
	return libkbfs.CtxWithRandomIDReplayable(ctx, ctxIDKey, ctxOpID, i.log)
}

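// closeIndexLocked closes the bleve index and the associated blocks,
// TLF and doc databases, and shuts down the index config.  It must be
// called with i.lock held for writing.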
func (i *Indexer) closeIndexLocked(ctx context.Context) error {
	if i.index == nil {
		return nil
	}

	err := i.index.Close()
	if err != nil {
		return err
	}

	// If the ready channel has already been closed, make a new one.
	select {
	case <-i.indexReadyCh:
		i.indexReadyCh = make(chan struct{})
	default:
	}

	i.blocksDb.Shutdown(ctx)
	i.tlfDb.Shutdown(ctx)
	i.docDb.Shutdown(ctx)

	shutdownErr := i.configShutdown(ctx)
	i.index = nil
	i.indexConfig = nil
	i.blocksDb = nil
	i.docDb = nil
	i.tlfDb = nil
	i.cancelCtx()
	return shutdownErr
}

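// defaultInitConfig makes a new Config object for storing index data,
// using a storage root that is unique to the given session's user.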
func defaultInitConfig(
	ctx context.Context, config libkbfs.Config, session idutil.SessionInfo,
	log logger.Logger) (
	newCtx context.Context, newConfig libkbfs.Config,
	shutdownFn func(context.Context) error, err error) {
	kbCtx := config.KbContext()
	params, err := Params(kbCtx, config.StorageRoot(), session.UID)
	if err != nil {
		return nil, nil, nil, err
	}
	newCtx, newConfig, err = Init(
		ctx, kbCtx, params, libkbfs.NewKeybaseServicePassthrough(config),
		log, config.VLogLevel())
	if err != nil {
		return nil, nil, nil, err
	}

	return newCtx, newConfig, newConfig.Shutdown, err
}

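// loadIndex creates or opens the index for the currently-logged-in
// user, along with the blocks, TLF and doc databases that track what
// has been indexed.  It closes any previously-loaded index first.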
func (i *Indexer) loadIndex(ctx context.Context) (err error) {
	i.log.CDebugf(ctx, "Loading index")
	defer func() { i.log.CDebugf(ctx, "Done loading index: %+v", err) }()
	i.lock.Lock()
	defer i.lock.Unlock()

	err = i.closeIndexLocked(ctx)
	if err != nil {
		return err
	}

	session, err := idutil.GetCurrentSessionIfPossible(
		ctx, i.config.KBPKI(), true)
	if err != nil {
		return err
	}
	if session.Name == "" {
		return nil
	}

	// Create a new Config object for the index data, with a storage
	// root that's unique to this user.
	ctx, indexConfig, configShutdown, err := i.configInitFn(
		ctx, i.config, session, i.log)
	if err != nil {
		return err
	}
	cancelCtx := func() {
		_ = libcontext.CleanupCancellationDelayer(ctx)
	}
	defer func() {
		if err != nil {
			configErr := indexConfig.Shutdown(ctx)
			if configErr != nil {
				i.log.CDebugf(ctx, "Couldn't shutdown config: %+v", configErr)
			}
			cancelCtx()
		}
	}()

	// Store the index in a KBFS private folder for the current user,
	// with all the blocks and MD stored in the storage root created
	// above.  Everything will be encrypted as if it were in the
	// user's own private KBFS folder.
	privateHandle, err := libkbfs.GetHandleFromFolderNameAndType(
		ctx, indexConfig.KBPKI(), indexConfig.MDOps(), indexConfig,
		string(session.Name), tlf.Private)
	if err != nil {
		return err
	}
	fs, err := libfs.NewFS(
		ctx, indexConfig, privateHandle, data.MasterBranch, "", "", 0)
	if err != nil {
		return err
	}
	err = fs.MkdirAll(fsIndexStorageDir, 0400)
	if err != nil {
		return err
	}
	fs, err = fs.ChrootAsLibFS(fsIndexStorageDir)
	if err != nil {
		return err
	}

	// The index itself will use LevelDB storage that writes to the
	// KBFS filesystem object made above.  Register this storage type
	// with Bleve.
	i.fs = fs
	i.once.Do(func() {
		kvstoreConstructor := func(
			mo store.MergeOperator, _ map[string]interface{}) (
			s store.KVStore, err error) {
			return newBleveLevelDBStore(i.fs, false, mo)
		}
		registry.RegisterKVStore(i.kvstoreName, kvstoreConstructor)
	})

	// Create the actual index using this storage type.  Bleve has
	// different calls for new vs. existing indices, so we first need
	// to check if it exists.  The Bleve LevelDB storage takes a lock,
	// so we don't really need to worry about concurrent KBFS
	// processes here.
	var index bleve.Index
	p := filepath.Join(i.config.StorageRoot(), indexStorageDir, bleveIndexDir)
	_, err = os.Stat(p)
	switch {
	case os.IsNotExist(errors.Cause(err)):
		i.log.CDebugf(ctx, "Creating new index for user %s/%s",
			session.Name, session.UID)

		indexMapping, err := makeIndexMapping()
		if err != nil {
			return err
		}
		index, err = bleve.NewUsing(
			p, indexMapping, bleveIndexType, i.kvstoreName, nil)
		if err != nil {
			return err
		}
	case err == nil:
		i.log.CDebugf(ctx, "Using existing index for user %s/%s",
			session.Name, session.UID)

		index, err = bleve.OpenUsing(p, nil)
		if err != nil {
			return err
		}
	default:
		return err
	}

	// Load the blocks DB.
	blocksDb, err := newIndexedBlockDb(i.config, indexConfig.StorageRoot())
	if err != nil {
		return err
	}

	// Load the TLF DB.
	tlfDb, err := newIndexedTlfDb(i.config, indexConfig.StorageRoot())
	if err != nil {
		return err
	}

	err = fs.MkdirAll(docDbDir, 0600)
	if err != nil {
		return err
	}
	docFS, err := fs.Chroot(docDbDir)
	if err != nil {
		return err
	}
	docDb, err := newDocDb(indexConfig, docFS)
	if err != nil {
		return err
	}

	err = indexConfig.KBFSOps().SyncFromServer(
		ctx, fs.RootNode().GetFolderBranch(), nil)
	if err != nil {
		return err
	}

	i.index = index
	i.indexConfig = indexConfig
	i.configShutdown = configShutdown
	i.blocksDb = blocksDb
	i.tlfDb = tlfDb
	i.docDb = docDb
	i.cancelCtx = cancelCtx
	close(i.indexReadyCh)
	return nil
}

// UserChanged implements the libfs.RemoteStatusUpdater interface for
// Indexer.
func (i *Indexer) UserChanged(
	ctx context.Context, oldName, newName kbname.NormalizedUsername) {
	select {
	case i.userChangedCh <- struct{}{}:
	default:
		i.log.CDebugf(ctx, "Dropping user changed notification")
	}
}

var _ libfs.RemoteStatusUpdater = (*Indexer)(nil)

func (i *Indexer) getMDForRev(
	ctx context.Context, tlfID tlf.ID, rev kbfsmd.Revision) (
	md libkbfs.ImmutableRootMetadata, err error) {
	return libkbfs.GetSingleMD(
		ctx, i.config, tlfID, kbfsmd.NullBranchID, rev, kbfsmd.Merged, nil)
}

func (i *Indexer) tlfQueueForProgress(
	ctx context.Context, tlfID tlf.ID, rev kbfsmd.Revision) error {
	md, err := i.getMDForRev(ctx, tlfID, rev)
	if err != nil {
		return err
	}
	// For now, assume we will be indexing the entire TLF.  If, when
	// we actually start indexing, we figure out that this is an
	// incremental index, we can update it.  `DiskUsage` is the
	// encoded, padded size, but it's the best we can easily do
	// right now.
	i.progress.tlfQueue(tlfID, md.DiskUsage())
	return nil
}

// FullSyncStarted implements the libkbfs.SyncedTlfObserver interface
// for Indexer.
func (i *Indexer) FullSyncStarted(
	ctx context.Context, tlfID tlf.ID, rev kbfsmd.Revision,
	waitCh <-chan struct{}) {
	i.log.CDebugf(ctx, "Sync started for %s/%d", tlfID, rev)
	i.indexWG.Add(1)
	go func() {
		select {
		case <-waitCh:
		case <-i.shutdownCh:
			i.indexWG.Done()
			return
		}

		ctx := i.makeContext(context.Background())
		err := i.tlfQueueForProgress(ctx, tlfID, rev)
		if err != nil {
			i.log.CDebugf(
				ctx, "Couldn't enqueue for %s/%s: %+v", tlfID, rev, err)
			i.indexWG.Done()
			return
		}

		m := tlfMessage{tlfID, rev, keybase1.FolderSyncMode_ENABLED}
		select {
		case i.tlfCh <- m:
		default:
			i.progress.tlfUnqueue(tlfID)
			i.indexWG.Done()
			i.log.CDebugf(
				context.Background(), "Couldn't send TLF message for %s/%d",
				tlfID, rev)
		}
	}()
}

// SyncModeChanged implements the libkbfs.SyncedTlfObserver interface
// for Indexer.
func (i *Indexer) SyncModeChanged(
	ctx context.Context, tlfID tlf.ID, newMode keybase1.FolderSyncMode) {
	i.log.CDebugf(ctx, "Sync mode changed for %s to %s", tlfID, newMode)
	i.indexWG.Add(1)

	// Don't enqueue progress for a TLF when the sync mode changes; if
	// the TLF is now being synced, `FullSyncStarted` will also be
	// called.

	m := tlfMessage{tlfID, kbfsmd.RevisionUninitialized, newMode}
	select {
	case i.tlfCh <- m:
	default:
		i.indexWG.Done()
		i.log.CDebugf(
			context.Background(), "Couldn't send TLF message for %s/%s",
			tlfID, newMode)
	}
}

var _ libkbfs.SyncedTlfObserver = (*Indexer)(nil)

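// getCurrentPtrAndNode looks up the given child and returns its
// current block pointer, node and entry info.  A zero pointer and nil
// node are returned for symlinks, which don't have block pointers.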
func (i *Indexer) getCurrentPtrAndNode(
	ctx context.Context, parentNode libkbfs.Node,
	childName data.PathPartString) (
	ptr data.BlockPointer, n libkbfs.Node, ei data.EntryInfo, err error) {
	n, ei, err = i.config.KBFSOps().Lookup(ctx, parentNode, childName)
	if err != nil {
		return data.ZeroPtr, nil, data.EntryInfo{}, err
	}

	// Symlinks don't have block pointers.
	if n == nil {
		return data.ZeroPtr, nil, ei, nil
	}

	// Let's find the current block ID.
	md, err := i.config.KBFSOps().GetNodeMetadata(ctx, n)
	if err != nil {
		return data.ZeroPtr, nil, data.EntryInfo{}, err
	}
	return md.BlockInfo.BlockPointer, n, ei, nil
}

func nameDocID(docID string) string {
	return nameDocIDPrefix + docID
}

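// flushBatchLocked applies the current bleve batch, if any, and then
// runs the accumulated DB-update functions associated with it.  It
// must be called with i.lock held for writing.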
func (i *Indexer) flushBatchLocked(ctx context.Context) error {
	if i.currBatch == nil {
		return nil
	}
	defer func() {
		i.currBatch = nil
		i.batchFns = nil
	}()

	// Flush the old batch.
	i.log.CDebugf(
		ctx, "Flushing a batch of size %d", i.currBatch.TotalDocsSize())
	err := i.index.Batch(i.currBatch)
	if err != nil {
		return err
	}
	for _, f := range i.batchFns {
		err := f()
		if err != nil {
			return err
		}
	}
	return i.blocksDb.ClearMemory()
}

func (i *Indexer) flushBatch(ctx context.Context) error {
	i.lock.Lock()
	defer i.lock.Unlock()
	return i.flushBatchLocked(ctx)
}

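// refreshBatchLocked flushes the current batch and starts a new one,
// scaling the batch size limit to the memory available on the system.
// It must be called with i.lock held for writing.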
func (i *Indexer) refreshBatchLocked(ctx context.Context) error {
	if i.index == nil {
		return errors.New("Index not loaded")
	}
	err := i.flushBatchLocked(ctx)
	if err != nil {
		return err
	}
	i.currBatch = i.index.NewBatch()

	// Try to scale the batch size appropriately, given the current
	// available memory on the system.
	i.currBatchSize = defaultIndexBatchSize
	vmstat, err := mem.VirtualMemory()
	if err == nil {
		// Allow large batches only if there is plenty of available
		// memory.  Bleve allocates a lot of memory per batch (I think
		// maybe 100x+ the batch size), so we need lots of spare
		// overhead.
		allowable := vmstat.Available / indexBatchSizeFactor
		if allowable > maxIndexBatchSize {
			allowable = maxIndexBatchSize
		} else if allowable < minIndexBatchSize {
			allowable = minIndexBatchSize
		}

		i.log.CDebugf(
			ctx, "Setting the indexing batch size to %d "+
				"(available mem = %d)", allowable, vmstat.Available)
		i.currBatchSize = allowable
	}

	return nil
}

func (i *Indexer) refreshBatch(ctx context.Context) error {
	i.lock.Lock()
	defer i.lock.Unlock()
	return i.refreshBatchLocked(ctx)
}

func (i *Indexer) currBatchLocked(ctx context.Context) (*bleve.Batch, error) {
	if i.currBatch == nil {
		return nil, errors.New("No current batch")
	}

	if i.currBatch.TotalDocsSize() > i.currBatchSize {
		err := i.refreshBatchLocked(ctx)
		if err != nil {
			return nil, err
		}
	}
	return i.currBatch, nil
}

func (i *Indexer) checkDone(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-i.shutdownCh:
		return errors.New("Shutdown")
	default:
		return nil
	}
}

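// indexChildWithPtrAndNode indexes a single child entry given its old
// and new block pointers.  It skips entries that are already indexed
// at the current version, and returns a function that should be
// called once all of a directory's children have been indexed, so the
// directory can be marked as done in the blocks DB.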
func (i *Indexer) indexChildWithPtrAndNode(
	ctx context.Context, parentNode libkbfs.Node, parentDocID string,
	childName data.PathPartString, oldPtr, newPtr data.BlockPointer,
	n libkbfs.Node, ei data.EntryInfo, nextDocID string,
	revision kbfsmd.Revision) (dirDoneFn func() error, err error) {
	if i.blocksDb == nil {
		return nil, errors.New("No indexed blocks db")
	}

	if i.fullIndexCB != nil {
		// Error on indexing this node if the callback tells us to
		// (useful for testing).
		err := i.fullIndexCB()
		if err != nil {
			i.log.CDebugf(ctx, "Stopping index due to testing error: %+v", err)
			return nil, err
		}
	}

	err = i.checkDone(ctx)
	if err != nil {
		return nil, err
	}

	defer func() {
		if err == nil {
			// Mark the bytes of this child as indexed.  This is the
			// actual unencrypted size of the entry, which won't match
			// up perfectly with the disk usage, but it's the easiest
			// thing to do for now.
			i.progress.indexedBytes(ei.Size)
		}
	}()

	tlfID := n.GetFolderBranch().Tlf

	// If the new pointer has already been indexed, skip indexing it again.
	v, docID, dirDone, err := i.blocksDb.Get(ctx, newPtr)
	switch errors.Cause(err) {
	case nil:
		if v == currentIndexedBlocksDbVersion {
			i.log.CDebugf(
				ctx, "%s/%s already indexed; skipping (type=%s, dirDone=%t)",
				newPtr, childName, ei.Type, dirDone)
			if ei.Type != data.Dir || dirDone {
				return nil, nil
			}
			return func() error {
				flushFn, err := i.blocksDb.PutMemory(
					ctx, tlfID, newPtr, currentIndexedBlocksDbVersion, docID,
					true)
				if err != nil {
					return err
				}
				i.lock.Lock()
				defer i.lock.Unlock()
				i.batchFns = append(i.batchFns, flushFn)
				return nil
			}, nil
		}
	case ldberrors.ErrNotFound:
	default:
		return nil, err
	}

	if oldPtr != data.ZeroPtr {
		_, docID, _, err = i.blocksDb.Get(ctx, oldPtr)
		switch errors.Cause(err) {
		case nil:
		case ldberrors.ErrNotFound:
			return nil, errors.WithStack(OldPtrNotFound{oldPtr})
		default:
			return nil, err
		}
	} else {
		docID = nextDocID
	}

	dirDoneFn = func() error {
		flushFn, err := i.blocksDb.PutMemory(
			ctx, tlfID, newPtr, currentIndexedBlocksDbVersion, docID, true)
		if err != nil {
			return err
		}
		i.lock.Lock()
		defer i.lock.Unlock()
		i.batchFns = append(i.batchFns, flushFn)
		return nil
	}

	// Get the content type and create a document based on that type.
	d, nameD, err := makeDoc(
		ctx, i.config, n, ei, revision, time.Unix(0, ei.Mtime))
	if err != nil {
		return nil, err
	}

	i.lock.Lock()
	defer i.lock.Unlock()
	if i.index == nil {
		return nil, errors.New("Index not loaded")
	}

	b, err := i.currBatchLocked(ctx)
	if err != nil {
		return nil, err
	}

	if d != nil {
		err = b.Index(docID, d)
		if err != nil {
			return nil, err
		}
	}
	err = b.Index(nameDocID(docID), nameD)
	if err != nil {
		return nil, err
	}

	// Put the docID into the DB after a successful indexing.
	flushFn, err := i.blocksDb.PutMemory(
		ctx, tlfID, newPtr, currentIndexedBlocksDbVersion, docID, false)
	if err != nil {
		return nil, err
	}

	i.batchFns = append(i.batchFns, func() error {
		err := flushFn()
		if err != nil {
			return err
		}

		// Save the docID -> parentDocID mapping.
		err = i.docDb.Put(ctx, docID, parentDocID, childName.Plaintext())
		if err != nil {
			return err
		}

		// Delete the old pointer if one was given.
		if oldPtr != data.ZeroPtr {
			err = i.blocksDb.Delete(ctx, tlfID, oldPtr)
			if err != nil {
				return err
			}
		}

		return nil
	})

	return dirDoneFn, nil
}

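// indexChild indexes a brand-new child of the given parent node,
// using nextDocID as its document ID.  Symlinks are skipped.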
func (i *Indexer) indexChild(
	ctx context.Context, parentNode libkbfs.Node, parentDocID string,
	childName data.PathPartString, nextDocID string,
	revision kbfsmd.Revision) (dirDoneFn func() error, err error) {
	ptr, n, ei, err := i.getCurrentPtrAndNode(ctx, parentNode, childName)
	if err != nil {
		return nil, err
	}

	if ptr == data.ZeroPtr {
		// Skip indexing symlinks for now -- they are hard to track
		// since they don't have a BlockPointer to put in the blocksDb.
		return nil, nil
	}

	return i.indexChildWithPtrAndNode(
		ctx, parentNode, parentDocID, childName, data.ZeroPtr, ptr, n, ei,
		nextDocID, revision)
}

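// updateChild re-indexes an existing child whose block pointer has
// changed from oldPtr; its doc ID is looked up from the blocks DB.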
func (i *Indexer) updateChild(
	ctx context.Context, parentNode libkbfs.Node, parentDocID string,
	childName data.PathPartString, oldPtr data.BlockPointer,
	revision kbfsmd.Revision) (dirDoneFn func() error, err error) {
	newPtr, n, ei, err := i.getCurrentPtrAndNode(ctx, parentNode, childName)
	if err != nil {
		return nil, err
	}

	if newPtr == data.ZeroPtr {
		// Symlinks should never be updated.
		return nil, errors.Errorf("Symlink %s should not be updated", childName)
	}

	return i.indexChildWithPtrAndNode(
		ctx, parentNode, parentDocID, childName, oldPtr, newPtr, n, ei,
		"" /* should get picked up from DB, not from this param */, revision)
}

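// renameChild re-indexes just the name document of an
// already-indexed child, after it has been renamed.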
func (i *Indexer) renameChild(
	ctx context.Context, parentNode libkbfs.Node, parentDocID string,
	childName data.PathPartString, revision kbfsmd.Revision) (err error) {
	ptr, n, ei, err := i.getCurrentPtrAndNode(ctx, parentNode, childName)
	if err != nil {
		return err
	}

	if ptr == data.ZeroPtr {
		// Ignore symlink renames.
		return nil
	}

	i.log.CDebugf(ctx, "Found %s for child %s", ptr, childName)

	if i.blocksDb == nil {
		return errors.New("No indexed blocks db")
	}

	// Get the docID.
	_, docID, _, err := i.blocksDb.Get(ctx, ptr)
	if err != nil {
		// Treat "not found" errors as real errors, since a rename
		// implies that the doc should have already been indexed.
		return err
	}

	i.lock.Lock()
	defer i.lock.Unlock()

	b, err := i.currBatchLocked(ctx)
	if err != nil {
		return err
	}

	newNameDoc := makeNameDoc(n, revision, time.Unix(0, ei.Mtime))
	err = b.Index(nameDocID(docID), newNameDoc)
	if err != nil {
		return err
	}

	// Rename the doc ID for the new name.
	i.batchFns = append(
		i.batchFns,
		func() error {
			// Fix the child name in the doc db.
			return i.docDb.Put(ctx, docID, parentDocID, childName.Plaintext())
		})

	return nil
}

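// deleteFromUnrefs removes the documents associated with a deleted
// entry, looking up its doc ID via the unreferenced block pointers.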
func (i *Indexer) deleteFromUnrefs(
	ctx context.Context, tlfID tlf.ID, unrefs []data.BlockPointer) (err error) {
	if i.blocksDb == nil {
		return errors.New("No indexed blocks db")
	}

	// Find the right doc ID.
	var docID string
	var unref data.BlockPointer
unrefLoop:
	for _, unref = range unrefs {
		_, docID, _, err = i.blocksDb.Get(ctx, unref)
		switch errors.Cause(err) {
		case nil:
			break unrefLoop
		case ldberrors.ErrNotFound:
			continue
		default:
			return err
		}
	}
	if docID == "" {
		i.log.CDebugf(ctx, "Couldn't find doc ID for deleted ptrs %v", unrefs)
		return nil
	}

	i.lock.Lock()
	defer i.lock.Unlock()

	b, err := i.currBatchLocked(ctx)
	if err != nil {
		return err
	}

	b.Delete(docID)
	b.Delete(nameDocID(docID))
	err = i.index.Batch(b)
	if err != nil {
		return err
	}

	i.batchFns = append(
		i.batchFns,
		func() error { return i.docDb.Delete(ctx, docID) },
		func() error { return i.blocksDb.Delete(ctx, tlfID, unref) },
	)
	return nil
}

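// fsForRev returns a read-only filesystem for the given TLF, pinned
// to the given revision.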
func (i *Indexer) fsForRev(
	ctx context.Context, tlfID tlf.ID, rev kbfsmd.Revision) (*libfs.FS, error) {
	if rev == kbfsmd.RevisionUninitialized {
		return nil, errors.New("No revision provided")
	}
	branch := data.MakeRevBranchName(rev)

	md, err := i.getMDForRev(ctx, tlfID, rev)
	if err != nil {
		return nil, err
	}

	h := md.GetTlfHandle()
	return libfs.NewReadonlyFS(
		ctx, i.config, h, branch, "", "", keybase1.MDPriorityNormal)
}

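// indexNewlySyncedTlfDir recursively indexes every child of the given
// directory node, as part of a full-TLF index.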
func (i *Indexer) indexNewlySyncedTlfDir(
	ctx context.Context, dir libkbfs.Node,
	dirDocID string, rev kbfsmd.Revision) error {
	err := i.checkDone(ctx)
	if err != nil {
		return err
	}

	children, err := i.config.KBFSOps().GetDirChildren(ctx, dir)
	if err != nil {
		return err
	}

	if len(children) == 0 {
		// Nothing to do.
		return nil
	}

	ids, err := i.blocksDb.GetNextDocIDs(len(children))
	if err != nil {
		return err
	}

	currDocID := 0
	for name, child := range children {
		dirDoneFn, err := i.indexChild(
			ctx, dir, dirDocID, name, ids[currDocID], rev)
		if err != nil {
			return err
		}
		docID := ids[currDocID]
		currDocID++

		if child.Type == data.Dir && dirDoneFn != nil {
			n, _, err := i.config.KBFSOps().Lookup(ctx, dir, name)
			if err != nil {
				return err
			}

			err = i.indexNewlySyncedTlfDir(ctx, n, docID, rev)
			if err != nil {
				return err
			}

			err = dirDoneFn()
			if err != nil {
				return err
			}
		}
	}
	return nil
}

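// recordUpdatedNodePtr records the given node's current block pointer
// in the blocks DB under docID, and returns a function to mark the
// corresponding directory as fully indexed (which also deletes oldPtr
// from the DB, if one was given).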
func (i *Indexer) recordUpdatedNodePtr(
	ctx context.Context, node libkbfs.Node, rev kbfsmd.Revision, docID string,
	oldPtr data.BlockPointer) (dirDoneFn func() error, err error) {
	md, err := i.config.KBFSOps().GetNodeMetadata(ctx, node)
	if err != nil {
		return nil, err
	}
	tlfID := node.GetFolderBranch().Tlf
	i.lock.Lock()
	defer i.lock.Unlock()
	flushFn, err := i.blocksDb.PutMemory(
		ctx, tlfID, md.BlockInfo.BlockPointer,
		currentIndexedBlocksDbVersion, docID, false)
	if err != nil {
		return nil, err
	}
	i.batchFns = append(i.batchFns, flushFn)

	return func() error {
		flushFn, err := i.blocksDb.PutMemory(
			ctx, tlfID, md.BlockInfo.BlockPointer,
			currentIndexedBlocksDbVersion, docID, true)
		if err != nil {
			return err
		}

		i.lock.Lock()
		defer i.lock.Unlock()
		i.batchFns = append(i.batchFns, flushFn)

		if oldPtr != data.ZeroPtr {
			err := i.blocksDb.Delete(ctx, tlfID, oldPtr)
			if err != nil {
				return err
			}
		}
		return nil
	}, nil
}

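// indexNewlySyncedTlf does a full index of the given TLF at the given
// revision, starting from the root of the filesystem.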
func (i *Indexer) indexNewlySyncedTlf(
	ctx context.Context, fs *libfs.FS, rev kbfsmd.Revision) (err error) {
	root := fs.RootNode()

	ids, err := i.blocksDb.GetNextDocIDs(1)
	if err != nil {
		return err
	}
	id := ids[0]
	err = i.docDb.Put(ctx, id, "", fs.Handle().GetCanonicalPath())
	if err != nil {
		return err
	}

	err = i.refreshBatch(ctx)
	if err != nil {
		return err
	}

	defer func() {
		flushErr := i.flushBatch(ctx)
		if flushErr == nil {
			return
		}
		i.log.CDebugf(ctx, "Error flushing batch: %+v", flushErr)
		if err == nil {
			err = flushErr
		}
	}()

	// Record the docID for the root node. But no need to index the
	// root dir, since it doesn't really have a name.
	dirDoneFn, err := i.recordUpdatedNodePtr(ctx, root, rev, id, data.ZeroPtr)
	if err != nil {
		return err
	}
	defer func() {
		if err != nil {
			return
		}

		err = dirDoneFn()
	}()

	return i.indexNewlySyncedTlfDir(ctx, root, id, rev)
}

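// doFullIndex indexes the entire TLF described by m at the given
// revision, recording progress in the TLF DB so an interrupted index
// can be resumed later.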
func (i *Indexer) doFullIndex(
	ctx context.Context, m tlfMessage, rev kbfsmd.Revision) (err error) {
	i.log.CDebugf(ctx, "Doing full index of %s at rev %d", m.tlfID, rev)
	defer func() {
		i.log.CDebugf(
			ctx, "Finished full index of %s at rev %d: %+v", m.tlfID, rev, err)
	}()

	md, err := i.getMDForRev(ctx, m.tlfID, rev)
	if err != nil {
		return err
	}
	err = i.progress.startIndex(m.tlfID, md.DiskUsage(), indexFull)
	if err != nil {
		return err
	}
	defer func() {
		progErr := i.progress.finishIndex(m.tlfID)
		if progErr != nil {
			i.log.CDebugf(ctx, "Couldn't finish index: %+v", progErr)
		}
	}()

	fs, err := i.fsForRev(ctx, m.tlfID, rev)
	if err != nil {
		return err
	}

	// Check whether this revision has been garbage-collected yet.  If
	// so, return a typed error.  The caller may wish to clear out the
	// current index for the TLF in this case.
	status, _, err := i.config.KBFSOps().FolderStatus(
		ctx, fs.RootNode().GetFolderBranch())
	if err != nil {
		return err
	}
	if rev <= status.LastGCRevision {
		return errors.WithStack(
			RevisionGCdError{m.tlfID, rev, status.LastGCRevision})
	}

	// Record that we've started a full sync for this TLF at this
	// revision.  If it gets interrupted, it should be resumed on the
	// next restart of the indexer.  There is no `indexedRev`, because
	// this function should only be called when a full index is
	// needed.
	err = i.tlfDb.Put(ctx, m.tlfID, kbfsmd.RevisionUninitialized, rev)
	if err != nil {
		return err
	}

	defer func() {
		if err != nil {
			return
		}

		// After a successful indexing, mark the revision as fully indexed.
		err = i.tlfDb.Put(ctx, m.tlfID, rev, kbfsmd.RevisionUninitialized)
	}()

	return i.indexNewlySyncedTlf(ctx, fs, rev)
}

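// doIncrementalIndex indexes only the changes made to the TLF between
// indexedRev and newRev.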
func (i *Indexer) doIncrementalIndex(
	ctx context.Context, m tlfMessage, indexedRev, newRev kbfsmd.Revision) (
	err error) {
	i.log.CDebugf(
		ctx, "Incremental index %s: %d -> %d", m.tlfID, indexedRev, newRev)
	defer func() {
		i.log.CDebugf(ctx, "Incremental index %s: %d -> %d: %+v",
			m.tlfID, indexedRev, newRev, err)
	}()

	// Gather list of changes after indexedRev, up to and including newRev.
	changes, refSize, err := libkbfs.GetChangesBetweenRevisions(
		ctx, i.config, m.tlfID, indexedRev, newRev)
	if err != nil {
		return err
	}

	err = i.progress.startIndex(m.tlfID, refSize, indexIncremental)
	if err != nil {
		return err
	}
	defer func() {
		progErr := i.progress.finishIndex(m.tlfID)
		if progErr != nil {
			i.log.CDebugf(ctx, "Couldn't finish index: %+v", progErr)
		}
	}()

	// Sort by path length, to make sure we process directories before
	// their children.
	sort.Slice(changes, func(i, j int) bool {
		return len(changes[i].CurrPath.Path) < len(changes[j].CurrPath.Path)
	})

	fs, err := i.fsForRev(ctx, m.tlfID, newRev)
	if err != nil {
		return err
	}

	// Save newRev as the started revision.
	err = i.tlfDb.Put(ctx, m.tlfID, indexedRev, newRev)
	if err != nil {
		return err
	}

	defer func() {
		if err != nil {
			return
		}

		// After a successful indexing, mark the revision as fully indexed.
		err = i.tlfDb.Put(ctx, m.tlfID, newRev, kbfsmd.RevisionUninitialized)
	}()

	err = i.refreshBatch(ctx)
	if err != nil {
		return err
	}

	defer func() {
		flushErr := i.flushBatch(ctx)
		if flushErr == nil {
			return
		}
		i.log.CDebugf(ctx, "Error flushing batch: %+v", flushErr)
		if err == nil {
			err = flushErr
		}
	}()

	newChanges := 0
	for _, change := range changes {
		if change.IsNew {
			newChanges++
		}
	}
	ids, err := i.blocksDb.GetNextDocIDs(newChanges)
	if err != nil {
		return err
	}
	currID := 0

	var dirDoneFns []func() error
	if len(changes) > 0 {
		// Update the root pointer first; it doesn't require re-indexing.
		oldPtr := changes[0].OldPtr
		changes = changes[1:]
		doUpdate := true
		// Use a separate error variable here, rather than shadowing
		// the named return value `err`, so that the deferred
		// dirDoneFn call below can propagate its error correctly.
		_, docID, _, getErr := i.blocksDb.Get(ctx, oldPtr)
		switch errors.Cause(getErr) {
		case nil:
		case ldberrors.ErrNotFound:
			// The update already happened.
			doUpdate = false
		default:
			return getErr
		}

		if doUpdate {
			dirDoneFn, ptrErr := i.recordUpdatedNodePtr(
				ctx, fs.RootNode(), newRev, docID, oldPtr)
			if ptrErr != nil {
				return ptrErr
			}
			defer func() {
				if err != nil {
					return
				}

				err = dirDoneFn()
			}()
		}
	}

	// Iterate through each change and call the appropriate index
	// function for it.
	for _, change := range changes {
		err := i.checkDone(ctx)
		if err != nil {
			return err
		}

		plainPath, _ := change.CurrPath.PlaintextSansTlf()
		dir, _ := path.Split(plainPath)
		dirFS, err := fs.ChrootAsLibFS(path.Clean(dir))
		if err != nil {
			return err
		}

		dirNode := dirFS.RootNode()
		md, err := i.config.KBFSOps().GetNodeMetadata(ctx, dirNode)
		if err != nil {
			return err
		}
		_, dirDocID, _, err := i.blocksDb.Get(ctx, md.BlockInfo.BlockPointer)
		if err != nil {
			return err
		}

		switch change.Type {
		case libkbfs.ChangeTypeWrite:
			var dirDoneFn func() error
			if change.IsNew {
				id := ids[currID]
				currID++
				dirDoneFn, err = i.indexChild(
					ctx, dirNode, dirDocID, change.CurrPath.TailName(),
					id, newRev)
			} else {
				dirDoneFn, err = i.updateChild(
					ctx, dirNode, dirDocID, change.CurrPath.TailName(),
					change.OldPtr, newRev)
				switch errors.Cause(err).(type) {
				case OldPtrNotFound:
					// Already updated.
					err = nil
				default:
				}
			}
			if err != nil {
				return err
			}
			if dirDoneFn != nil {
				dirDoneFns = append(dirDoneFns, dirDoneFn)
			}
		case libkbfs.ChangeTypeRename:
			err := i.renameChild(
				ctx, dirNode, dirDocID, change.CurrPath.TailName(), newRev)
			if err != nil {
				return err
			}
		case libkbfs.ChangeTypeDelete:
			err := i.deleteFromUnrefs(ctx, m.tlfID, change.UnrefsForDelete)
			if err != nil {
				return err
			}
		default:
			i.log.CDebugf(ctx, "Ignoring unknown change type %s", change.Type)
			continue
		}
	}

	// Finish all the dirs at the end, since we're not processing them
	// recursively.
	for _, f := range dirDoneFns {
		err := f()
		if err != nil {
			return err
		}
	}

	return nil
}

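// handleTlfMessage processes a single message from the TLF channel,
// deciding whether a full or incremental index (or neither) is
// needed for the TLF at the requested revision.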
func (i *Indexer) handleTlfMessage(ctx context.Context, m tlfMessage) error {
	defer i.indexWG.Done()

	doUnqueue := true
	defer func() {
		if doUnqueue {
			// We didn't end up indexing this TLF after all.
			i.progress.tlfUnqueue(m.tlfID)
		}
	}()

	// Figure out which revision to lock to, for this
	// indexing scan.
	rev := m.rev
	if rev == kbfsmd.RevisionUninitialized {
		// TODO(HOTPOT-1504) -- remove indexing if the
		// mode is no longer synced.
		return nil
	}

	indexedRev, startedRev, err := i.tlfDb.Get(ctx, m.tlfID)
	switch errors.Cause(err) {
	case nil:
	case ldberrors.ErrNotFound:
	default:
		return err
	}

	if rev <= indexedRev {
		// No need to re-index this.
		return nil
	}

	if startedRev != kbfsmd.RevisionUninitialized && startedRev != rev {
		// We've started indexing a particular revision already; we
		// need to continue on at that revision, or risk confusing the
		// index.  But re-add the message for this revision later.
		i.log.CDebugf(ctx, "Finishing incomplete index for revision %s for "+
			"TLF %s, before indexing the requested revision %d",
			startedRev, m.tlfID, rev)
		rev = startedRev
		i.indexWG.Add(1)
		select {
		case i.tlfCh <- m:
		default:
			i.indexWG.Done()
			i.log.CDebugf(
				context.Background(), "Couldn't send TLF message for %s/%d",
				m.tlfID, m.rev)
		}
	}

	doUnqueue = false
	if indexedRev != kbfsmd.RevisionUninitialized {
		err = i.doIncrementalIndex(ctx, m, indexedRev, rev)
	} else {
		err = i.doFullIndex(ctx, m, rev)
	}

	switch errors.Cause(err).(type) {
	case nil:
	case RevisionGCdError:
		// TODO(HOTPOT-1504) -- remove all documents from the index
		// and trigger a new indexing at the latest revision.
		i.log.CDebugf(
			ctx, "Ignoring a GC-revision failure for now (HOTPOT-1504): %+v",
			err)
		return nil
	default:
		return err
	}

	return nil
}

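// loop is the main indexing loop.  It waits for KBFS to finish
// initializing, registers for synced-TLF notifications, and then
// processes user-change and TLF messages until shutdown.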
func (i *Indexer) loop(ctx context.Context) {
	defer i.loopWG.Done()

	i.log.CDebugf(ctx, "Starting indexing loop")
	defer i.log.CDebugf(ctx, "Ending index loop")

	// Wait for KBFSOps to be initialized, which might happen after
	// the indexer is constructed.
	for i.config.KBFSOps() == nil {
		time.Sleep(1 * time.Second)
	}
	i.remoteStatus.Init(ctx, i.log, i.config, i)
	for i.config.Notifier() == nil {
		time.Sleep(1 * time.Second)
	}
	err := i.config.Notifier().RegisterForSyncedTlfs(i)
	if err != nil {
		i.log.CWarningf(
			ctx, "Couldn't register for synced TLF updates: %+v", err)
	}

outerLoop:
	for {
		err := i.loadIndex(ctx)
		if err != nil {
			i.log.CDebugf(ctx, "Couldn't load index: %+v", err)
		}

		state := keybase1.MobileAppState_FOREGROUND
		kbCtx := i.config.KbContext()
		for {
			select {
			case <-i.userChangedCh:
				// Re-load the index on each login/logout event.
				i.log.CDebugf(ctx, "User changed")
				continue outerLoop
			case state = <-kbCtx.NextAppStateUpdate(&state):
				// TODO(HOTPOT-1494): once we are doing actual
				// indexing in a separate goroutine, pause/unpause it
				// via a channel send from here.
				for state != keybase1.MobileAppState_FOREGROUND {
					i.log.CDebugf(ctx,
						"Pausing indexing while not foregrounded: state=%s",
						state)
					state = <-kbCtx.NextAppStateUpdate(&state)
				}
				i.log.CDebugf(ctx, "Resuming indexing while foregrounded")
				continue
			case m := <-i.tlfCh:
				ctx := i.makeContext(ctx)
				i.log.CDebugf(
					ctx, "Received TLF message for %s, rev=%d", m.tlfID, m.rev)

				err = i.handleTlfMessage(ctx, m)
				if err != nil {
					i.log.CDebugf(ctx, "Error handling TLF message: %+v", err)
				}
			case <-ctx.Done():
				return
			case <-i.shutdownCh:
				i.cancelLoop()
				return
			}
		}
	}
}

// Shutdown shuts down this indexer.
func (i *Indexer) Shutdown(ctx context.Context) error {
	close(i.shutdownCh)
	err := i.loopWG.Wait(ctx)
	if err != nil {
		return err
	}

	i.lock.Lock()
	defer i.lock.Unlock()

	return i.closeIndexLocked(ctx)
}

func (i *Indexer) waitForIndex(ctx context.Context) error {
	ch := func() <-chan struct{} {
		i.lock.RLock()
		defer i.lock.RUnlock()
		return i.indexReadyCh
	}()

	select {
	case <-ch:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func (i *Indexer) waitForSyncs(ctx context.Context) error {
	return i.indexWG.Wait(ctx)
}

// Search executes the given query and returns the results in the form
// of full KBFS paths to each hit.  `numResults` limits the number of
// returned results, and `startingResult` indicates the number of
// results that have been previously fetched -- basically it indicates
// the starting index number of the next page of desired results.  The
// return parameter `nextResult` indicates what `startingResult` could
// be set to next time, to get more results, where -1 indicates that
// there are no more results.
func (i *Indexer) Search(
	ctx context.Context, query string, numResults, startingResult int) (
	results []Result, nextResult int, err error) {
	if numResults == 0 {
		return nil, 0, nil
	}

	i.lock.RLock()
	defer i.lock.RUnlock()

	if i.index == nil {
		return nil, 0, errors.New("Index not loaded")
	}

	sQuery := bleve.NewQueryStringQuery(query)
	nextResult = startingResult
	results = make([]Result, 0, numResults)
	usedPaths := make(map[string]bool)
resultLoop:
	for len(results) < numResults {
		req := bleve.NewSearchRequestOptions(
			sQuery, numResults, nextResult, false)
		indexResults, err := i.index.Search(req)
		if err != nil {
			return nil, 0, err
		}

		// Build up the path for each result.
		for j, hit := range indexResults.Hits {
			docID := hit.ID
			var p []string // reversed list of path components
			for docID != "" {
				parentDocID, name, err := i.docDb.Get(
					ctx, strings.TrimPrefix(docID, nameDocIDPrefix))
				if err != nil {
					return nil, 0, err
				}
				p = append(p, name)
				docID = parentDocID
			}

			// Reverse the path name.
			for k := len(p)/2 - 1; k >= 0; k-- {
				opp := len(p) - 1 - k
				p[k], p[opp] = p[opp], p[k]
			}
			fullPath := path.Join(p...)
			if usedPaths[fullPath] {
				continue
			}
			usedPaths[fullPath] = true
			results = append(results, Result{fullPath})

			if len(results) >= numResults {
				nextResult += j + 1
				break resultLoop
			}
		}

		nextResult += len(indexResults.Hits)
		if len(indexResults.Hits) < numResults {
			nextResult = -1
			break
		}
	}

	return results, nextResult, nil
}

// ResetIndex shuts down the current indexer, completely removes its
// on-disk presence, and then restarts it as a blank index.
func (i *Indexer) ResetIndex(ctx context.Context) (err error) {
	i.log.CDebugf(ctx, "Resetting the index")
	defer func() { i.log.CDebugf(ctx, "Done resetting the index: %+v", err) }()

	err = i.Shutdown(ctx)
	if err != nil {
		return err
	}

	dir := filepath.Join(i.config.StorageRoot(), indexStorageDir)
	err = ioutil.RemoveAll(dir)
	if err != nil {
		return err
	}

	i.startLoop()
	return nil
}

// Progress returns the progress instance of this indexer.
func (i *Indexer) Progress() *Progress {
	return i.progress
}