// Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.

package libkbfs

import (
	"fmt"
	pathlib "path"
	"time"

	"github.com/keybase/client/go/kbfs/data"
	"github.com/keybase/client/go/kbfs/idutil"
	"github.com/keybase/client/go/kbfs/kbfscodec"
	"github.com/keybase/client/go/kbfs/kbfssync"
	"github.com/keybase/client/go/kbfs/libkey"
	"github.com/keybase/client/go/kbfs/tlf"
	"github.com/keybase/client/go/kbfs/tlfhandle"
	"github.com/keybase/client/go/libkb"
	"github.com/keybase/client/go/logger"
	"github.com/keybase/client/go/protocol/keybase1"
	"github.com/pkg/errors"
	"golang.org/x/net/context"
	"golang.org/x/sync/errgroup"
)

type overallBlockState int

const (
	// cleanState: no outstanding local writes.
	cleanState overallBlockState = iota
	// dirtyState: there are outstanding local writes that haven't yet been
	// synced.
	dirtyState
)

const (
	// numBlockSizeWorkersMax is the max number of workers to use when
	// fetching a set of block sizes.
	numBlockSizeWorkersMax = 50
	// numBlockSizesPerChunk is how many pointers to download sizes for
	// in a single block size call.
	numBlockSizesPerChunk = 20
	// truncateExtendCutoffPoint is the amount of data in an extending
	// truncate that will trigger the extend-with-a-hole algorithm.
	truncateExtendCutoffPoint = 128 * 1024
)
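
// exampleNumChunksAndWorkers is an illustrative sketch (not part of the
// original KBFS code) of how the constants above bound the fan-out when
// fetching block sizes: the pointers are split into
// ceil(numPtrs/numBlockSizesPerChunk) chunks, and the worker count is
// capped at numBlockSizeWorkersMax.
func exampleNumChunksAndWorkers(numPtrs int) (numChunks, numWorkers int) {
	// Ceiling division: a partial trailing chunk still needs a chunk.
	numChunks = (numPtrs + numBlockSizesPerChunk - 1) / numBlockSizesPerChunk
	numWorkers = numBlockSizeWorkersMax
	if numChunks < numWorkers {
		numWorkers = numChunks
	}
	return numChunks, numWorkers
}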
47
48type mdToCleanIfUnused struct {
49	md  ReadOnlyRootMetadata
50	bps blockPutStateCopiable
51}
52
53type syncInfo struct {
54	oldInfo         data.BlockInfo
55	op              *syncOp
56	unrefs          []data.BlockInfo
57	bps             blockPutStateCopiable
58	refBytes        uint64
59	unrefBytes      uint64
60	toCleanIfUnused []mdToCleanIfUnused
61}
62
63func (si *syncInfo) DeepCopy(
64	ctx context.Context, codec kbfscodec.Codec) (newSi *syncInfo, err error) {
65	newSi = &syncInfo{
66		oldInfo:    si.oldInfo,
67		refBytes:   si.refBytes,
68		unrefBytes: si.unrefBytes,
69	}
70	newSi.unrefs = make([]data.BlockInfo, len(si.unrefs))
71	copy(newSi.unrefs, si.unrefs)
72	if si.bps != nil {
73		newSi.bps, err = si.bps.deepCopy(ctx)
74		if err != nil {
75			return nil, err
76		}
77	}
78	if si.op != nil {
79		err := kbfscodec.Update(codec, &newSi.op, si.op)
80		if err != nil {
81			return nil, err
82		}
83	}
84	newSi.toCleanIfUnused = make([]mdToCleanIfUnused, len(si.toCleanIfUnused))
85	for i, toClean := range si.toCleanIfUnused {
86		// It might be overkill to deep-copy these MDs and bpses,
87		// which are probably immutable, but for now let's do the safe
88		// thing.
89		copyMd, err := toClean.md.deepCopy(codec)
90		if err != nil {
91			return nil, err
92		}
93		newSi.toCleanIfUnused[i].md = copyMd.ReadOnly()
94		newSi.toCleanIfUnused[i].bps, err = toClean.bps.deepCopy(ctx)
95		if err != nil {
96			return nil, err
97		}
98	}
99	return newSi, nil
100}
101
102func (si *syncInfo) removeReplacedBlock(ctx context.Context,
103	log logger.Logger, ptr data.BlockPointer) {
104	for i, ref := range si.op.RefBlocks {
105		if ref == ptr {
106			log.CDebugf(ctx, "Replacing old ref %v", ptr)
107			si.op.RefBlocks = append(si.op.RefBlocks[:i],
108				si.op.RefBlocks[i+1:]...)
109			for j, unref := range si.unrefs {
110				if unref.BlockPointer == ptr {
111					si.unrefs = append(si.unrefs[:j], si.unrefs[j+1:]...)
112				}
113			}
114			break
115		}
116	}
117}
118
119func (si *syncInfo) mergeUnrefCache(md *RootMetadata) {
120	for _, info := range si.unrefs {
121		// it's ok if we push the same ptr.ID/RefNonce multiple times,
122		// because the subsequent ones should have a QuotaSize of 0.
123		md.AddUnrefBlock(info)
124	}
125}
126
127type deferredState struct {
128	// Writes and truncates for blocks that were being sync'd, and
129	// need to be replayed after the sync finishes on top of the new
130	// versions of the blocks.
131	writes []func(
132		context.Context, *kbfssync.LockState, KeyMetadataWithRootDirEntry,
133		data.Path) error
134	// Blocks that need to be deleted from the dirty cache before any
135	// deferred writes are replayed.
136	dirtyDeletes []data.BlockPointer
137	waitBytes    int64
138}
139
140// folderBlockOps contains all the fields that must be synchronized by
141// blockLock. It will eventually also contain all the methods that
142// must be synchronized by blockLock, so that folderBranchOps will
143// have no knowledge of blockLock.
144//
145// -- And now, a primer on tracking dirty bytes --
146//
147// The DirtyBlockCache tracks the number of bytes that are dirtied
// system-wide, as the number of bytes that haven't yet been synced
// ("unsynced"), and the number of bytes that haven't yet been resolved
// because the overall file Sync hasn't finished ("total").
// This data helps us decide when we need to block incoming Writes, in
// order to keep memory usage from exploding.
//
// It's the responsibility of folderBlockOps (and its helper struct
// dirtyFile) to update these totals in DirtyBlockCache for the
// individual files within this TLF.  This is complicated by a few things:
//   * New writes to a file are "deferred" while a Sync is happening, and
//     are replayed after the Sync finishes.
//   * Syncs can be canceled or error out halfway through syncing the blocks,
//     leaving the file in a dirty state until the next Sync.
//   * Syncs can fail with a /recoverable/ error, in which case they get
//     retried automatically by folderBranchOps.  In that case, the retried
//     Sync also sucks in any outstanding deferred writes.
//
// With all that in mind, here is the rough breakdown of how this
// bytes-tracking is implemented:
//   * On a Write/Truncate to a block, folderBranchOps counts all the
//     newly-dirtied bytes in a file as "unsynced".  That is, if the block was
//     already in the dirty cache (and not already being synced), only
//     extensions to the block count as "unsynced" bytes.
//   * When a Sync starts, dirtyFile remembers the total of bytes being synced,
//     and the size of each block being synced.
//   * When each block put finishes successfully, dirtyFile subtracts the size
//     of that block from "unsynced".
//   * When a Sync finishes successfully, the total sum of bytes in that sync
//     is subtracted from the "total" dirty bytes outstanding.
//   * If a Sync fails, but some blocks were put successfully, those blocks
//     are "re-dirtied", which means they count as unsynced bytes again.
//     dirtyFile handles this.
//   * When a Write/Truncate is deferred due to an ongoing Sync, its bytes
//     still count towards the "unsynced" total.  In fact, this essentially
//     creates a new copy of those blocks, and the whole size of that block
//     (not just the newly-dirtied bytes) counts toward the total.  However,
//     when the write gets replayed, folderBlockOps first subtracts those bytes
//     from the system-wide numbers, since they are about to be replayed.
//   * When a Sync is retried after a recoverable failure, dirtyFile adds
//     the newly-dirtied deferred bytes to the system-wide numbers, since they
//     are now being assimilated into this Sync.
//   * dirtyFile also exposes a concept of "orphaned" blocks.  These are child
//     blocks being synced that are now referenced via a new, permanent block
//     ID from the parent indirect block.  This matters for when hard failures
//     occur during a Sync -- the blocks will no longer be accessible under
//     their previous old pointers, and so dirtyFile needs to know their old
//     bytes can be cleaned up now.
//
// An illustrative toy sketch of this accounting appears just after the
// struct definition below.
type folderBlockOps struct {
	config       Config
	log          logger.Logger
	vlog         *libkb.VDebugLog
	folderBranch data.FolderBranch
	observers    *observerList

	// forceSyncChan can be sent on to trigger an immediate
	// Sync().  It is a blocking channel.
	forceSyncChan chan<- struct{}

	// protects access to blocks in this folder and all fields
	// below.
	blockLock blockLock

	// Which files are currently dirty and have dirty blocks that are either
	// currently syncing, or waiting to be sync'd.
	dirtyFiles map[data.BlockPointer]*data.DirtyFile

	// For writes and truncates, track the unsynced to-be-unref'd
	// block infos, per-path.
	unrefCache map[data.BlockRef]*syncInfo

	// dirtyDirs tracks which directories are currently dirty in this
	// TLF.
	dirtyDirs          map[data.BlockPointer][]data.BlockInfo
	dirtyDirsSyncing   bool
	deferredDirUpdates []func(lState *kbfssync.LockState) error

	// dirtyRootDirEntry is a DirEntry representing the root of the
	// TLF (to be copied into the RootMetadata on a sync).
	dirtyRootDirEntry *data.DirEntry

	chargedTo keybase1.UserOrTeamID

	// Track deferred operations on a per-file basis.
	deferred map[data.BlockRef]deferredState

	// set to true if this write or truncate should be deferred
	doDeferWrite bool

	// While this channel is non-nil and non-closed, writes get blocked.
	holdNewWritesCh <-chan struct{}

	// nodeCache itself is goroutine-safe, but write/truncate must
	// call PathFromNode() only under blockLock (see nodeCache
	// comments in folder_branch_ops.go).
	nodeCache NodeCache
}
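
// exampleDirtyByteTracker is an illustrative toy sketch (not part of the
// original KBFS code) of the "unsynced"/"total" accounting described in
// the primer above: writes add to both counters, each successful block
// put drains "unsynced", a failed put re-dirties its bytes, and a
// successful Sync drains "total".
type exampleDirtyByteTracker struct {
	unsynced int64 // dirty bytes not yet put to the block server
	total    int64 // dirty bytes not yet resolved by a finished Sync
}

// write records newly-dirtied bytes from a Write/Truncate.
func (t *exampleDirtyByteTracker) write(newlyDirtied int64) {
	t.unsynced += newlyDirtied
	t.total += newlyDirtied
}

// blockPutDone records a successful put of one block during a Sync.
func (t *exampleDirtyByteTracker) blockPutDone(blockSize int64) {
	t.unsynced -= blockSize
}

// blockRedirtied records a block whose put succeeded in a Sync that
// later failed, so its bytes count as unsynced again.
func (t *exampleDirtyByteTracker) blockRedirtied(blockSize int64) {
	t.unsynced += blockSize
}

// syncDone records a fully-successful Sync of the given bytes.
func (t *exampleDirtyByteTracker) syncDone(syncedBytes int64) {
	t.total -= syncedBytes
}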

// Only exported methods of folderBlockOps should be used outside of this
// file.
//
// Although, temporarily, folderBranchOps is allowed to reach in and
// manipulate folderBlockOps fields and methods directly.

func (fbo *folderBlockOps) id() tlf.ID {
	return fbo.folderBranch.Tlf
}

func (fbo *folderBlockOps) branch() data.BranchName {
	return fbo.folderBranch.Branch
}

func (fbo *folderBlockOps) isSyncedTlf() bool {
	return fbo.branch() == data.MasterBranch && fbo.config.IsSyncedTlf(fbo.id())
}

// GetState returns the overall block state of this TLF.
func (fbo *folderBlockOps) GetState(
	lState *kbfssync.LockState) overallBlockState {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	if len(fbo.dirtyFiles) == 0 && len(fbo.dirtyDirs) == 0 &&
		fbo.dirtyRootDirEntry == nil {
		return cleanState
	}
	return dirtyState
}
// getCleanEncodedBlockSizesLocked retrieves the encoded sizes and
// block statuses of the clean blocks pointed to by each of the block
// pointers in `ptrs`, which must be valid, either from the cache or
// from the server.  If `rtype` is `blockReadParallel`, it's assumed
// that some coordinating goroutine is holding the correct locks, and
// in that case `lState` must be `nil`.
func (fbo *folderBlockOps) getCleanEncodedBlockSizesLocked(ctx context.Context,
	lState *kbfssync.LockState, kmd libkey.KeyMetadata,
	ptrs []data.BlockPointer, branch data.BranchName,
	rtype data.BlockReqType, assumeCacheIsLive bool) (
	sizes []uint32, statuses []keybase1.BlockStatus, err error) {
	if rtype != data.BlockReadParallel {
		if rtype == data.BlockWrite {
			panic("Cannot get the size of a block for writing")
		}
		fbo.blockLock.AssertAnyLocked(lState)
	} else if lState != nil {
		panic("Non-nil lState passed to getCleanEncodedBlockSizesLocked " +
			"with blockReadParallel")
	}

	sizes = make([]uint32, len(ptrs))
	statuses = make([]keybase1.BlockStatus, len(ptrs))
	var toFetchIndices []int
	var ptrsToFetch []data.BlockPointer
	for i, ptr := range ptrs {
		if !ptr.IsValid() {
			return nil, nil, InvalidBlockRefError{ptr.Ref()}
		}

		if assumeCacheIsLive {
			// If we're assuming all blocks in the cache are live, we just
			// need to get the block size, which we can do from either one
			// of the caches.
			if block, err := fbo.config.BlockCache().Get(ptr); err == nil {
				sizes[i] = block.GetEncodedSize()
				statuses[i] = keybase1.BlockStatus_LIVE
				continue
			}
			if diskBCache := fbo.config.DiskBlockCache(); diskBCache != nil {
				cacheType := DiskBlockAnyCache
				if fbo.isSyncedTlf() {
					cacheType = DiskBlockSyncCache
				}
				if buf, _, _, err := diskBCache.Get(
					ctx, fbo.id(), ptr.ID, cacheType); err == nil {
					sizes[i] = uint32(len(buf))
					statuses[i] = keybase1.BlockStatus_LIVE
					continue
				}
			}
		}

		if err := checkDataVersion(fbo.config, data.Path{}, ptr); err != nil {
			return nil, nil, err
		}

		// Fetch this block from the server.
		ptrsToFetch = append(ptrsToFetch, ptr)
		toFetchIndices = append(toFetchIndices, i)
	}

	defer func() {
		fbo.vlog.CLogf(
			ctx, libkb.VLog1, "GetEncodedSizes ptrs=%v sizes=%d statuses=%s: "+
				"%+v", ptrs, sizes, statuses, err)
		// In certain testing situations, a block might be represented
		// with a 0 size in our journal or be missing from our local
		// data stores, and we need to reconstruct the size using the
		// cache in order to make the accounting work out for the test.
		for i, ptr := range ptrs {
			if sizes[i] == 0 {
				if block, cerr := fbo.config.BlockCache().Get(
					ptr); cerr == nil {
					fbo.vlog.CLogf(
						ctx, libkb.VLog1,
						"Fixing encoded size of %v with cached copy", ptr)
					sizes[i] = block.GetEncodedSize()
				}
			}
		}
	}()

	// Unlock the blockLock while we wait for the network, only if
	// it's locked for reading by a single goroutine.  If it's locked
	// for writing, that indicates we are performing an atomic write
	// operation, and we need to ensure that nothing else comes in and
	// modifies the blocks, so don't unlock.
	//
	// If there may be multiple goroutines fetching blocks under the
	// same lState, we can't safely unlock since some of the other
	// goroutines may be operating on the data assuming they have the
	// lock.
	bops := fbo.config.BlockOps()
	var fetchedSizes []uint32
	var fetchedStatuses []keybase1.BlockStatus
	if rtype != data.BlockReadParallel && rtype != data.BlockLookup {
		fbo.blockLock.DoRUnlockedIfPossible(lState, func(*kbfssync.LockState) {
			fetchedSizes, fetchedStatuses, err = bops.GetEncodedSizes(
				ctx, kmd, ptrsToFetch)
		})
	} else {
		fetchedSizes, fetchedStatuses, err = bops.GetEncodedSizes(
			ctx, kmd, ptrsToFetch)
	}
	if err != nil {
		return nil, nil, err
	}

	for i, j := range toFetchIndices {
		sizes[j] = fetchedSizes[i]
		statuses[j] = fetchedStatuses[i]
	}

	return sizes, statuses, nil
}

// getBlockHelperLocked retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server. If
// notifyPath is valid and the block isn't cached, trigger a read
// notification.  If `rtype` is `blockReadParallel`, it's assumed that
// some coordinating goroutine is holding the correct locks, and
// in that case `lState` must be `nil`.
//
// This must be called only by get{File,Dir}BlockHelperLocked().
func (fbo *folderBlockOps) getBlockHelperLocked(ctx context.Context,
	lState *kbfssync.LockState, kmd libkey.KeyMetadata, ptr data.BlockPointer,
	branch data.BranchName, newBlock makeNewBlock, lifetime data.BlockCacheLifetime,
	notifyPath data.Path, rtype data.BlockReqType) (data.Block, error) {
	if rtype != data.BlockReadParallel {
		fbo.blockLock.AssertAnyLocked(lState)
	} else if lState != nil {
		panic("Non-nil lState passed to getBlockHelperLocked " +
			"with blockReadParallel")
	}

	if !ptr.IsValid() {
		return nil, InvalidBlockRefError{ptr.Ref()}
	}

	if block, err := fbo.config.DirtyBlockCache().Get(
		ctx, fbo.id(), ptr, branch); err == nil {
		return block, nil
	}

	if block, lifetime, err := fbo.config.BlockCache().GetWithLifetime(ptr); err == nil {
		if lifetime != data.PermanentEntry {
			// If the block was cached in the past, and is not a permanent
			// block (i.e., currently being written by the user), we need
			// to handle it as if it's an on-demand request so that its
			// downstream prefetches are triggered correctly according to
			// the new on-demand fetch priority.
			action := fbo.config.Mode().DefaultBlockRequestAction()
			if fbo.isSyncedTlf() {
				action = action.AddSync()
			}
			prefetchStatus := fbo.config.PrefetchStatus(ctx, fbo.id(), ptr)
			fbo.config.BlockOps().Prefetcher().ProcessBlockForPrefetch(ctx, ptr,
				block, kmd, defaultOnDemandRequestPriority-1, lifetime,
				prefetchStatus, action)
		}
		return block, nil
	}

	if err := checkDataVersion(fbo.config, notifyPath, ptr); err != nil {
		return nil, err
	}

	if notifyPath.IsValidForNotification() {
		fbo.config.Reporter().Notify(ctx, readNotification(notifyPath, false))
		defer fbo.config.Reporter().Notify(ctx,
			readNotification(notifyPath, true))
	}

	// Unlock the blockLock while we wait for the network, only if
	// it's locked for reading by a single goroutine.  If it's locked
	// for writing, that indicates we are performing an atomic write
	// operation, and we need to ensure that nothing else comes in and
	// modifies the blocks, so don't unlock.
	//
	// If there may be multiple goroutines fetching blocks under the
	// same lState, we can't safely unlock since some of the other
	// goroutines may be operating on the data assuming they have the
	// lock.
	// fetch the block, and add to cache
	block := newBlock()
	bops := fbo.config.BlockOps()
	var err error
	if rtype != data.BlockReadParallel && rtype != data.BlockLookup {
		fbo.blockLock.DoRUnlockedIfPossible(lState, func(*kbfssync.LockState) {
			err = bops.Get(ctx, kmd, ptr, block, lifetime, fbo.branch())
		})
	} else {
		err = bops.Get(ctx, kmd, ptr, block, lifetime, fbo.branch())
	}
	if err != nil {
		return nil, err
	}

	return block, nil
}

// getFileBlockHelperLocked retrieves the block pointed to by ptr,
// which must be valid, either from an internal cache, the block
// cache, or from the server. An error is returned if the retrieved
// block is not a file block.  If `rtype` is `blockReadParallel`, it's
// assumed that some coordinating goroutine is holding the correct
// locks, and in that case `lState` must be `nil`.
//
// This must be called only by GetFileBlockForReading(),
// getFileBlockLocked(), and getFileLocked().
//
// p is used only when reporting errors and sending read
// notifications, and can be empty.
func (fbo *folderBlockOps) getFileBlockHelperLocked(ctx context.Context,
	lState *kbfssync.LockState, kmd libkey.KeyMetadata, ptr data.BlockPointer,
	branch data.BranchName, p data.Path, rtype data.BlockReqType) (
	*data.FileBlock, error) {
	if rtype != data.BlockReadParallel {
		fbo.blockLock.AssertAnyLocked(lState)
	} else if lState != nil {
		panic("Non-nil lState passed to getFileBlockHelperLocked " +
			"with blockReadParallel")
	}

	block, err := fbo.getBlockHelperLocked(
		ctx, lState, kmd, ptr, branch, data.NewFileBlock, data.TransientEntry, p, rtype)
	if err != nil {
		return nil, err
	}

	fblock, ok := block.(*data.FileBlock)
	if !ok {
		return nil, NotFileBlockError{ptr, branch, p}
	}

	return fblock, nil
}

// GetCleanEncodedBlocksSizeSum retrieves the sum of the encoded sizes
// of the blocks pointed to by ptrs, all of which must be valid,
// either from the cache or from the server.
//
// The caller can specify a set of pointers using
// `ignoreRecoverableForRemovalErrors` for which "recoverable" fetch
// errors are tolerated.  In that case, the returned sum will not
// include the size for any pointers in the
// `ignoreRecoverableForRemovalErrors` set that hit such an error.
//
// This should be called for "internal" operations, like conflict
// resolution and state checking, which don't know what kind of block
// the pointers refer to.  Any downloaded blocks will not be cached,
// if they weren't in the cache already.
//
// If `onlyCountIfLive` is true, the sum only includes blocks that the
// bserver thinks are currently reachable from the merged branch
// (i.e., un-archived).
func (fbo *folderBlockOps) GetCleanEncodedBlocksSizeSum(ctx context.Context,
	lState *kbfssync.LockState, kmd libkey.KeyMetadata, ptrs []data.BlockPointer,
	ignoreRecoverableForRemovalErrors map[data.BlockPointer]bool,
	branch data.BranchName, onlyCountIfLive bool) (uint64, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)

	ptrCh := make(chan []data.BlockPointer, len(ptrs))
	sumCh := make(chan uint32, len(ptrs))

	numChunks := (len(ptrs) + numBlockSizesPerChunk - 1) /
		numBlockSizesPerChunk
	numWorkers := numBlockSizeWorkersMax
	if numChunks < numWorkers {
		numWorkers = numChunks
	}

	currChunk := make([]data.BlockPointer, 0, numBlockSizesPerChunk)
	for _, ptr := range ptrs {
		currChunk = append(currChunk, ptr)
		if len(currChunk) == numBlockSizesPerChunk {
			ptrCh <- currChunk
			currChunk = make([]data.BlockPointer, 0, numBlockSizesPerChunk)
		}
	}
	if len(currChunk) > 0 {
		ptrCh <- currChunk
	}

	// If we don't care if something's live or not, there's no reason
	// not to use the cached block.
	assumeCacheIsLive := !onlyCountIfLive
	eg, groupCtx := errgroup.WithContext(ctx)
	for i := 0; i < numWorkers; i++ {
		eg.Go(func() error {
			for ptrs := range ptrCh {
				sizes, statuses, err := fbo.getCleanEncodedBlockSizesLocked(
					groupCtx, nil, kmd, ptrs, branch,
					data.BlockReadParallel, assumeCacheIsLive)
				for i, ptr := range ptrs {
					// TODO: we might be able to recover the size of the
					// top-most block of a removed file using the merged
					// directory entry, the same way we do in
					// `folderBranchOps.unrefEntry`.
					if isRecoverableBlockErrorForRemoval(err) &&
						ignoreRecoverableForRemovalErrors[ptr] {
						fbo.log.CDebugf(
							groupCtx, "Hit an ignorable, recoverable "+
								"error for block %v: %v", ptr, err)
						continue
					}
					if err != nil {
						return err
					}

					if onlyCountIfLive &&
						statuses[i] != keybase1.BlockStatus_LIVE {
						sumCh <- 0
					} else {
						sumCh <- sizes[i]
					}
				}
			}
			return nil
		})
	}
	close(ptrCh)

	if err := eg.Wait(); err != nil {
		return 0, err
	}
	close(sumCh)

	var sum uint64
	for size := range sumCh {
		sum += uint64(size)
	}
	return sum, nil
}
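
// exampleChunkedSizeSum is an illustrative sketch (not part of the
// original KBFS code) of the bounded worker-pool pattern used by
// GetCleanEncodedBlocksSizeSum above: pre-load the chunks into a
// buffered channel, close it, let a capped number of errgroup workers
// drain it, and collect the per-chunk results from a second buffered
// channel once all workers have finished.
func exampleChunkedSizeSum(
	ctx context.Context, chunks [][]uint32) (uint64, error) {
	chunkCh := make(chan []uint32, len(chunks))
	sumCh := make(chan uint64, len(chunks))
	for _, chunk := range chunks {
		chunkCh <- chunk
	}
	close(chunkCh)

	numWorkers := numBlockSizeWorkersMax
	if len(chunks) < numWorkers {
		numWorkers = len(chunks)
	}

	eg, _ := errgroup.WithContext(ctx)
	for i := 0; i < numWorkers; i++ {
		eg.Go(func() error {
			// Each worker drains chunks until the channel is empty.
			for chunk := range chunkCh {
				var chunkSum uint64
				for _, size := range chunk {
					chunkSum += uint64(size)
				}
				sumCh <- chunkSum
			}
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		return 0, err
	}
	// Safe to close: all senders have finished.
	close(sumCh)

	var sum uint64
	for s := range sumCh {
		sum += s
	}
	return sum, nil
}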

// getDirBlockHelperLocked retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server. An error
// is returned if the retrieved block is not a dir block.
//
// This must be called only by GetDirBlockForReading() and
// getDirLocked().
//
// p is used only when reporting errors, and can be empty.
func (fbo *folderBlockOps) getDirBlockHelperLocked(ctx context.Context,
	lState *kbfssync.LockState, kmd libkey.KeyMetadata, ptr data.BlockPointer,
	branch data.BranchName, p data.Path, rtype data.BlockReqType) (*data.DirBlock, error) {
	if rtype != data.BlockReadParallel {
		fbo.blockLock.AssertAnyLocked(lState)
	}

	// Check data version explicitly here, with the right path, since
	// we pass an empty path below.
	if err := checkDataVersion(fbo.config, p, ptr); err != nil {
		return nil, err
	}

	// Pass in an empty notify path because notifications should only
	// trigger for file reads.
	block, err := fbo.getBlockHelperLocked(
		ctx, lState, kmd, ptr, branch, data.NewDirBlock, data.TransientEntry,
		data.Path{}, rtype)
	if err != nil {
		return nil, err
	}

	dblock, ok := block.(*data.DirBlock)
	if !ok {
		return nil, NotDirBlockError{ptr, branch, p}
	}

	return dblock, nil
}

// GetFileBlockForReading retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server. An error
// is returned if the retrieved block is not a file block.
//
// This should be called for "internal" operations, like conflict
// resolution and state checking. "Real" operations should use
// getFileBlockLocked() and getFileLocked() instead.
//
// p is used only when reporting errors, and can be empty.
func (fbo *folderBlockOps) GetFileBlockForReading(ctx context.Context,
	lState *kbfssync.LockState, kmd libkey.KeyMetadata, ptr data.BlockPointer,
	branch data.BranchName, p data.Path) (*data.FileBlock, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return fbo.getFileBlockHelperLocked(
		ctx, lState, kmd, ptr, branch, p, data.BlockRead)
}

// GetDirBlockForReading retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server. An error
// is returned if the retrieved block is not a dir block.
//
// This should be called for "internal" operations, like conflict
// resolution and state checking. "Real" operations should use
// getDirLocked() instead.
//
// p is used only when reporting errors, and can be empty.
func (fbo *folderBlockOps) GetDirBlockForReading(ctx context.Context,
	lState *kbfssync.LockState, kmd libkey.KeyMetadata, ptr data.BlockPointer,
	branch data.BranchName, p data.Path) (*data.DirBlock, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return fbo.getDirBlockHelperLocked(
		ctx, lState, kmd, ptr, branch, p, data.BlockRead)
}

// getFileBlockLocked retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server. An error
// is returned if the retrieved block is not a file block.
//
// The given path must be valid, and the given pointer must be its
// tail pointer or an indirect pointer from it. A read notification is
// triggered for the given path only if the block isn't in the cache.
//
// This shouldn't be called for "internal" operations, like conflict
// resolution and state checking -- use GetFileBlockForReading() for
// those instead.
//
// When rtype == blockWrite and the cached version of the block is
// currently clean, or the block is currently being synced, this
// method makes a copy of the file block and returns it.  If this
// method might be called again for the same block within a single
// operation, it is the caller's responsibility to write that block
// back to the cache as dirty.
//
// Note that blockLock must be locked exactly when rtype ==
// blockWrite, and must be r-locked when rtype == blockRead.  (This
// differs from getDirLocked.)  This is because a write operation
// (like write, truncate and sync which lock blockLock) fetching a
// file block will almost always need to modify that block, and so
// will pass in blockWrite.  If rtype == blockReadParallel, it's
// assumed that some coordinating goroutine is holding the correct
// locks, and in that case `lState` must be `nil`.
//
// file is used only when reporting errors and sending read
// notifications, and can be empty except that file.Branch must be set
// correctly.
//
// This method also returns whether the block was already dirty.
func (fbo *folderBlockOps) getFileBlockLocked(ctx context.Context,
	lState *kbfssync.LockState, kmd libkey.KeyMetadata, ptr data.BlockPointer,
	file data.Path, rtype data.BlockReqType) (
	fblock *data.FileBlock, wasDirty bool, err error) {
	switch rtype {
	case data.BlockRead:
		fbo.blockLock.AssertRLocked(lState)
	case data.BlockWrite:
		fbo.blockLock.AssertLocked(lState)
	case data.BlockReadParallel:
		// This goroutine might not be the official lock holder, so
		// don't make any assertions.
		if lState != nil {
			panic("Non-nil lState passed to getFileBlockLocked " +
				"with blockReadParallel")
		}
	case data.BlockLookup:
		panic("blockLookup should only be used for directory blocks")
	default:
		panic(fmt.Sprintf("Unknown block req type: %d", rtype))
	}

	fblock, err = fbo.getFileBlockHelperLocked(
		ctx, lState, kmd, ptr, file.Branch, file, rtype)
	if err != nil {
		return nil, false, err
	}

	wasDirty = fbo.config.DirtyBlockCache().IsDirty(fbo.id(), ptr, file.Branch)
	if rtype == data.BlockWrite {
		// Copy the block if it's for writing, and either the
		// block is not yet dirty or the block is currently
		// being sync'd and needs a copy even though it's
		// already dirty.
		df := fbo.dirtyFiles[file.TailPointer()]
		if !wasDirty || (df != nil && df.BlockNeedsCopy(ptr)) {
			fblock = fblock.DeepCopy()
		}
	}
	return fblock, wasDirty, nil
}
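
// exampleDirtyAfterWrite is an illustrative sketch (not part of the
// original KBFS code) of the caller contract described above for
// rtype == blockWrite: fetch the (possibly copied) block, modify it,
// and write it back to the dirty cache so the copy isn't lost.  It
// assumes a direct (non-indirect) top block whose Contents already
// cover the written range.
func (fbo *folderBlockOps) exampleDirtyAfterWrite(
	ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata,
	file data.Path, off int64, buf []byte) error {
	fbo.blockLock.AssertLocked(lState)
	fblock, _, err := fbo.getFileBlockLocked(
		ctx, lState, kmd, file.TailPointer(), file, data.BlockWrite)
	if err != nil {
		return err
	}
	// Modify the returned copy...
	copy(fblock.Contents[off:], buf)
	// ...and put it back as dirty, since getFileBlockLocked itself
	// doesn't mark the copy dirty.
	return fbo.config.DirtyBlockCache().Put(
		ctx, fbo.id(), file.TailPointer(), file.Branch, fblock)
}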

// getFileLocked is getFileBlockLocked called with file.tailPointer().
func (fbo *folderBlockOps) getFileLocked(ctx context.Context,
	lState *kbfssync.LockState, kmd libkey.KeyMetadata, file data.Path,
	rtype data.BlockReqType) (*data.FileBlock, error) {
	// Callers should have already done this check, but it doesn't
	// hurt to do it again.
	if !file.IsValid() {
		return nil, errors.WithStack(InvalidPathError{file})
	}
	fblock, _, err := fbo.getFileBlockLocked(
		ctx, lState, kmd, file.TailPointer(), file, rtype)
	return fblock, err
}

func (fbo *folderBlockOps) getIndirectFileBlockInfosLocked(
	ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata,
	file data.Path) ([]data.BlockInfo, error) {
	fbo.blockLock.AssertRLocked(lState)
	var id keybase1.UserOrTeamID // Data reads don't depend on the id.
	fd := fbo.newFileData(lState, file, id, kmd)
	return fd.GetIndirectFileBlockInfos(ctx)
}

// GetIndirectFileBlockInfos returns a list of BlockInfos for all
// indirect blocks of the given file. If the returned error is a
// recoverable one (as determined by
// isRecoverableBlockErrorForRemoval), the returned list may still be
// non-empty, and holds all the BlockInfos for all found indirect
// blocks.
func (fbo *folderBlockOps) GetIndirectFileBlockInfos(ctx context.Context,
	lState *kbfssync.LockState, kmd libkey.KeyMetadata, file data.Path) (
	[]data.BlockInfo, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	return fbo.getIndirectFileBlockInfosLocked(ctx, lState, kmd, file)
}

// GetIndirectDirBlockInfos returns a list of BlockInfos for all
// indirect blocks of the given directory. If the returned error is a
// recoverable one (as determined by
// isRecoverableBlockErrorForRemoval), the returned list may still be
// non-empty, and holds all the BlockInfos for all found indirect
// blocks.
func (fbo *folderBlockOps) GetIndirectDirBlockInfos(
	ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata,
	dir data.Path) ([]data.BlockInfo, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	var id keybase1.UserOrTeamID // Data reads don't depend on the id.
	fd := fbo.newDirDataLocked(lState, dir, id, kmd)
	return fd.GetIndirectDirBlockInfos(ctx)
}

// GetIndirectFileBlockInfosWithTopBlock returns a list of BlockInfos
// for all indirect blocks of the given file, starting from the given
// top-most block. If the returned error is a recoverable one (as
// determined by isRecoverableBlockErrorForRemoval), the returned list
// may still be non-empty, and holds all the BlockInfos for all found
// indirect blocks. (This will be relevant when we handle multiple
// levels of indirection.)
func (fbo *folderBlockOps) GetIndirectFileBlockInfosWithTopBlock(
	ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata, file data.Path,
	topBlock *data.FileBlock) (
	[]data.BlockInfo, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	var id keybase1.UserOrTeamID // Data reads don't depend on the id.
	fd := fbo.newFileData(lState, file, id, kmd)
	return fd.GetIndirectFileBlockInfosWithTopBlock(ctx, topBlock)
}

func (fbo *folderBlockOps) getChargedToLocked(
	ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata) (
	keybase1.UserOrTeamID, error) {
	fbo.blockLock.AssertAnyLocked(lState)
	if !fbo.chargedTo.IsNil() {
		return fbo.chargedTo, nil
	}
	chargedTo, err := chargedToForTLF(
		ctx, fbo.config.KBPKI(), fbo.config.KBPKI(), fbo.config,
		kmd.GetTlfHandle())
	if err != nil {
		return keybase1.UserOrTeamID(""), err
	}
	fbo.chargedTo = chargedTo
	return chargedTo, nil
}

// ClearChargedTo clears out the cached chargedTo UID for this FBO.
func (fbo *folderBlockOps) ClearChargedTo(lState *kbfssync.LockState) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	fbo.chargedTo = keybase1.UserOrTeamID("")
}
// deepCopyFileLocked makes a complete copy of the given file, deduping
// leaf blocks and making new random BlockPointers for all indirect
// blocks.  It returns the new top pointer of the copy, and all the new
// child pointers in the copy.  It takes a custom DirtyBlockCache, which
// directs where the resulting block copies are stored.
func (fbo *folderBlockOps) deepCopyFileLocked(
	ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata, file data.Path,
	dirtyBcache data.DirtyBlockCacheSimple, dataVer data.Ver) (
	newTopPtr data.BlockPointer, allChildPtrs []data.BlockPointer, err error) {
	// Deep copying doesn't alter any data in use, it only makes
	// copies, so only a read lock is needed.
	fbo.blockLock.AssertRLocked(lState)
	chargedTo, err := chargedToForTLF(
		ctx, fbo.config.KBPKI(), fbo.config.KBPKI(), fbo.config,
		kmd.GetTlfHandle())
	if err != nil {
		return data.BlockPointer{}, nil, err
	}
	fd := fbo.newFileDataWithCache(
		lState, file, chargedTo, kmd, dirtyBcache)
	return fd.DeepCopy(ctx, dataVer)
}

func (fbo *folderBlockOps) cacheHashBehavior() data.BlockCacheHashBehavior {
	return cacheHashBehavior(fbo.config, fbo.config, fbo.id())
}

func (fbo *folderBlockOps) UndupChildrenInCopy(ctx context.Context,
	lState *kbfssync.LockState, kmd libkey.KeyMetadata, file data.Path, bps blockPutState,
	dirtyBcache data.DirtyBlockCacheSimple, topBlock *data.FileBlock) (
	[]data.BlockInfo, error) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
	if err != nil {
		return nil, err
	}
	fd := fbo.newFileDataWithCache(
		lState, file, chargedTo, kmd, dirtyBcache)
	return fd.UndupChildrenInCopy(ctx, fbo.config.BlockCache(),
		fbo.config.BlockOps(), bps, topBlock, fbo.cacheHashBehavior())
}

func (fbo *folderBlockOps) ReadyNonLeafBlocksInCopy(ctx context.Context,
	lState *kbfssync.LockState, kmd libkey.KeyMetadata, file data.Path, bps blockPutState,
	dirtyBcache data.DirtyBlockCacheSimple, topBlock *data.FileBlock) (
	[]data.BlockInfo, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
	if err != nil {
		return nil, err
	}

	fd := fbo.newFileDataWithCache(
		lState, file, chargedTo, kmd, dirtyBcache)
	return fd.ReadyNonLeafBlocksInCopy(ctx, fbo.config.BlockCache(),
		fbo.config.BlockOps(), bps, topBlock, fbo.cacheHashBehavior())
}

// getDirLocked retrieves the block pointed to by the tail pointer of
// the given path, which must be valid, either from the cache or from
// the server. An error is returned if the retrieved block is not a
// dir block.
//
// This shouldn't be called for "internal" operations, like conflict
// resolution and state checking -- use GetDirBlockForReading() for
// those instead.
//
// When rtype == blockWrite and the cached version of the block is
// currently clean, this method makes a copy of the directory block
// and returns it.  If this method might be called again for the same
// block within a single operation, it is the caller's responsibility
// to write that block back to the cache as dirty.
//
// Note that blockLock must be either r-locked or locked, but
// independently of rtype. (This differs from getFileLocked and
// getFileBlockLocked.) File write operations (which lock blockLock)
// don't need a copy of parent dir blocks, and non-file write
// operations do need to copy dir blocks for modifications.
func (fbo *folderBlockOps) getDirLocked(ctx context.Context,
	lState *kbfssync.LockState, kmd libkey.KeyMetadata, ptr data.BlockPointer, dir data.Path,
	rtype data.BlockReqType) (*data.DirBlock, bool, error) {
	switch rtype {
	case data.BlockRead, data.BlockWrite, data.BlockLookup:
		fbo.blockLock.AssertAnyLocked(lState)
	case data.BlockReadParallel:
		// This goroutine might not be the official lock holder, so
		// don't make any assertions.
		if lState != nil {
			panic("Non-nil lState passed to getDirLocked " +
				"with blockReadParallel")
		}
	default:
		panic(fmt.Sprintf("Unknown block req type: %d", rtype))
	}

	// Callers should have already done this check, but it doesn't
	// hurt to do it again.
	if !dir.IsValid() {
		return nil, false, errors.WithStack(InvalidPathError{dir})
	}

	// Get the block for the last element in the path.
	dblock, err := fbo.getDirBlockHelperLocked(
		ctx, lState, kmd, ptr, dir.Branch, dir, rtype)
	if err != nil {
		return nil, false, err
	}

	wasDirty := fbo.config.DirtyBlockCache().IsDirty(fbo.id(), ptr, dir.Branch)
	if rtype == data.BlockWrite && !wasDirty {
		// Copy the block if it's for writing and the block is
		// not yet dirty.
		dblock = dblock.DeepCopy()
	}
	return dblock, wasDirty, nil
}

// GetDir retrieves the block pointed to by the tail pointer of the
// given path, which must be valid, either from the cache or from the
// server. An error is returned if the retrieved block is not a dir
// block.
//
// This shouldn't be called for "internal" operations, like conflict
// resolution and state checking -- use GetDirBlockForReading() for
// those instead.
//
// When rtype == blockWrite and the cached version of the block is
// currently clean, this method makes a copy of the directory block
// and returns it.  If this method might be called again for the same
// block within a single operation, it is the caller's responsibility
// to write that block back to the cache as dirty.
func (fbo *folderBlockOps) GetDir(
	ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata, dir data.Path,
	rtype data.BlockReqType) (*data.DirBlock, error) {
	fbo.blockLock.RLock(lState)
	defer fbo.blockLock.RUnlock(lState)
	dblock, _, err := fbo.getDirLocked(
		ctx, lState, kmd, dir.TailPointer(), dir, rtype)
	return dblock, err
}

type dirCacheUndoFn func(lState *kbfssync.LockState)

func (fbo *folderBlockOps) wrapWithBlockLock(fn func()) dirCacheUndoFn {
	return func(lState *kbfssync.LockState) {
		if fn == nil {
			return
		}
		fbo.blockLock.Lock(lState)
		defer fbo.blockLock.Unlock(lState)
		fn()
	}
}
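
// exampleUndoOnFailure is an illustrative sketch (not part of the
// original KBFS code) of the dirCacheUndoFn pattern: apply a cached
// dir-entry change, run a follow-up step (`nextStep` is a hypothetical
// stand-in), and roll the change back if that step fails.  The undo fn
// re-takes blockLock itself, so it's called without the lock held.
func (fbo *folderBlockOps) exampleUndoOnFailure(
	ctx context.Context, lState *kbfssync.LockState,
	kmd KeyMetadataWithRootDirEntry, dir data.Path,
	name data.PathPartString, de data.DirEntry,
	nextStep func() error) error {
	undo, err := fbo.AddDirEntryInCache(ctx, lState, kmd, dir, name, de)
	if err != nil {
		return err
	}
	if err := nextStep(); err != nil {
		// Roll back the cached dir-entry change.
		undo(lState)
		return err
	}
	return nil
}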

func (fbo *folderBlockOps) newDirDataLocked(lState *kbfssync.LockState,
	dir data.Path, chargedTo keybase1.UserOrTeamID, kmd libkey.KeyMetadata) *data.DirData {
	fbo.blockLock.AssertAnyLocked(lState)
	return data.NewDirData(dir, chargedTo, fbo.config.BlockSplitter(), kmd,
		func(ctx context.Context, kmd libkey.KeyMetadata, ptr data.BlockPointer,
			dir data.Path, rtype data.BlockReqType) (*data.DirBlock, bool, error) {
			lState := lState
			if rtype == data.BlockReadParallel {
				lState = nil
			}
			return fbo.getDirLocked(
				ctx, lState, kmd, ptr, dir, rtype)
		},
		func(ctx context.Context, ptr data.BlockPointer, block data.Block) error {
			return fbo.config.DirtyBlockCache().Put(
				ctx, fbo.id(), ptr, dir.Branch, block)
		}, fbo.log, fbo.vlog)
}

// newDirDataWithDBMLocked creates a new `dirData` that reads from and
// puts into a local dir block cache.  If it reads a block out from
// anything but the `dbm`, it makes a copy of it before inserting it
// into the `dbm`.
func (fbo *folderBlockOps) newDirDataWithDBMLocked(lState *kbfssync.LockState,
	dir data.Path, chargedTo keybase1.UserOrTeamID, kmd libkey.KeyMetadata,
	dbm dirBlockMap) *data.DirData {
	fbo.blockLock.AssertRLocked(lState)
	return data.NewDirData(dir, chargedTo, fbo.config.BlockSplitter(), kmd,
		func(ctx context.Context, kmd libkey.KeyMetadata, ptr data.BlockPointer,
			dir data.Path, rtype data.BlockReqType) (*data.DirBlock, bool, error) {
			hasBlock, err := dbm.hasBlock(ctx, ptr)
			if err != nil {
				return nil, false, err
			}
			if hasBlock {
				block, err := dbm.getBlock(ctx, ptr)
				if err != nil {
					return nil, false, err
				}
				return block, true, nil
			}

			localLState := lState
			getRtype := rtype
			switch rtype {
			case data.BlockReadParallel:
				localLState = nil
			case data.BlockWrite:
				getRtype = data.BlockRead
			}

			block, wasDirty, err := fbo.getDirLocked(
				ctx, localLState, kmd, ptr, dir, getRtype)
			if err != nil {
				return nil, false, err
			}

			if rtype == data.BlockWrite {
				// Make a copy before we stick it in the local block cache.
				block = block.DeepCopy()
				err = dbm.putBlock(ctx, ptr, block)
				if err != nil {
					return nil, false, err
				}
			}
			return block, wasDirty, nil
		},
		func(ctx context.Context, ptr data.BlockPointer, block data.Block) error {
			return dbm.putBlock(ctx, ptr, block.(*data.DirBlock))
		}, fbo.log, fbo.vlog)
}

// newDirDataWithDBM is like `newDirDataWithDBMLocked`, but it must be
// called with `blockLock` unlocked, and the returned function must be
// called when the returned `dirData` is no longer in use.
func (fbo *folderBlockOps) newDirDataWithDBM(
	lState *kbfssync.LockState, dir data.Path, chargedTo keybase1.UserOrTeamID,
	kmd libkey.KeyMetadata, dbm dirBlockMap) (*data.DirData, func()) {
	// Lock and fetch for reading only, we want any dirty
	// blocks to go into the dbm.
	fbo.blockLock.RLock(lState)
	cleanupFn := func() { fbo.blockLock.RUnlock(lState) }
	return fbo.newDirDataWithDBMLocked(lState, dir, chargedTo, kmd, dbm),
		cleanupFn
}
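
// exampleEntriesViaDBM is an illustrative sketch (not part of the
// original KBFS code) of the newDirDataWithDBM contract: the returned
// cleanup function releases blockLock, so it must be deferred (or
// otherwise called) as soon as the dirData is no longer in use.
func (fbo *folderBlockOps) exampleEntriesViaDBM(
	ctx context.Context, lState *kbfssync.LockState, dir data.Path,
	chargedTo keybase1.UserOrTeamID, kmd libkey.KeyMetadata,
	dbm dirBlockMap) (map[data.PathPartString]data.DirEntry, error) {
	dd, cleanupFn := fbo.newDirDataWithDBM(lState, dir, chargedTo, kmd, dbm)
	defer cleanupFn()
	return dd.GetEntries(ctx)
}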

func (fbo *folderBlockOps) makeDirDirtyLocked(
	lState *kbfssync.LockState, ptr data.BlockPointer, unrefs []data.BlockInfo) func() {
	fbo.blockLock.AssertLocked(lState)
	oldUnrefs, wasDirty := fbo.dirtyDirs[ptr]
	oldLen := len(oldUnrefs)
	fbo.dirtyDirs[ptr] = append(oldUnrefs, unrefs...)
	return func() {
		dirtyBcache := fbo.config.DirtyBlockCache()
		if wasDirty {
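			// The full (three-index) slice expression caps the capacity
			// at oldLen, so a later append to the restored slice can't
			// clobber the entries that were appended above.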
			fbo.dirtyDirs[ptr] = oldUnrefs[:oldLen:oldLen]
		} else {
			_ = dirtyBcache.Delete(fbo.id(), ptr, fbo.branch())
			delete(fbo.dirtyDirs, ptr)
		}
		for _, unref := range unrefs {
			_ = dirtyBcache.Delete(fbo.id(), unref.BlockPointer, fbo.branch())
		}
	}
}

func (fbo *folderBlockOps) updateParentDirEntryLocked(
	ctx context.Context, lState *kbfssync.LockState, dir data.Path,
	kmd KeyMetadataWithRootDirEntry, setMtime, setCtime bool) (func(), error) {
	fbo.blockLock.AssertLocked(lState)
	chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
	if err != nil {
		return nil, err
	}
	now := fbo.nowUnixNano()
	pp := *dir.ParentPath()
	if pp.IsValid() {
		dd := fbo.newDirDataLocked(lState, pp, chargedTo, kmd)
		de, err := dd.Lookup(ctx, dir.TailName())
		if err != nil {
			return nil, err
		}
		newDe := de
		if setMtime {
			newDe.Mtime = now
		}
		if setCtime {
			newDe.Ctime = now
		}
		unrefs, err := dd.UpdateEntry(ctx, dir.TailName(), newDe)
		if err != nil {
			return nil, err
		}
		undoDirtyFn := fbo.makeDirDirtyLocked(lState, pp.TailPointer(), unrefs)
		return func() {
			_, _ = dd.UpdateEntry(ctx, dir.TailName(), de)
			undoDirtyFn()
		}, nil
	}

	// If the parent isn't a valid path, we need to update the root entry.
	var de *data.DirEntry
	if fbo.dirtyRootDirEntry == nil {
		deCopy := kmd.GetRootDirEntry()
		fbo.dirtyRootDirEntry = &deCopy
	} else {
		deCopy := *fbo.dirtyRootDirEntry
		de = &deCopy
	}
	if setMtime {
		fbo.dirtyRootDirEntry.Mtime = now
	}
	if setCtime {
		fbo.dirtyRootDirEntry.Ctime = now
	}
	return func() {
		fbo.dirtyRootDirEntry = de
	}, nil
}

func (fbo *folderBlockOps) addDirEntryInCacheLocked(
	ctx context.Context, lState *kbfssync.LockState,
	kmd KeyMetadataWithRootDirEntry, dir data.Path, newName data.PathPartString,
	newDe data.DirEntry) (func(), error) {
	fbo.blockLock.AssertLocked(lState)

	chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
	if err != nil {
		return nil, err
	}
	dd := fbo.newDirDataLocked(lState, dir, chargedTo, kmd)
	unrefs, err := dd.AddEntry(ctx, newName, newDe)
	if err != nil {
		return nil, err
	}
	parentUndo, err := fbo.updateParentDirEntryLocked(
		ctx, lState, dir, kmd, true, true)
	if err != nil {
		_, _ = dd.RemoveEntry(ctx, newName)
		return nil, err
	}

	undoDirtyFn := fbo.makeDirDirtyLocked(lState, dir.TailPointer(), unrefs)
	return func() {
		_, _ = dd.RemoveEntry(ctx, newName)
		undoDirtyFn()
		parentUndo()
	}, nil
}

// AddDirEntryInCache adds a brand new entry to the given directory
// and updates the directory's own mtime and ctime.  It returns a
// function that can be called if the change needs to be undone.
func (fbo *folderBlockOps) AddDirEntryInCache(
	ctx context.Context, lState *kbfssync.LockState,
	kmd KeyMetadataWithRootDirEntry, dir data.Path, newName data.PathPartString,
	newDe data.DirEntry) (dirCacheUndoFn, error) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	fn, err := fbo.addDirEntryInCacheLocked(
		ctx, lState, kmd, dir, newName, newDe)
	if err != nil {
		return nil, err
	}
	return fbo.wrapWithBlockLock(fn), nil
}

func (fbo *folderBlockOps) removeDirEntryInCacheLocked(
	ctx context.Context, lState *kbfssync.LockState,
	kmd KeyMetadataWithRootDirEntry, dir data.Path, oldName data.PathPartString,
	oldDe data.DirEntry) (func(), error) {
	fbo.blockLock.AssertLocked(lState)

	chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
	if err != nil {
		return nil, err
	}
	dd := fbo.newDirDataLocked(lState, dir, chargedTo, kmd)
	unrefs, err := dd.RemoveEntry(ctx, oldName)
	if err != nil {
		return nil, err
	}
	if oldDe.Type == data.Dir {
		// The parent dir inherits any dirty unrefs from the removed
		// directory.
		if childUnrefs, ok := fbo.dirtyDirs[oldDe.BlockPointer]; ok {
			unrefs = append(unrefs, childUnrefs...)
		}
	}

	unlinkUndoFn := fbo.nodeCache.Unlink(
		oldDe.Ref(), dir.ChildPath(
			oldName, oldDe.BlockPointer, fbo.nodeCache.ObfuscatorMaker()()),
		oldDe)

	parentUndo, err := fbo.updateParentDirEntryLocked(
		ctx, lState, dir, kmd, true, true)
	if err != nil {
		if unlinkUndoFn != nil {
			unlinkUndoFn()
		}
		_, _ = dd.AddEntry(ctx, oldName, oldDe)
		return nil, err
	}

	undoDirtyFn := fbo.makeDirDirtyLocked(lState, dir.TailPointer(), unrefs)
	return func() {
		_, _ = dd.AddEntry(ctx, oldName, oldDe)
		if undoDirtyFn != nil {
			undoDirtyFn()
		}
		if parentUndo != nil {
			parentUndo()
		}
		if unlinkUndoFn != nil {
			unlinkUndoFn()
		}
	}, nil
}
// RemoveDirEntryInCache removes an entry from the given directory
// and updates the directory's own mtime and ctime.  It returns a
// function that can be called if the change needs to be undone.
func (fbo *folderBlockOps) RemoveDirEntryInCache(
	ctx context.Context, lState *kbfssync.LockState,
	kmd KeyMetadataWithRootDirEntry, dir data.Path, oldName data.PathPartString,
	oldDe data.DirEntry) (dirCacheUndoFn, error) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	fn, err := fbo.removeDirEntryInCacheLocked(
		ctx, lState, kmd, dir, oldName, oldDe)
	if err != nil {
		return nil, err
	}
	return fbo.wrapWithBlockLock(fn), nil
}
// RenameDirEntryInCache updates the entries of both the old and new
// parent dirs for the given target dir atomically (with respect to
// blockLock).  It also updates the cache entry for the target, which
// would have its Ctime changed. The updates will get applied to the
// dirty blocks on subsequent fetches.
//
// The returned function can be called to undo the cached changes when
// the effects of the operation are no longer needed.
func (fbo *folderBlockOps) RenameDirEntryInCache(
	ctx context.Context, lState *kbfssync.LockState,
	kmd KeyMetadataWithRootDirEntry, oldParent data.Path,
	oldName data.PathPartString, newParent data.Path,
	newName data.PathPartString, newDe data.DirEntry,
	replacedDe data.DirEntry) (undo dirCacheUndoFn, err error) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	if newParent.TailPointer() == oldParent.TailPointer() &&
		oldName == newName {
		// Noop
		return nil, nil
	}

	var undoReplace func()
	if replacedDe.IsInitialized() {
		undoReplace, err = fbo.removeDirEntryInCacheLocked(
			ctx, lState, kmd, newParent, newName, replacedDe)
		if err != nil {
			return nil, err
		}
	}
	defer func() {
		if err != nil && undoReplace != nil {
			undoReplace()
		}
	}()

	undoAdd, err := fbo.addDirEntryInCacheLocked(
		ctx, lState, kmd, newParent, newName, newDe)
	if err != nil {
		return nil, err
	}
	defer func() {
		if err != nil && undoAdd != nil {
			undoAdd()
		}
	}()

	undoRm, err := fbo.removeDirEntryInCacheLocked(
		ctx, lState, kmd, oldParent, oldName, data.DirEntry{})
	if err != nil {
		return nil, err
	}
	defer func() {
		if err != nil && undoRm != nil {
			undoRm()
		}
	}()

	newParentNode := fbo.nodeCache.Get(newParent.TailRef())
	undoMove, err := fbo.nodeCache.Move(newDe.Ref(), newParentNode, newName)
	if err != nil {
		return nil, err
	}

	return fbo.wrapWithBlockLock(func() {
		if undoMove != nil {
			undoMove()
		}
		if undoRm != nil {
			undoRm()
		}
		if undoAdd != nil {
			undoAdd()
		}
		if undoReplace != nil {
			undoReplace()
		}
	}), nil
}

func (fbo *folderBlockOps) setCachedAttrLocked(
	ctx context.Context, lState *kbfssync.LockState,
	kmd KeyMetadataWithRootDirEntry, dir data.Path, name data.PathPartString,
	attr attrChange, realEntry data.DirEntry) (dirCacheUndoFn, error) {
	fbo.blockLock.AssertLocked(lState)

	chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
	if err != nil {
		return nil, err
	}

	if !dir.IsValid() {
		// Can't set attrs directly on the root entry, primarily
		// because there's no way to indicate it's dirty.  TODO: allow
		// mtime-setting on the root dir?
		return nil, InvalidParentPathError{dir}
	}
	var de data.DirEntry
	var unlinkedNode Node

	dd := fbo.newDirDataLocked(lState, dir, chargedTo, kmd)
	de, err = dd.Lookup(ctx, name)
	if _, noExist := errors.Cause(err).(idutil.NoSuchNameError); noExist {
		// The node may be unlinked.
		unlinkedNode = fbo.nodeCache.Get(realEntry.Ref())
		if unlinkedNode != nil && !fbo.nodeCache.IsUnlinked(unlinkedNode) {
			unlinkedNode = nil
		}
		if unlinkedNode != nil {
			de = fbo.nodeCache.UnlinkedDirEntry(unlinkedNode)
		} else {
			return nil, err
		}
	} else if err != nil {
		return nil, err
	}

	oldDe := de
	switch attr {
	case exAttr:
		de.Type = realEntry.Type
	case mtimeAttr:
		de.Mtime = realEntry.Mtime
	}
	de.Ctime = realEntry.Ctime

	var undoDirtyFn func()
	if unlinkedNode != nil {
		fbo.nodeCache.UpdateUnlinkedDirEntry(unlinkedNode, de)
	} else {
		unrefs, err := dd.UpdateEntry(ctx, name, de)
		if err != nil {
			return nil, err
		}
		undoDirtyFn = fbo.makeDirDirtyLocked(lState, dir.TailPointer(), unrefs)
	}

	return fbo.wrapWithBlockLock(func() {
		if unlinkedNode != nil {
			fbo.nodeCache.UpdateUnlinkedDirEntry(unlinkedNode, oldDe)
		} else {
			_, _ = dd.UpdateEntry(ctx, name, oldDe)
			undoDirtyFn()
		}
	}), nil
}
// SetAttrInDirEntryInCache updates an attribute in the entry at the
// given path, within its parent directory's cached block.
func (fbo *folderBlockOps) SetAttrInDirEntryInCache(
	ctx context.Context, lState *kbfssync.LockState,
	kmd KeyMetadataWithRootDirEntry, p data.Path, newDe data.DirEntry,
	attr attrChange) (dirCacheUndoFn, error) {
	fbo.blockLock.Lock(lState)
	defer fbo.blockLock.Unlock(lState)
	return fbo.setCachedAttrLocked(
		ctx, lState, kmd, *p.ParentPath(), p.TailName(), attr, newDe)
}

// getDirtyDirLocked retrieves the possibly-dirty block of the given
// directory.  Note that a dirty dir means that it has entries possibly
// pointing to dirty files, and/or that its children list is dirty.
func (fbo *folderBlockOps) getDirtyDirLocked(ctx context.Context,
	lState *kbfssync.LockState, kmd libkey.KeyMetadata, dir data.Path, rtype data.BlockReqType) (
	*data.DirBlock, error) {
	fbo.blockLock.AssertAnyLocked(lState)

	dblock, _, err := fbo.getDirLocked(
		ctx, lState, kmd, dir.TailPointer(), dir, rtype)
	if err != nil {
		return nil, err
	}
	return dblock, nil
}
1464
1465// GetDirtyDirCopy returns a deep copy of the directory block for a
1466// dirty directory, while under lock, updated with all cached dirty
1467// entries.
1468func (fbo *folderBlockOps) GetDirtyDirCopy(
1469	ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata, dir data.Path,
1470	rtype data.BlockReqType) (*data.DirBlock, error) {
1471	fbo.blockLock.RLock(lState)
1472	defer fbo.blockLock.RUnlock(lState)
1473	dblock, err := fbo.getDirtyDirLocked(ctx, lState, kmd, dir, rtype)
1474	if err != nil {
1475		return nil, err
1476	}
1477	// Copy it while under lock.  Otherwise, another operation like
1478	// `Write` can modify it while the caller is trying to copy it,
1479	// leading to a panic like in KBFS-3407.
1480	return dblock.DeepCopy(), nil
1481}
1482
1483// GetChildren returns a map of EntryInfos for the (possibly dirty)
1484// children entries of the given directory.
1485func (fbo *folderBlockOps) GetChildren(
1486	ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata,
1487	dir data.Path) (map[data.PathPartString]data.EntryInfo, error) {
1488	fbo.blockLock.RLock(lState)
1489	defer fbo.blockLock.RUnlock(lState)
1490	dd := fbo.newDirDataLocked(lState, dir, keybase1.UserOrTeamID(""), kmd)
1491	return dd.GetChildren(ctx)
1492}
1493
1494// GetEntries returns a map of DirEntries for the (possibly dirty)
1495// children entries of the given directory.
1496func (fbo *folderBlockOps) GetEntries(
1497	ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata,
1498	dir data.Path) (map[data.PathPartString]data.DirEntry, error) {
1499	fbo.blockLock.RLock(lState)
1500	defer fbo.blockLock.RUnlock(lState)
1501	dd := fbo.newDirDataLocked(lState, dir, keybase1.UserOrTeamID(""), kmd)
1502	return dd.GetEntries(ctx)
1503}
1504
1505func (fbo *folderBlockOps) getEntryLocked(ctx context.Context,
1506	lState *kbfssync.LockState, kmd KeyMetadataWithRootDirEntry, file data.Path,
1507	includeDeleted bool) (de data.DirEntry, err error) {
1508	fbo.blockLock.AssertAnyLocked(lState)
1509
1510	// See if this is the root.
1511	if !file.HasValidParent() {
1512		if fbo.dirtyRootDirEntry != nil {
1513			return *fbo.dirtyRootDirEntry, nil
1514		}
1515		return kmd.GetRootDirEntry(), nil
1516	}
1517
1518	dd := fbo.newDirDataLocked(
1519		lState, *file.ParentPath(), keybase1.UserOrTeamID(""), kmd)
1520	de, err = dd.Lookup(ctx, file.TailName())
1521	_, noExist := errors.Cause(err).(idutil.NoSuchNameError)
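	// If the entry is missing, or no longer matches the node's
	// pointer, the node may have been unlinked; fall back to the
	// cached unlinked entry in that case.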
1522	if includeDeleted && (noExist || de.BlockPointer != file.TailPointer()) {
1523		unlinkedNode := fbo.nodeCache.Get(file.TailPointer().Ref())
1524		if unlinkedNode != nil && fbo.nodeCache.IsUnlinked(unlinkedNode) {
1525			return fbo.nodeCache.UnlinkedDirEntry(unlinkedNode), nil
1526		}
1527		return data.DirEntry{}, err
1528	} else if err != nil {
1529		return data.DirEntry{}, err
1530	}
1531	return de, nil
1532}
1533
1534// file must have a valid parent.
1535func (fbo *folderBlockOps) updateEntryLocked(ctx context.Context,
1536	lState *kbfssync.LockState, kmd KeyMetadataWithRootDirEntry, file data.Path,
1537	de data.DirEntry, includeDeleted bool) error {
1538	fbo.blockLock.AssertAnyLocked(lState)
1539
1540	chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
1541	if err != nil {
1542		return err
1543	}
1544	parentPath := *file.ParentPath()
1545	dd := fbo.newDirDataLocked(lState, parentPath, chargedTo, kmd)
1546	unrefs, err := dd.UpdateEntry(ctx, file.TailName(), de)
1547	_, noExist := errors.Cause(err).(idutil.NoSuchNameError)
1548	switch {
1549	case noExist && includeDeleted:
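		// The entry is gone from its parent directory; if the node
		// was unlinked, update the cached unlinked entry instead.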
1550		unlinkedNode := fbo.nodeCache.Get(file.TailPointer().Ref())
1551		if unlinkedNode != nil && fbo.nodeCache.IsUnlinked(unlinkedNode) {
1552			fbo.nodeCache.UpdateUnlinkedDirEntry(unlinkedNode, de)
1553			return nil
1554		}
1555		return err
1556	case err != nil:
1557		return err
1558	default:
1559		_ = fbo.makeDirDirtyLocked(lState, parentPath.TailPointer(), unrefs)
1560	}
1561
1562	// If we're in the middle of syncing the directories, but the
1563	// current file is not yet being synced, we need to re-apply this
1564	// update after the sync is done, so it doesn't get lost after the
1565	// syncing directory block is readied.  This only applies to dir
1566	// updates being caused by file changes; other types of dir writes
1567	// are protected by `folderBranchOps.syncLock`, which is held
1568	// during `SyncAll`.
1569	if fbo.dirtyDirsSyncing && !fbo.doDeferWrite {
1570		fbo.log.CDebugf(ctx, "Deferring update entry during sync")
1571		n := fbo.nodeCache.Get(file.TailRef())
1572		fbo.deferredDirUpdates = append(
1573			fbo.deferredDirUpdates, func(lState *kbfssync.LockState) error {
1574				file := fbo.nodeCache.PathFromNode(n)
1575				de.BlockPointer = file.TailPointer()
1576				return fbo.updateEntryLocked(
1577					ctx, lState, kmd, file, de, includeDeleted)
1578			})
1579	}
1580
1581	return nil
1582}
1583
1584// GetEntry returns the possibly-dirty DirEntry of the given file in
1585// its parent DirBlock. file must have a valid parent.
1586func (fbo *folderBlockOps) GetEntry(
1587	ctx context.Context, lState *kbfssync.LockState,
1588	kmd KeyMetadataWithRootDirEntry, file data.Path) (data.DirEntry, error) {
1589	fbo.blockLock.RLock(lState)
1590	defer fbo.blockLock.RUnlock(lState)
1591	return fbo.getEntryLocked(ctx, lState, kmd, file, false)
1592}
1593
1594// GetEntryEvenIfDeleted returns the possibly-dirty DirEntry of the
1595// given file in its parent DirBlock, even if the file has been
1596// deleted. file must have a valid parent.
1597func (fbo *folderBlockOps) GetEntryEvenIfDeleted(
1598	ctx context.Context, lState *kbfssync.LockState,
1599	kmd KeyMetadataWithRootDirEntry, file data.Path) (data.DirEntry, error) {
1600	fbo.blockLock.RLock(lState)
1601	defer fbo.blockLock.RUnlock(lState)
1602	return fbo.getEntryLocked(ctx, lState, kmd, file, true)
1603}
1604
1605func (fbo *folderBlockOps) getChildNodeLocked(
1606	lState *kbfssync.LockState, dir Node, name data.PathPartString,
1607	de data.DirEntry) (Node, error) {
1608	fbo.blockLock.AssertRLocked(lState)
1609
1610	if de.Type == data.Sym {
1611		return nil, nil
1612	}
1613
1614	return fbo.nodeCache.GetOrCreate(de.BlockPointer, name, dir, de.Type)
1615}
1616
1617func (fbo *folderBlockOps) GetChildNode(
1618	lState *kbfssync.LockState, dir Node, name data.PathPartString,
1619	de data.DirEntry) (Node, error) {
1620	fbo.blockLock.RLock(lState)
1621	defer fbo.blockLock.RUnlock(lState)
1622	return fbo.getChildNodeLocked(lState, dir, name, de)
1623}
1624
1625// Lookup returns the possibly-dirty DirEntry of the given file in its
1626// parent DirBlock, and a Node for the file if it exists.  It has to
1627// do all of this under the block lock to avoid races with
1628// UpdatePointers.
1629func (fbo *folderBlockOps) Lookup(
1630	ctx context.Context, lState *kbfssync.LockState,
1631	kmd KeyMetadataWithRootDirEntry, dir Node, name data.PathPartString) (
1632	Node, data.DirEntry, error) {
1633	fbo.blockLock.RLock(lState)
1634	defer fbo.blockLock.RUnlock(lState)
1635
1636	// Protect against non-dir nodes being passed in by mistake.
	// TODO: we should probably make this a more specific error, but
1638	// then we need to update some places that check for
1639	// `NoSuchNameError` to check for this one as well.
1640	if dir.EntryType() != data.Dir {
1641		fbo.log.CDebugf(
1642			ctx, "Got unexpected node type when looking up %s: %s",
1643			name, dir.EntryType())
1644		return nil, data.DirEntry{}, idutil.NoSuchNameError{Name: name.String()}
1645	}
1646
1647	dirPath := fbo.nodeCache.PathFromNode(dir)
1648	if !dirPath.IsValid() {
1649		return nil, data.DirEntry{}, errors.WithStack(InvalidPathError{dirPath})
1650	}
1651
1652	childPath := dirPath.ChildPathNoPtr(name, fbo.nodeCache.ObfuscatorMaker()())
1653	de, err := fbo.getEntryLocked(ctx, lState, kmd, childPath, false)
1654	if err != nil {
1655		return nil, data.DirEntry{}, err
1656	}
1657
1658	node, err := fbo.getChildNodeLocked(lState, dir, name, de)
1659	if err != nil {
1660		return nil, data.DirEntry{}, err
1661	}
1662	return node, de, nil
1663}
1664
1665func (fbo *folderBlockOps) getOrCreateDirtyFileLocked(
1666	lState *kbfssync.LockState, file data.Path) *data.DirtyFile {
1667	fbo.blockLock.AssertLocked(lState)
1668	ptr := file.TailPointer()
1669	df := fbo.dirtyFiles[ptr]
1670	if df == nil {
1671		df = data.NewDirtyFile(file, fbo.config.DirtyBlockCache())
1672		fbo.dirtyFiles[ptr] = df
1673	}
1674	return df
1675}
1676
1677// cacheBlockIfNotYetDirtyLocked puts a block into the cache, but only
1678// does so if the block isn't already marked as dirty in the cache.
1679// This is useful when operating on a dirty copy of a block that may
1680// already be in the cache.
1681func (fbo *folderBlockOps) cacheBlockIfNotYetDirtyLocked(
1682	ctx context.Context, lState *kbfssync.LockState, ptr data.BlockPointer,
1683	file data.Path, block data.Block) error {
1684	fbo.blockLock.AssertLocked(lState)
1685	df := fbo.getOrCreateDirtyFileLocked(lState, file)
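	// SetBlockDirty reports whether the block still needs to be
	// cached (it wasn't already dirty), and whether the file is
	// currently being synced (in which case this write must be
	// deferred).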
1686	needsCaching, isSyncing := df.SetBlockDirty(ptr)
1687
1688	if needsCaching {
1689		err := fbo.config.DirtyBlockCache().Put(
1690			ctx, fbo.id(), ptr, file.Branch, block)
1691		if err != nil {
1692			return err
1693		}
1694	}
1695
1696	if isSyncing {
1697		fbo.doDeferWrite = true
1698	}
1699	return nil
1700}
1701
1702func (fbo *folderBlockOps) getOrCreateSyncInfoLocked(
1703	lState *kbfssync.LockState, de data.DirEntry) (*syncInfo, error) {
1704	fbo.blockLock.AssertLocked(lState)
1705	ref := de.Ref()
1706	si, ok := fbo.unrefCache[ref]
1707	if !ok {
1708		so, err := newSyncOp(de.BlockPointer)
1709		if err != nil {
1710			return nil, err
1711		}
1712		si = &syncInfo{
1713			oldInfo: de.BlockInfo,
1714			op:      so,
1715		}
1716		fbo.unrefCache[ref] = si
1717	}
1718	return si, nil
1719}
1720
1721// GetDirtyFileBlockRefs returns a list of references of all known dirty
1722// files.
1723func (fbo *folderBlockOps) GetDirtyFileBlockRefs(
1724	lState *kbfssync.LockState) []data.BlockRef {
1725	fbo.blockLock.RLock(lState)
1726	defer fbo.blockLock.RUnlock(lState)
1727	var dirtyRefs []data.BlockRef
1728	for ref := range fbo.unrefCache {
1729		dirtyRefs = append(dirtyRefs, ref)
1730	}
1731	return dirtyRefs
1732}
1733
1734// GetDirtyDirBlockRefs returns a list of references of all known
1735// dirty directories.  Also returns a channel that, while it is open,
1736// all future writes will be blocked until it is closed -- this lets
1737// the caller ensure that the directory entries will remain stable
1738// (not updated with new file sizes by the writes) until all of the
1739// directory blocks have been safely copied.  The caller *must* close
1740// this channel once they are done processing the dirty directory
1741// blocks.
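//
// A hypothetical caller might look like:
//
//	refs, ch := fbo.GetDirtyDirBlockRefs(lState)
//	// ... safely copy the dirty directory blocks ...
//	close(ch) // new writes may now proceed
//	// ... finish the rest of the sync ...
//	fbo.GetDirtyDirBlockRefsDone(lState)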
1742func (fbo *folderBlockOps) GetDirtyDirBlockRefs(
1743	lState *kbfssync.LockState) ([]data.BlockRef, chan<- struct{}) {
1744	fbo.blockLock.Lock(lState)
1745	defer fbo.blockLock.Unlock(lState)
1746	var dirtyRefs []data.BlockRef
1747	for ptr := range fbo.dirtyDirs {
1748		dirtyRefs = append(dirtyRefs, ptr.Ref())
1749	}
1750	if fbo.dirtyDirsSyncing {
1751		panic("GetDirtyDirBlockRefs() called twice")
1752	}
1753	fbo.dirtyDirsSyncing = true
1754	ch := make(chan struct{})
1755	fbo.holdNewWritesCh = ch
1756	return dirtyRefs, ch
1757}
1758
1759// GetDirtyDirBlockRefsDone is called to indicate the caller is done
1760// with the data previously returned from `GetDirtyDirBlockRefs()`.
1761func (fbo *folderBlockOps) GetDirtyDirBlockRefsDone(
1762	lState *kbfssync.LockState) {
1763	fbo.blockLock.Lock(lState)
1764	defer fbo.blockLock.Unlock(lState)
1765	fbo.dirtyDirsSyncing = false
1766	fbo.deferredDirUpdates = nil
1767	fbo.holdNewWritesCh = nil
1768}
1769
1770// getDirtyDirUnrefsLocked returns a list of block infos that need to be
1771// unreferenced for the given directory.
1772func (fbo *folderBlockOps) getDirtyDirUnrefsLocked(
1773	lState *kbfssync.LockState, ptr data.BlockPointer) []data.BlockInfo {
1774	fbo.blockLock.AssertRLocked(lState)
1775	return fbo.dirtyDirs[ptr]
1776}
1777
1778// fixChildBlocksAfterRecoverableErrorLocked should be called when a sync
1779// failed with a recoverable block error on a multi-block file.  It
1780// makes sure that any outstanding dirty versions of the file are
1781// fixed up to reflect the fact that some of the indirect pointers now
1782// need to change.
1783func (fbo *folderBlockOps) fixChildBlocksAfterRecoverableErrorLocked(
1784	ctx context.Context, lState *kbfssync.LockState, file data.Path, kmd libkey.KeyMetadata,
1785	redirtyOnRecoverableError map[data.BlockPointer]data.BlockPointer) {
1786	fbo.blockLock.AssertLocked(lState)
1787
1788	defer func() {
1789		// Below, this function can end up writing dirty blocks back
1790		// to the cache, which will set `doDeferWrite` to `true`.
1791		// This leads to future writes being unnecessarily deferred
1792		// when a Sync is not happening, and can lead to dirty data
1793		// being synced twice and sticking around for longer than
1794		// needed.  So just reset `doDeferWrite` once we're
1795		// done. We're under `blockLock`, so this is safe.
1796		fbo.doDeferWrite = false
1797	}()
1798
1799	df := fbo.dirtyFiles[file.TailPointer()]
1800	if df != nil {
1801		// Un-orphan old blocks, since we are reverting back to the
1802		// previous state.
1803		for _, oldPtr := range redirtyOnRecoverableError {
1804			fbo.vlog.CLogf(ctx, libkb.VLog1, "Un-orphaning %v", oldPtr)
1805			df.SetBlockOrphaned(oldPtr, false)
1806		}
1807	}
1808
1809	dirtyBcache := fbo.config.DirtyBlockCache()
1810	topBlock, err := dirtyBcache.Get(
1811		ctx, fbo.id(), file.TailPointer(), fbo.branch())
1812	fblock, ok := topBlock.(*data.FileBlock)
1813	if err != nil || !ok {
1814		fbo.log.CWarningf(ctx, "Couldn't find dirtied "+
1815			"top-block for %v: %v", file.TailPointer(), err)
1816		return
1817	}
1818
1819	chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
1820	if err != nil {
1821		fbo.log.CWarningf(ctx, "Couldn't find uid during recovery: %v", err)
1822		return
1823	}
1824	fd := fbo.newFileData(lState, file, chargedTo, kmd)
1825
1826	// If a copy of the top indirect block was made, we need to
1827	// redirty all the sync'd blocks under their new IDs, so that
1828	// future syncs will know they failed.
1829	newPtrs := make(map[data.BlockPointer]bool, len(redirtyOnRecoverableError))
1830	for newPtr := range redirtyOnRecoverableError {
1831		newPtrs[newPtr] = true
1832	}
1833	found, err := fd.FindIPtrsAndClearSize(ctx, fblock, newPtrs)
1834	if err != nil {
1835		fbo.log.CWarningf(
1836			ctx, "Couldn't find and clear iptrs during recovery: %v", err)
1837		return
1838	}
1839	for newPtr, oldPtr := range redirtyOnRecoverableError {
1840		if !found[newPtr] {
1841			continue
1842		}
1843
1844		fbo.vlog.CLogf(
1845			ctx, libkb.VLog1, "Re-dirtying %v (and deleting dirty block %v)",
1846			newPtr, oldPtr)
1847		// These blocks would have been permanent, so they're
1848		// definitely still in the cache.
1849		b, err := fbo.config.BlockCache().Get(newPtr)
1850		if err != nil {
1851			fbo.log.CWarningf(ctx, "Couldn't re-dirty %v: %v", newPtr, err)
1852			continue
1853		}
1854		if err = fbo.cacheBlockIfNotYetDirtyLocked(
1855			ctx, lState, newPtr, file, b); err != nil {
1856			fbo.log.CWarningf(ctx, "Couldn't re-dirty %v: %v", newPtr, err)
1857		}
1858		fbo.vlog.CLogf(
1859			ctx, libkb.VLog1, "Deleting dirty ptr %v after recoverable error",
1860			oldPtr)
1861		err = dirtyBcache.Delete(fbo.id(), oldPtr, fbo.branch())
1862		if err != nil {
1863			fbo.vlog.CLogf(
1864				ctx, libkb.VLog1, "Couldn't del-dirty %v: %v", oldPtr, err)
1865		}
1866	}
1867}
1868
1869func (fbo *folderBlockOps) nowUnixNano() int64 {
1870	return fbo.config.Clock().Now().UnixNano()
1871}
1872
// PrepRename prepares the given rename operation.  It returns the
// DirEntry for the entry being renamed, the DirEntry of any existing
// entry being replaced at the destination (zero-valued if there is
// none), and the new renameOp.
1876func (fbo *folderBlockOps) PrepRename(
1877	ctx context.Context, lState *kbfssync.LockState,
1878	kmd KeyMetadataWithRootDirEntry, oldParent data.Path,
1879	oldName data.PathPartString, newParent data.Path,
1880	newName data.PathPartString) (
1881	newDe, replacedDe data.DirEntry, ro *renameOp, err error) {
1882	fbo.blockLock.RLock(lState)
1883	defer fbo.blockLock.RUnlock(lState)
1884
1885	// Look up in the old path. Won't be modified, so only fetch for reading.
1886	newDe, err = fbo.getEntryLocked(
1887		ctx, lState, kmd, oldParent.ChildPathNoPtr(oldName, nil), false)
1888	if err != nil {
1889		return data.DirEntry{}, data.DirEntry{}, nil, err
1890	}
1891
1892	oldParentPtr := oldParent.TailPointer()
1893	newParentPtr := newParent.TailPointer()
1894	ro, err = newRenameOp(
1895		oldName.Plaintext(), oldParentPtr, newName.Plaintext(), newParentPtr,
1896		newDe.BlockPointer, newDe.Type)
1897	if err != nil {
1898		return data.DirEntry{}, data.DirEntry{}, nil, err
1899	}
1900	ro.AddUpdate(oldParentPtr, oldParentPtr)
1901	ro.setFinalPath(newParent)
1902	ro.oldFinalPath = oldParent
1903	if oldParentPtr.ID != newParentPtr.ID {
1904		ro.AddUpdate(newParentPtr, newParentPtr)
1905	}
1906
1907	replacedDe, err = fbo.getEntryLocked(
1908		ctx, lState, kmd, newParent.ChildPathNoPtr(newName, nil), false)
1909	if _, notExists := errors.Cause(err).(idutil.NoSuchNameError); notExists {
1910		return newDe, data.DirEntry{}, ro, nil
1911	} else if err != nil {
1912		return data.DirEntry{}, data.DirEntry{}, nil, err
1913	}
1914
1915	return newDe, replacedDe, ro, nil
1916}
1917
1918func (fbo *folderBlockOps) newFileData(lState *kbfssync.LockState,
1919	file data.Path, chargedTo keybase1.UserOrTeamID, kmd libkey.KeyMetadata) *data.FileData {
1920	fbo.blockLock.AssertAnyLocked(lState)
1921	return data.NewFileData(file, chargedTo, fbo.config.BlockSplitter(), kmd,
1922		func(ctx context.Context, kmd libkey.KeyMetadata, ptr data.BlockPointer,
1923			file data.Path, rtype data.BlockReqType) (*data.FileBlock, bool, error) {
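			// Parallel reads are done without holding blockLock, so
			// don't pass the lock state along in that case.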
1924			lState := lState
1925			if rtype == data.BlockReadParallel {
1926				lState = nil
1927			}
1928			return fbo.getFileBlockLocked(
1929				ctx, lState, kmd, ptr, file, rtype)
1930		},
1931		func(ctx context.Context, ptr data.BlockPointer, block data.Block) error {
1932			return fbo.cacheBlockIfNotYetDirtyLocked(
1933				ctx, lState, ptr, file, block)
1934		}, fbo.log, fbo.vlog)
1935}
1936
1937func (fbo *folderBlockOps) newFileDataWithCache(lState *kbfssync.LockState,
1938	file data.Path, chargedTo keybase1.UserOrTeamID, kmd libkey.KeyMetadata,
1939	dirtyBcache data.DirtyBlockCacheSimple) *data.FileData {
1940	fbo.blockLock.AssertAnyLocked(lState)
1941	return data.NewFileData(file, chargedTo, fbo.config.BlockSplitter(), kmd,
1942		func(ctx context.Context, kmd libkey.KeyMetadata, ptr data.BlockPointer,
1943			file data.Path, rtype data.BlockReqType) (*data.FileBlock, bool, error) {
1944			block, err := dirtyBcache.Get(ctx, file.Tlf, ptr, file.Branch)
1945			if fblock, ok := block.(*data.FileBlock); ok && err == nil {
1946				return fblock, true, nil
1947			}
1948			lState := lState
1949			if rtype == data.BlockReadParallel {
1950				lState = nil
1951			}
1952			return fbo.getFileBlockLocked(
1953				ctx, lState, kmd, ptr, file, rtype)
1954		},
1955		func(ctx context.Context, ptr data.BlockPointer, block data.Block) error {
1956			return dirtyBcache.Put(ctx, file.Tlf, ptr, file.Branch, block)
1957		}, fbo.log, fbo.vlog)
1958}
1959
1960// Read reads from the given file into the given buffer at the given
1961// offset. It returns the number of bytes read and nil, or 0 and the
1962// error if there was one.
1963func (fbo *folderBlockOps) Read(
1964	ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata, file Node,
1965	dest []byte, off int64) (int64, error) {
1966	fbo.blockLock.RLock(lState)
1967	defer fbo.blockLock.RUnlock(lState)
1968
1969	filePath := fbo.nodeCache.PathFromNode(file)
1970
1971	fbo.vlog.CLogf(ctx, libkb.VLog1, "Reading from %v", filePath.TailPointer())
1972
1973	var id keybase1.UserOrTeamID // Data reads don't depend on the id.
1974	fd := fbo.newFileData(lState, filePath, id, kmd)
1975	return fd.Read(ctx, dest, data.Int64Offset(off))
1976}
1977
1978func (fbo *folderBlockOps) maybeWaitOnDeferredWrites(
1979	ctx context.Context, lState *kbfssync.LockState, file Node,
1980	c data.DirtyPermChan) error {
1981	var errListener chan error
1982	registerErr := func() error {
1983		fbo.blockLock.Lock(lState)
1984		defer fbo.blockLock.Unlock(lState)
1985		filePath, err := fbo.pathFromNodeForBlockWriteLocked(lState, file)
1986		if err != nil {
1987			return err
1988		}
1989		df := fbo.getOrCreateDirtyFileLocked(lState, filePath)
1990		errListener = make(chan error, 1)
1991		df.AddErrListener(errListener)
1992		return nil
1993	}
1994	err := registerErr()
1995	if err != nil {
1996		return err
1997	}
1998
1999	logTimer := time.After(100 * time.Millisecond)
2000	doLogUnblocked := false
2001	for {
2002		var err error
2003	outerSelect:
2004		select {
2005		case <-c:
2006			if doLogUnblocked {
2007				fbo.vlog.CLogf(ctx, libkb.VLog1, "Write unblocked")
2008			}
2009			// Make sure there aren't any queued errors.
2010			select {
2011			case err = <-errListener:
2012				// Break the select to check the cause of the error below.
2013				break outerSelect
2014			default:
2015			}
2016			return nil
2017		case <-logTimer:
2018			// Print a log message once if it's taking too long.
2019			fbo.log.CDebugf(ctx,
2020				"Blocking a write because of a full dirty buffer")
2021			doLogUnblocked = true
2022		case <-ctx.Done():
2023			return ctx.Err()
2024		case err = <-errListener:
2025			// Fall through to check the cause of the error below.
2026		}
2027		// Context errors are safe to ignore, since they are likely to
2028		// be specific to a previous sync (e.g., a user hit ctrl-c
2029		// during an fsync, or a sync timed out, or a test was
2030		// provoking an error specifically [KBFS-2164]).
2031		cause := errors.Cause(err)
2032		if cause == context.Canceled || cause == context.DeadlineExceeded {
2033			fbo.vlog.CLogf(ctx, libkb.VLog1, "Ignoring sync err: %+v", err)
2034			err := registerErr()
2035			if err != nil {
2036				return err
2037			}
2038			continue
2039		} else if err != nil {
2040			// Treat other errors as fatal to this write -- e.g., the
2041			// user's quota is full, the local journal is broken,
2042			// etc. XXX: should we ignore errors that are specific
2043			// only to some other file being sync'd (e.g.,
2044			// "recoverable" block errors from which we couldn't
2045			// recover)?
2046			return err
2047		}
2048	}
2049}
2050
2051func (fbo *folderBlockOps) pathFromNodeForBlockWriteLocked(
2052	lState *kbfssync.LockState, n Node) (data.Path, error) {
2053	fbo.blockLock.AssertLocked(lState)
2054	p := fbo.nodeCache.PathFromNode(n)
2055	if !p.IsValid() {
2056		return data.Path{}, errors.WithStack(InvalidPathError{p})
2057	}
2058	return p, nil
2059}
2060
// writeGetFileLocked checks write permissions explicitly for
// writeDataLocked, truncateLocked, etc., and returns the file's top
// block, fetched for writing.
2063func (fbo *folderBlockOps) writeGetFileLocked(
2064	ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata,
2065	file data.Path) (*data.FileBlock, error) {
2066	fbo.blockLock.AssertLocked(lState)
2067
2068	session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
2069	if err != nil {
2070		return nil, err
2071	}
2072	isWriter, err := kmd.IsWriter(
2073		ctx, fbo.config.KBPKI(), fbo.config, session.UID, session.VerifyingKey)
2074	if err != nil {
2075		return nil, err
2076	}
2077	if !isWriter {
2078		return nil, tlfhandle.NewWriteAccessError(kmd.GetTlfHandle(),
2079			session.Name, file.String())
2080	}
2081	fblock, err := fbo.getFileLocked(ctx, lState, kmd, file, data.BlockWrite)
2082	if err != nil {
2083		return nil, err
2084	}
2085	return fblock, nil
2086}
2087
2088// Returns the set of blocks dirtied during this write that might need
2089// to be cleaned up if the write is deferred.
2090func (fbo *folderBlockOps) writeDataLocked(
2091	ctx context.Context, lState *kbfssync.LockState,
2092	kmd KeyMetadataWithRootDirEntry, file data.Path, buf []byte, off int64) (
2093	latestWrite WriteRange, dirtyPtrs []data.BlockPointer,
2094	newlyDirtiedChildBytes int64, err error) {
2095	_, wasAlreadyUnref := fbo.unrefCache[file.TailPointer().Ref()]
2096	defer func() {
2097		// if the write didn't succeed, and the file wasn't already
2098		// being cached, clear out any cached state.
2099		if err != nil && !wasAlreadyUnref {
2100			_ = fbo.clearCacheInfoLocked(lState, file)
2101		}
2102	}()
2103
2104	if jManager, err := GetJournalManager(fbo.config); err == nil {
2105		jManager.dirtyOpStart(fbo.id())
2106		defer jManager.dirtyOpEnd(fbo.id())
2107	}
2108
2109	fbo.blockLock.AssertLocked(lState)
2110	fbo.vlog.CLogf(ctx, libkb.VLog1, "writeDataLocked on file pointer %v",
2111		file.TailPointer())
2112	defer func() {
2113		fbo.vlog.CLogf(ctx, libkb.VLog1, "writeDataLocked done: %v", err)
2114	}()
2115
2116	fblock, err := fbo.writeGetFileLocked(ctx, lState, kmd, file)
2117	if err != nil {
2118		return WriteRange{}, nil, 0, err
2119	}
2120
2121	chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
2122	if err != nil {
2123		return WriteRange{}, nil, 0, err
2124	}
2125
2126	fd := fbo.newFileData(lState, file, chargedTo, kmd)
2127
2128	dirtyBcache := fbo.config.DirtyBlockCache()
2129	df := fbo.getOrCreateDirtyFileLocked(lState, file)
2130	defer func() {
2131		// Always update unsynced bytes and potentially force a sync,
2132		// even on an error, since the previously-dirty bytes stay in
2133		// the cache.
2134		df.UpdateNotYetSyncingBytes(newlyDirtiedChildBytes)
2135		if dirtyBcache.ShouldForceSync(fbo.id()) {
2136			select {
2137			// If we can't send on the channel, that means a sync is
2138			// already in progress.
2139			case fbo.forceSyncChan <- struct{}{}:
2140				fbo.vlog.CLogf(
2141					ctx, libkb.VLog1, "Forcing a sync due to full buffer")
2142			default:
2143			}
2144		}
2145	}()
2146
2147	de, err := fbo.getEntryLocked(ctx, lState, kmd, file, true)
2148	if err != nil {
2149		return WriteRange{}, nil, 0, err
2150	}
2151	if de.BlockPointer != file.TailPointer() {
2152		fbo.log.CDebugf(ctx, "DirEntry and file tail pointer don't match: "+
2153			"%v vs %v, parent=%s", de.BlockPointer, file.TailPointer(),
2154			file.ParentPath().TailPointer())
2155	}
2156
2157	si, err := fbo.getOrCreateSyncInfoLocked(lState, de)
2158	if err != nil {
2159		return WriteRange{}, nil, 0, err
2160	}
2161
2162	newDe, dirtyPtrs, unrefs, newlyDirtiedChildBytes, bytesExtended, err :=
2163		fd.Write(ctx, buf, data.Int64Offset(off), fblock, de, df)
2164	// Record the unrefs before checking the error so we remember the
2165	// state of newly dirtied blocks.
2166	si.unrefs = append(si.unrefs, unrefs...)
2167	if err != nil {
2168		return WriteRange{}, nil, newlyDirtiedChildBytes, err
2169	}
2170
2171	// Update the file's directory entry.
2172	now := fbo.nowUnixNano()
2173	newDe.Mtime = now
2174	newDe.Ctime = now
2175	err = fbo.updateEntryLocked(ctx, lState, kmd, file, newDe, true)
2176	if err != nil {
2177		return WriteRange{}, nil, newlyDirtiedChildBytes, err
2178	}
2179
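	// If this write is being deferred until after the current sync,
	// note any bytes it extended the file by, so they can be
	// accounted for once the deferred write is replayed.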
2180	if fbo.doDeferWrite {
2181		df.AddDeferredNewBytes(bytesExtended)
2182	}
2183
2184	latestWrite = si.op.addWrite(uint64(off), uint64(len(buf)))
2185
2186	return latestWrite, dirtyPtrs, newlyDirtiedChildBytes, nil
2187}
2188
2189func (fbo *folderBlockOps) holdWritesLocked(
2190	ctx context.Context, lState *kbfssync.LockState) error {
2191	fbo.blockLock.AssertLocked(lState)
2192
2193	// Loop until either the hold channel is nil, or it has been
2194	// closed.  However, we can't hold the lock while we're waiting
2195	// for it to close, as that will cause deadlocks.  So we need to
2196	// verify that it's the _same_ channel that was closed after we
2197	// re-take the lock; otherwise, we need to wait again on the new
2198	// channel.
2199	for fbo.holdNewWritesCh != nil {
2200		ch := fbo.holdNewWritesCh
2201		fbo.blockLock.Unlock(lState)
2202		fbo.vlog.CLogf(ctx, libkb.VLog1, "Blocking write on hold channel")
2203		select {
2204		case <-ch:
2205			fbo.blockLock.Lock(lState)
2206			// If the channel hasn't changed since we checked it
2207			// outside of the lock, we are good to proceed.
2208			if ch == fbo.holdNewWritesCh {
2209				fbo.vlog.CLogf(
2210					ctx, libkb.VLog1, "Unblocking write on hold channel")
2211				return nil
2212			}
2213		case <-ctx.Done():
2214			fbo.blockLock.Lock(lState)
2215			return ctx.Err()
2216		}
2217	}
2218	return nil
2219}
2220
2221// Write writes the given data to the given file. May block if there
2222// is too much unflushed data; in that case, it will be unblocked by a
2223// future sync.
2224func (fbo *folderBlockOps) Write(
2225	ctx context.Context, lState *kbfssync.LockState,
2226	kmd KeyMetadataWithRootDirEntry, file Node, buf []byte, off int64) error {
2227	// If there is too much unflushed data, we should wait until some
	// of it gets flushed so our memory usage doesn't grow without
2229	// bound.
2230	c, err := fbo.config.DirtyBlockCache().RequestPermissionToDirty(ctx,
2231		fbo.id(), int64(len(buf)))
2232	if err != nil {
2233		return err
2234	}
2235	defer fbo.config.DirtyBlockCache().UpdateUnsyncedBytes(fbo.id(),
2236		-int64(len(buf)), false)
2237	err = fbo.maybeWaitOnDeferredWrites(ctx, lState, file, c)
2238	if err != nil {
2239		return err
2240	}
2241
2242	fbo.blockLock.Lock(lState)
2243	defer fbo.blockLock.Unlock(lState)
2244
2245	err = fbo.holdWritesLocked(ctx, lState)
2246	if err != nil {
2247		return err
2248	}
2249
2250	filePath, err := fbo.pathFromNodeForBlockWriteLocked(lState, file)
2251	if err != nil {
2252		return err
2253	}
2254
2255	defer func() {
2256		fbo.doDeferWrite = false
2257	}()
2258
2259	latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err := fbo.writeDataLocked(
2260		ctx, lState, kmd, filePath, buf, off)
2261	if err != nil {
2262		return err
2263	}
2264
2265	fbo.observers.localChange(ctx, file, latestWrite)
2266
2267	if fbo.doDeferWrite {
2268		// There's an ongoing sync, and this write altered dirty
2269		// blocks that are in the process of syncing.  So, we have to
2270		// redo this write once the sync is complete, using the new
2271		// file path.
2272		//
		// There is probably a less terrible way of doing this that
2274		// doesn't involve so much copying and rewriting, but this is
2275		// the most obviously correct way.
2276		bufCopy := make([]byte, len(buf))
2277		copy(bufCopy, buf)
2278		fbo.vlog.CLogf(
2279			ctx, libkb.VLog1, "Deferring a write to file %v off=%d len=%d",
2280			filePath.TailPointer(), off, len(buf))
2281		ds := fbo.deferred[filePath.TailRef()]
2282		ds.dirtyDeletes = append(ds.dirtyDeletes, dirtyPtrs...)
2283		ds.writes = append(ds.writes,
2284			func(ctx context.Context, lState *kbfssync.LockState,
2285				kmd KeyMetadataWithRootDirEntry, f data.Path) error {
2286				// We are about to re-dirty these bytes, so mark that
2287				// they will no longer be synced via the old file.
2288				df := fbo.getOrCreateDirtyFileLocked(lState, filePath)
2289				df.UpdateNotYetSyncingBytes(-newlyDirtiedChildBytes)
2290
2291				// Write the data again.  We know this won't be
2292				// deferred, so no need to check the new ptrs.
2293				_, _, _, err = fbo.writeDataLocked(
2294					ctx, lState, kmd, f, bufCopy, off)
2295				return err
2296			})
2297		ds.waitBytes += newlyDirtiedChildBytes
2298		fbo.deferred[filePath.TailRef()] = ds
2299	}
2300
2301	return nil
2302}
2303
// truncateExtendLocked is called by truncateLocked to extend a file,
// creating a hole.
2306func (fbo *folderBlockOps) truncateExtendLocked(
2307	ctx context.Context, lState *kbfssync.LockState,
2308	kmd KeyMetadataWithRootDirEntry, file data.Path, size uint64,
2309	parentBlocks []data.ParentBlockAndChildIndex) (
2310	WriteRange, []data.BlockPointer, error) {
2311	fblock, err := fbo.writeGetFileLocked(ctx, lState, kmd, file)
2312	if err != nil {
2313		return WriteRange{}, nil, err
2314	}
2315
2316	chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
2317	if err != nil {
2318		return WriteRange{}, nil, err
2319	}
2320
2321	fd := fbo.newFileData(lState, file, chargedTo, kmd)
2322
2323	de, err := fbo.getEntryLocked(ctx, lState, kmd, file, true)
2324	if err != nil {
2325		return WriteRange{}, nil, err
2326	}
2327	df := fbo.getOrCreateDirtyFileLocked(lState, file)
2328	newDe, dirtyPtrs, err := fd.TruncateExtend(
2329		ctx, size, fblock, parentBlocks, de, df)
2330	if err != nil {
2331		return WriteRange{}, nil, err
2332	}
2333
2334	now := fbo.nowUnixNano()
2335	newDe.Mtime = now
2336	newDe.Ctime = now
2337	err = fbo.updateEntryLocked(ctx, lState, kmd, file, newDe, true)
2338	if err != nil {
2339		return WriteRange{}, nil, err
2340	}
2341
2342	si, err := fbo.getOrCreateSyncInfoLocked(lState, de)
2343	if err != nil {
2344		return WriteRange{}, nil, err
2345	}
2346	latestWrite := si.op.addTruncate(size)
2347
2348	if fbo.config.DirtyBlockCache().ShouldForceSync(fbo.id()) {
2349		select {
2350		// If we can't send on the channel, that means a sync is
2351		// already in progress
2352		case fbo.forceSyncChan <- struct{}{}:
2353			fbo.vlog.CLogf(
2354				ctx, libkb.VLog1, "Forcing a sync due to full buffer")
2355		default:
2356		}
2357	}
2358
2359	fbo.vlog.CLogf(ctx, libkb.VLog1, "truncateExtendLocked: done")
2360	return latestWrite, dirtyPtrs, nil
2361}
2362
2363// Returns the set of newly-ID'd blocks created during this truncate
2364// that might need to be cleaned up if the truncate is deferred.
2365func (fbo *folderBlockOps) truncateLocked(
2366	ctx context.Context, lState *kbfssync.LockState,
2367	kmd KeyMetadataWithRootDirEntry, file data.Path, size uint64) (
2368	wr *WriteRange, ptrs []data.BlockPointer, dirtyBytes int64, err error) {
2369	_, wasAlreadyUnref := fbo.unrefCache[file.TailPointer().Ref()]
2370	defer func() {
2371		// if the truncate didn't succeed, and the file wasn't already
2372		// being cached, clear out any cached state.
2373		if err != nil && !wasAlreadyUnref {
2374			_ = fbo.clearCacheInfoLocked(lState, file)
2375		}
2376	}()
2377
2378	if jManager, err := GetJournalManager(fbo.config); err == nil {
2379		jManager.dirtyOpStart(fbo.id())
2380		defer jManager.dirtyOpEnd(fbo.id())
2381	}
2382
2383	fblock, err := fbo.writeGetFileLocked(ctx, lState, kmd, file)
2384	if err != nil {
2385		return &WriteRange{}, nil, 0, err
2386	}
2387
2388	chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
2389	if err != nil {
2390		return &WriteRange{}, nil, 0, err
2391	}
2392
2393	fd := fbo.newFileData(lState, file, chargedTo, kmd)
2394
2395	// find the block where the file should now end
2396	iSize := int64(size) // TODO: deal with overflow
2397	_, parentBlocks, block, nextBlockOff, startOff, _, err :=
2398		fd.GetFileBlockAtOffset(
2399			ctx, fblock, data.Int64Offset(iSize), data.BlockWrite)
2400	if err != nil {
2401		return &WriteRange{}, nil, 0, err
2402	}
2403
2404	currLen := int64(startOff) + int64(len(block.Contents))
2405	switch {
2406	case currLen+truncateExtendCutoffPoint < iSize:
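		// The file is being extended by a lot; extend it with a hole
		// (see truncateExtendLocked) instead of writing out all the
		// zeroes.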
		latestWrite, dirtyPtrs, err := fbo.truncateExtendLocked(
			ctx, lState, kmd, file, uint64(iSize), parentBlocks)
		return &latestWrite, dirtyPtrs, 0, err
2413	case currLen < iSize:
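		// The file is being extended by a small amount; just write
		// zeroes into the gap.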
2414		moreNeeded := iSize - currLen
		latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err :=
			fbo.writeDataLocked(
				ctx, lState, kmd, file, make([]byte, moreNeeded), currLen)
		return &latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err
2422	case currLen == iSize && nextBlockOff < 0:
2423		// same size!
2424		if !wasAlreadyUnref {
2425			_ = fbo.clearCacheInfoLocked(lState, file)
2426		}
2427		return nil, nil, 0, nil
2428	}
2429
2430	// update the local entry size
2431	de, err := fbo.getEntryLocked(ctx, lState, kmd, file, true)
2432	if err != nil {
2433		return nil, nil, 0, err
2434	}
2435
2436	si, err := fbo.getOrCreateSyncInfoLocked(lState, de)
2437	if err != nil {
2438		return nil, nil, 0, err
2439	}
2440
2441	newDe, dirtyPtrs, unrefs, newlyDirtiedChildBytes, err := fd.TruncateShrink(
2442		ctx, size, fblock, de)
2443	// Record the unrefs before checking the error so we remember the
2444	// state of newly dirtied blocks.
2445	si.unrefs = append(si.unrefs, unrefs...)
2446	if err != nil {
2447		return nil, nil, newlyDirtiedChildBytes, err
2448	}
2449
2450	// Update dirtied bytes and unrefs regardless of error.
2451	df := fbo.getOrCreateDirtyFileLocked(lState, file)
2452	df.UpdateNotYetSyncingBytes(newlyDirtiedChildBytes)
2453
2454	latestWrite := si.op.addTruncate(size)
2455	now := fbo.nowUnixNano()
2456	newDe.Mtime = now
2457	newDe.Ctime = now
2458	err = fbo.updateEntryLocked(ctx, lState, kmd, file, newDe, true)
2459	if err != nil {
2460		return nil, nil, newlyDirtiedChildBytes, err
2461	}
2462
2463	return &latestWrite, dirtyPtrs, newlyDirtiedChildBytes, nil
2464}
2465
2466// Truncate truncates or extends the given file to the given size.
2467// May block if there is too much unflushed data; in that case, it
2468// will be unblocked by a future sync.
2469func (fbo *folderBlockOps) Truncate(
2470	ctx context.Context, lState *kbfssync.LockState,
2471	kmd KeyMetadataWithRootDirEntry, file Node, size uint64) error {
2472	// If there is too much unflushed data, we should wait until some
	// of it gets flushed so our memory usage doesn't grow without
2474	// bound.
2475	//
2476	// Assume the whole remaining file will be dirty after this
2477	// truncate.  TODO: try to figure out how many bytes actually will
2478	// be dirtied ahead of time?
2479	c, err := fbo.config.DirtyBlockCache().RequestPermissionToDirty(ctx,
2480		fbo.id(), int64(size))
2481	if err != nil {
2482		return err
2483	}
2484	defer fbo.config.DirtyBlockCache().UpdateUnsyncedBytes(fbo.id(),
2485		-int64(size), false)
2486	err = fbo.maybeWaitOnDeferredWrites(ctx, lState, file, c)
2487	if err != nil {
2488		return err
2489	}
2490
2491	fbo.blockLock.Lock(lState)
2492	defer fbo.blockLock.Unlock(lState)
2493
2494	err = fbo.holdWritesLocked(ctx, lState)
2495	if err != nil {
2496		return err
2497	}
2498
2499	filePath, err := fbo.pathFromNodeForBlockWriteLocked(lState, file)
2500	if err != nil {
2501		return err
2502	}
2503
2504	defer func() {
2505		fbo.doDeferWrite = false
2506	}()
2507
2508	latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err := fbo.truncateLocked(
2509		ctx, lState, kmd, filePath, size)
2510	if err != nil {
2511		return err
2512	}
2513
2514	if latestWrite != nil {
2515		fbo.observers.localChange(ctx, file, *latestWrite)
2516	}
2517
2518	if fbo.doDeferWrite {
2519		// There's an ongoing sync, and this truncate altered
2520		// dirty blocks that are in the process of syncing.  So,
2521		// we have to redo this truncate once the sync is complete,
2522		// using the new file path.
2523		fbo.vlog.CLogf(
2524			ctx, libkb.VLog1, "Deferring a truncate to file %v",
2525			filePath.TailPointer())
2526		ds := fbo.deferred[filePath.TailRef()]
2527		ds.dirtyDeletes = append(ds.dirtyDeletes, dirtyPtrs...)
2528		ds.writes = append(ds.writes,
2529			func(ctx context.Context, lState *kbfssync.LockState,
2530				kmd KeyMetadataWithRootDirEntry, f data.Path) error {
2531				// We are about to re-dirty these bytes, so mark that
2532				// they will no longer be synced via the old file.
2533				df := fbo.getOrCreateDirtyFileLocked(lState, filePath)
2534				df.UpdateNotYetSyncingBytes(-newlyDirtiedChildBytes)
2535
2536				// Truncate the file again.  We know this won't be
2537				// deferred, so no need to check the new ptrs.
2538				_, _, _, err := fbo.truncateLocked(
2539					ctx, lState, kmd, f, size)
2540				return err
2541			})
2542		ds.waitBytes += newlyDirtiedChildBytes
2543		fbo.deferred[filePath.TailRef()] = ds
2544	}
2545
2546	return nil
2547}
2548
2549// IsDirty returns whether the given file is dirty; if false is
2550// returned, then the file doesn't need to be synced.
2551func (fbo *folderBlockOps) IsDirty(lState *kbfssync.LockState, file data.Path) bool {
2552	fbo.blockLock.RLock(lState)
2553	defer fbo.blockLock.RUnlock(lState)
2554	// A dirty file should probably match all three of these, but
2555	// check them individually just in case.
2556	if fbo.config.DirtyBlockCache().IsDirty(
2557		fbo.id(), file.TailPointer(), file.Branch) {
2558		return true
2559	}
2560
2561	if _, ok := fbo.dirtyFiles[file.TailPointer()]; ok {
2562		return ok
2563	}
2564
2565	_, ok := fbo.unrefCache[file.TailRef()]
2566	return ok
2567}
2568
2569func (fbo *folderBlockOps) clearCacheInfoLocked(lState *kbfssync.LockState,
2570	file data.Path) error {
2571	fbo.blockLock.AssertLocked(lState)
2572	ref := file.TailRef()
2573	delete(fbo.unrefCache, ref)
2574	df := fbo.dirtyFiles[file.TailPointer()]
2575	if df != nil {
2576		err := df.FinishSync()
2577		if err != nil {
2578			return err
2579		}
2580		delete(fbo.dirtyFiles, file.TailPointer())
2581	}
2582	return nil
2583}
2584
2585func (fbo *folderBlockOps) clearAllDirtyDirsLocked(
2586	ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata) {
2587	fbo.blockLock.AssertLocked(lState)
2588	dirtyBCache := fbo.config.DirtyBlockCache()
2589	for ptr := range fbo.dirtyDirs {
2590		dir := data.Path{
2591			FolderBranch: fbo.folderBranch,
2592			Path: []data.PathNode{
2593				{BlockPointer: ptr,
2594					Name: data.NewPathPartString(ptr.String(), nil),
2595				},
2596			},
2597		}
2598		dd := fbo.newDirDataLocked(lState, dir, keybase1.UserOrTeamID(""), kmd)
2599		childPtrs, err := dd.GetDirtyChildPtrs(ctx, dirtyBCache)
2600		if err != nil {
2601			fbo.log.CDebugf(ctx, "Failed to get child ptrs for %v: %+v",
2602				ptr, err)
2603		}
2604		for childPtr := range childPtrs {
2605			err := dirtyBCache.Delete(fbo.id(), childPtr, fbo.branch())
2606			if err != nil {
2607				fbo.log.CDebugf(
					ctx, "Failed to delete %v from dirty cache: %+v",
2609					childPtr, err)
2610			}
2611		}
2612
2613		err = dirtyBCache.Delete(fbo.id(), ptr, fbo.branch())
2614		if err != nil {
2615			fbo.log.CDebugf(ctx, "Failed to delete %v from dirty cache: %+v",
2616				ptr, err)
2617		}
2618	}
2619	fbo.dirtyDirs = make(map[data.BlockPointer][]data.BlockInfo)
2620	fbo.dirtyRootDirEntry = nil
2621	fbo.dirtyDirsSyncing = false
2622	deferredDirUpdates := fbo.deferredDirUpdates
2623	fbo.deferredDirUpdates = nil
2624	// Re-apply any deferred directory updates related to files that
2625	// weren't synced as part of this batch.
2626	for _, f := range deferredDirUpdates {
2627		err := f(lState)
2628		if err != nil {
2629			fbo.log.CWarningf(ctx, "Deferred entry update failed: %+v", err)
2630		}
2631	}
2632}
2633
// ClearCacheInfo removes any cached info for the given file.
2635func (fbo *folderBlockOps) ClearCacheInfo(
2636	lState *kbfssync.LockState, file data.Path) error {
2637	fbo.blockLock.Lock(lState)
2638	defer fbo.blockLock.Unlock(lState)
2639	return fbo.clearCacheInfoLocked(lState, file)
2640}
2641
2642// revertSyncInfoAfterRecoverableError updates the saved sync info to
2643// include all the blocks from before the error, except for those that
2644// have encountered recoverable block errors themselves.
2645func (fbo *folderBlockOps) revertSyncInfoAfterRecoverableError(
2646	ctx context.Context, blocksToRemove []data.BlockPointer, result fileSyncState) {
2647	si := result.si
2648	savedSi := result.savedSi
2649
2650	// Save the blocks we need to clean up on the next attempt.
2651	toClean := si.toCleanIfUnused
2652
2653	newIndirect := make(map[data.BlockPointer]bool)
2654	for _, ptr := range result.newIndirectFileBlockPtrs {
2655		newIndirect[ptr] = true
2656	}
2657
2658	// Propagate all unrefs forward, except those that belong to new
2659	// blocks that were created during the sync.
2660	unrefs := make([]data.BlockInfo, 0, len(si.unrefs))
2661	for _, unref := range si.unrefs {
2662		if newIndirect[unref.BlockPointer] {
2663			fbo.vlog.CLogf(ctx, libkb.VLog1, "Dropping unref %v", unref)
2664			continue
2665		}
2666		unrefs = append(unrefs, unref)
2667	}
2668
2669	// This sync will be retried and needs new blocks, so
2670	// reset everything in the sync info.
2671	*si = *savedSi
2672	si.toCleanIfUnused = toClean
2673	si.unrefs = unrefs
2674	if si.bps == nil {
2675		return
2676	}
2677
2678	// Mark any bad pointers so they get skipped next time.
2679	blocksToRemoveSet := make(map[data.BlockPointer]bool)
2680	for _, ptr := range blocksToRemove {
2681		blocksToRemoveSet[ptr] = true
2682	}
2683
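	// If the deep copy fails, leave the existing bps in place.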
2684	newBps, err := savedSi.bps.deepCopyWithBlacklist(ctx, blocksToRemoveSet)
2685	if err != nil {
2686		return
2687	}
2688	si.bps = newBps
2689}
2690
2691// fileSyncState holds state for a sync operation for a single
2692// file.
2693type fileSyncState struct {
2694	// If fblock is non-nil, the (dirty, indirect, cached) block
2695	// it points to will be set to savedFblock on a recoverable
2696	// error.
2697	fblock, savedFblock *data.FileBlock
2698
2699	// redirtyOnRecoverableError, which is non-nil only when fblock is
2700	// non-nil, contains pointers that need to be re-dirtied if the
2701	// top block gets copied during the sync, and a recoverable error
2702	// happens.  Maps to the old block pointer for the block, which
2703	// would need a DirtyBlockCache.Delete.
2704	redirtyOnRecoverableError map[data.BlockPointer]data.BlockPointer
2705
2706	// If si is non-nil, its updated state will be reset on
2707	// error. Also, if the error is recoverable, it will be
2708	// reverted to savedSi.
2709	//
2710	// TODO: Working with si in this way is racy, since si is a
2711	// member of unrefCache.
2712	si, savedSi *syncInfo
2713
2714	// oldFileBlockPtrs is a list of transient entries in the
2715	// block cache for the file, which should be removed when the
2716	// sync finishes.
2717	oldFileBlockPtrs []data.BlockPointer
2718
2719	// newIndirectFileBlockPtrs is a list of permanent entries
2720	// added to the block cache for the file, which should be
2721	// removed after the blocks have been sent to the server.
2722	// They are not removed on an error, because in that case the
2723	// file is still dirty locally and may get another chance to
2724	// be sync'd.
2725	//
2726	// TODO: This can be a list of IDs instead.
2727	newIndirectFileBlockPtrs []data.BlockPointer
2728}
2729
2730// startSyncWrite contains the portion of StartSync() that's done
2731// while write-locking blockLock.  If there is no dirty de cache
2732// entry, dirtyDe will be nil.
2733func (fbo *folderBlockOps) startSyncWrite(ctx context.Context,
2734	lState *kbfssync.LockState, md *RootMetadata, file data.Path) (
2735	fblock *data.FileBlock, bps blockPutStateCopiable, syncState fileSyncState,
2736	dirtyDe *data.DirEntry, err error) {
2737	fbo.blockLock.Lock(lState)
2738	defer fbo.blockLock.Unlock(lState)
2739
2740	// update the parent directories, and write all the new blocks out
2741	// to disk
2742	fblock, err = fbo.getFileLocked(ctx, lState, md.ReadOnly(), file, data.BlockWrite)
2743	if err != nil {
2744		return nil, nil, syncState, nil, err
2745	}
2746
2747	fileRef := file.TailRef()
2748	si, ok := fbo.unrefCache[fileRef]
2749	if !ok {
2750		return nil, nil, syncState, nil,
2751			fmt.Errorf("No syncOp found for file ref %v", fileRef)
2752	}
2753
2754	// Collapse the write range to reduce the size of the sync op.
2755	si.op.Writes = si.op.collapseWriteRange(nil)
	// If this function returns successfully, we need to make sure the op
2757	// in `md` is not the same variable as the op in `unrefCache`,
2758	// because the latter could get updated still by local writes
2759	// before `md` is flushed to the server.  We don't copy it here
2760	// because code below still needs to modify it (and by extension,
2761	// the one stored in `syncState.si`).
2762	si.op.setFinalPath(file)
2763	md.AddOp(si.op)
2764
2765	// Fill in syncState.
2766	if fblock.IsInd {
2767		fblockCopy := fblock.DeepCopy()
2768		syncState.fblock = fblock
2769		syncState.savedFblock = fblockCopy
2770		syncState.redirtyOnRecoverableError = make(map[data.BlockPointer]data.BlockPointer)
2771	}
2772	syncState.si = si
2773	syncState.savedSi, err = si.DeepCopy(ctx, fbo.config.Codec())
2774	if err != nil {
2775		return nil, nil, syncState, nil, err
2776	}
2777
2778	if si.bps == nil {
2779		si.bps = newBlockPutStateMemory(1)
2780	} else {
2781		// reinstate byte accounting from the previous Sync
2782		md.SetRefBytes(si.refBytes)
2783		md.AddDiskUsage(si.refBytes)
2784		md.SetUnrefBytes(si.unrefBytes)
2785		md.SetMDRefBytes(0) // this will be calculated anew
2786		md.SetDiskUsage(md.DiskUsage() - si.unrefBytes)
2787		syncState.newIndirectFileBlockPtrs = append(
2788			syncState.newIndirectFileBlockPtrs, si.op.Refs()...)
2789	}
2790	defer func() {
2791		si.refBytes = md.RefBytes()
2792		si.unrefBytes = md.UnrefBytes()
2793	}()
2794
2795	chargedTo, err := fbo.getChargedToLocked(ctx, lState, md)
2796	if err != nil {
2797		return nil, nil, syncState, nil, err
2798	}
2799
2800	dirtyBcache := fbo.config.DirtyBlockCache()
2801	df := fbo.getOrCreateDirtyFileLocked(lState, file)
2802	fd := fbo.newFileData(lState, file, chargedTo, md.ReadOnly())
2803
2804	// Note: below we add possibly updated file blocks as "unref" and
2805	// "ref" blocks.  This is fine, since conflict resolution or
2806	// notifications will never happen within a file.
2807
2808	// If needed, split the children blocks up along new boundaries
2809	// (e.g., if using a fingerprint-based block splitter).
2810	unrefs, err := fd.Split(ctx, fbo.id(), dirtyBcache, fblock, df)
2811	// Preserve any unrefs before checking the error.
2812	for _, unref := range unrefs {
2813		md.AddUnrefBlock(unref)
2814	}
2815	if err != nil {
2816		return nil, nil, syncState, nil, err
2817	}
2818
2819	// Ready all children blocks, if any.
2820	oldPtrs, err := fd.Ready(ctx, fbo.id(), fbo.config.BlockCache(),
2821		fbo.config.DirtyBlockCache(), fbo.config.BlockOps(), si.bps, fblock, df,
2822		fbo.cacheHashBehavior())
2823	if err != nil {
2824		return nil, nil, syncState, nil, err
2825	}
2826
2827	for newInfo, oldPtr := range oldPtrs {
2828		syncState.newIndirectFileBlockPtrs = append(
2829			syncState.newIndirectFileBlockPtrs, newInfo.BlockPointer)
2830		df.SetBlockOrphaned(oldPtr, true)
2831
2832		// Defer the DirtyBlockCache.Delete until after the new path
2833		// is ready, in case anyone tries to read the dirty file in
2834		// the meantime.
2835		syncState.oldFileBlockPtrs = append(syncState.oldFileBlockPtrs, oldPtr)
2836
2837		md.AddRefBlock(newInfo)
2838
2839		// If this block is replacing a block from a previous, failed
2840		// Sync, we need to take that block out of the refs list, and
2841		// avoid unrefing it as well.
2842		si.removeReplacedBlock(ctx, fbo.log, oldPtr)
2843
2844		err = df.SetBlockSyncing(ctx, oldPtr)
2845		if err != nil {
2846			return nil, nil, syncState, nil, err
2847		}
2848		syncState.redirtyOnRecoverableError[newInfo.BlockPointer] = oldPtr
2849	}
2850
2851	err = df.SetBlockSyncing(ctx, file.TailPointer())
2852	if err != nil {
2853		return nil, nil, syncState, nil, err
2854	}
2855	syncState.oldFileBlockPtrs = append(
2856		syncState.oldFileBlockPtrs, file.TailPointer())
2857
2858	// Capture the current de before we release the block lock, so
2859	// other deferred writes don't slip in.
2860	dd := fbo.newDirDataLocked(lState, *file.ParentPath(), chargedTo, md)
2861	de, err := dd.Lookup(ctx, file.TailName())
2862	if err != nil {
2863		return nil, nil, syncState, nil, err
2864	}
2865	dirtyDe = &de
2866
2867	// Leave a copy of the syncOp in `unrefCache`, since it may be
2868	// modified by future local writes while the syncOp in `md` should
2869	// only be modified by the rest of this sync process.
2870	var syncOpCopy *syncOp
2871	err = kbfscodec.Update(fbo.config.Codec(), &syncOpCopy, si.op)
2872	if err != nil {
2873		return nil, nil, syncState, nil, err
2874	}
2875	fbo.unrefCache[fileRef].op = syncOpCopy
2876
2877	// If there are any deferred bytes, it must be because this is
	// a retried sync and some blocks snuck in between syncs. Those
2879	// blocks will get transferred now, but they are also on the
2880	// deferred list and will be retried on the next sync as well.
2881	df.AssimilateDeferredNewBytes()
2882
2883	// TODO: Returning si.bps in this way is racy, since si is a
2884	// member of unrefCache.
2885	return fblock, si.bps, syncState, dirtyDe, nil
2886}
2887
2888func prepDirtyEntryForSync(md *RootMetadata, si *syncInfo, dirtyDe *data.DirEntry) {
2889	// Add in the cached unref'd blocks.
2890	si.mergeUnrefCache(md)
2891	// Update the file's directory entry to the cached copy.
2892	if dirtyDe != nil {
2893		dirtyDe.EncodedSize = si.oldInfo.EncodedSize
2894	}
2895}
2896
2897// mergeDirtyEntryWithDBM sets the entry for a file into a directory,
2898// storing all the affected blocks into `dbm` rather than the dirty
2899// block cache.  It must only be called with an entry that's already
2900// been written to the dirty block cache, such that no new blocks are
2901// dirtied.
2902func (fbo *folderBlockOps) mergeDirtyEntryWithDBM(
2903	ctx context.Context, lState *kbfssync.LockState, file data.Path, md libkey.KeyMetadata,
2904	dbm dirBlockMap, dirtyDe data.DirEntry) error {
2905	// Lock and fetch for reading only, any dirty blocks will go into
2906	// the dbm.
2907	fbo.blockLock.RLock(lState)
2908	defer fbo.blockLock.RUnlock(lState)
2909
2910	chargedTo, err := fbo.getChargedToLocked(ctx, lState, md)
2911	if err != nil {
2912		return err
2913	}
2914
2915	dd := fbo.newDirDataWithDBMLocked(
2916		lState, *file.ParentPath(), chargedTo, md, dbm)
2917	unrefs, err := dd.SetEntry(ctx, file.TailName(), dirtyDe)
2918	if err != nil {
2919		return err
2920	}
2921	if len(unrefs) != 0 {
2922		return errors.Errorf(
2923			"Merging dirty entry produced %d new unrefs", len(unrefs))
2924	}
2925	return nil
2926}
2927
2928// StartSync starts a sync for the given file. It returns the new
2929// FileBlock which has the readied top-level block which includes all
2930// writes since the last sync. Must be used with CleanupSyncState()
2931// and UpdatePointers/FinishSyncLocked() like so:
2932//
2933// 	fblock, bps, dirtyDe, syncState, err :=
2934//		...fbo.StartSync(ctx, lState, md, uid, file)
2935//	defer func() {
2936//		...fbo.CleanupSyncState(
2937//			ctx, lState, md, file, ..., syncState, err)
2938//	}()
2939//	if err != nil {
2940//		...
2941//	}
2942//      ...
2943//
2944//
2945//	... = fbo.UpdatePointers(..., func() error {
2946//      ...fbo.FinishSyncLocked(ctx, lState, file, ..., syncState)
2947//  })
2948func (fbo *folderBlockOps) StartSync(ctx context.Context,
2949	lState *kbfssync.LockState, md *RootMetadata, file data.Path) (
2950	fblock *data.FileBlock, bps blockPutStateCopiable, dirtyDe *data.DirEntry,
2951	syncState fileSyncState, err error) {
2952	if jManager, err := GetJournalManager(fbo.config); err == nil {
2953		jManager.dirtyOpStart(fbo.id())
2954	}
2955
2956	fblock, bps, syncState, dirtyDe, err = fbo.startSyncWrite(
2957		ctx, lState, md, file)
2958	if err != nil {
2959		return nil, nil, nil, syncState, err
2960	}
2961
2962	prepDirtyEntryForSync(md, syncState.si, dirtyDe)
2963	return fblock, bps, dirtyDe, syncState, err
2964}
2965
2966// Does any clean-up for a sync of the given file, given an error
2967// (which may be nil) that happens during or after StartSync() and
2968// before FinishSync(). blocksToRemove may be nil.
2969func (fbo *folderBlockOps) CleanupSyncState(
2970	ctx context.Context, lState *kbfssync.LockState, md ReadOnlyRootMetadata,
2971	file data.Path, blocksToRemove []data.BlockPointer,
2972	result fileSyncState, err error) {
2973	if jManager, err := GetJournalManager(fbo.config); err == nil {
2974		defer jManager.dirtyOpEnd(fbo.id())
2975	}
2976
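	// Everything below is cleanup that's only needed if the sync
	// failed.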
2977	if err == nil {
2978		return
2979	}
2980
2981	fbo.blockLock.Lock(lState)
2982	defer fbo.blockLock.Unlock(lState)
2983
2984	// Notify error listeners before we reset the dirty blocks and
2985	// permissions to be granted.
2986	fbo.notifyErrListenersLocked(lState, file.TailPointer(), err)
2987
2988	// If there was an error, we need to back out any changes that
2989	// might have been filled into the sync op, because it could
2990	// get reused again in a later Sync call.
2991	if result.si != nil {
2992		result.si.op.resetUpdateState()
2993
2994		// Save this MD for later, so we can clean up its
2995		// newly-referenced block pointers if necessary.
2996		bpsCopy, err := result.si.bps.deepCopy(ctx)
2997		if err != nil {
2998			return
2999		}
3000		result.si.toCleanIfUnused = append(result.si.toCleanIfUnused,
3001			mdToCleanIfUnused{md, bpsCopy})
3002	}
3003	if isRecoverableBlockError(err) {
3004		if result.si != nil {
3005			fbo.revertSyncInfoAfterRecoverableError(ctx, blocksToRemove, result)
3006		}
3007		if result.fblock != nil {
3008			result.fblock.Set(result.savedFblock)
3009			fbo.fixChildBlocksAfterRecoverableErrorLocked(
3010				ctx, lState, file, md,
3011				result.redirtyOnRecoverableError)
3012		}
3013	} else {
3014		// Since the sync has errored out unrecoverably, the deferred
3015		// bytes are already accounted for.
3016		ds := fbo.deferred[file.TailRef()]
3017		if df := fbo.dirtyFiles[file.TailPointer()]; df != nil {
3018			df.UpdateNotYetSyncingBytes(-ds.waitBytes)
3019
3020			// Some blocks that were dirty are now clean under their
3021			// readied block ID, and now live in the bps rather than
3022			// the dirty bcache, so we can delete them from the dirty
3023			// bcache.
3024			dirtyBcache := fbo.config.DirtyBlockCache()
3025			for _, ptr := range result.oldFileBlockPtrs {
3026				if df.IsBlockOrphaned(ptr) {
3027					fbo.vlog.CLogf(
3028						ctx, libkb.VLog1, "Deleting dirty orphan: %v", ptr)
3029					if err := dirtyBcache.Delete(fbo.id(), ptr,
3030						fbo.branch()); err != nil {
3031						fbo.vlog.CLogf(
3032							ctx, libkb.VLog1, "Couldn't delete %v", ptr)
3033					}
3034				}
3035			}
3036		}
3037
3038		// On an unrecoverable error, the deferred writes aren't
3039		// needed anymore since they're already part of the
3040		// (still-)dirty blocks.
3041		delete(fbo.deferred, file.TailRef())
3042	}
3043
	// The sync is over, due to an error, so reset the syncing state
	// so that we don't defer any subsequent writes.  Old syncing
	// blocks are now just dirty.
3047	if df := fbo.dirtyFiles[file.TailPointer()]; df != nil {
3048		df.ResetSyncingBlocksToDirty()
3049	}
3050}
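
// A rough sketch of how a caller is expected to drive the sync
// lifecycle implemented by StartSync, CleanupSyncState, and
// FinishSyncLocked (hypothetical; the real sequence lives in
// folderBranchOps, which also performs the block puts and the MD put,
// and holds blockLock when calling the locked variants):
//
//	fblock, bps, dirtyDe, syncState, err := fbo.StartSync(
//		ctx, lState, md, file)
//	defer func() {
//		fbo.CleanupSyncState(
//			ctx, lState, md.ReadOnly(), file, blocksToRemove, syncState, err)
//	}()
//	if err != nil {
//		return err
//	}
//	// ... put the blocks in bps and commit the new MD revision ...
//	stillDirty, err := fbo.FinishSyncLocked(
//		ctx, lState, file, newPath, md.ReadOnly(), syncState, fbm)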
3051
3052// cleanUpUnusedBlocks cleans up the blocks from any previous failed
3053// sync attempts.
3054func (fbo *folderBlockOps) cleanUpUnusedBlocks(ctx context.Context,
3055	md ReadOnlyRootMetadata, syncState fileSyncState, fbm *folderBlockManager) error {
3056	numToClean := len(syncState.si.toCleanIfUnused)
3057	if numToClean == 0 {
3058		return nil
3059	}
3060
3061	// What blocks are referenced in the successful MD?
3062	refs := make(map[data.BlockPointer]bool)
3063	for _, op := range md.data.Changes.Ops {
3064		for _, ptr := range op.Refs() {
3065			if ptr == data.ZeroPtr {
3066				panic("Unexpected zero ref ptr in a sync MD revision")
3067			}
3068			refs[ptr] = true
3069		}
3070		for _, update := range op.allUpdates() {
3071			if update.Ref == data.ZeroPtr {
3072				panic("Unexpected zero update ref ptr in a sync MD revision")
3073			}
3074
3075			refs[update.Ref] = true
3076		}
3077	}
3078
	// For each MD to clean, clean up its old failed blocks
	// immediately if its merge status matches that of the successful
	// put and they didn't get referenced in the successful put.  If
	// the merge status is different (e.g., we ended up on a conflict
	// branch), clean them up only if the original revision ultimately
	// fails.  If the same block appears more than once, the entry
	// with a different merged status takes precedence (which will
	// always come earlier in the list of MDs).
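	//
	// For example (a hypothetical scenario): if an earlier attempt
	// failed while writing an unmerged revision and this successful
	// put is merged, the earlier attempt's blocks get
	// blockDeleteOnMDFail and survive unless that unmerged revision
	// ultimately fails; if the earlier attempt was also merged, its
	// blocks get blockDeleteAlways and are cleaned up now, except for
	// any pointers the successful MD still references.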
3087	blocksSeen := make(map[data.BlockPointer]bool)
3088	for _, oldMD := range syncState.si.toCleanIfUnused {
3089		bdType := blockDeleteAlways
3090		if oldMD.md.MergedStatus() != md.MergedStatus() {
3091			bdType = blockDeleteOnMDFail
3092		}
3093
3094		failedBps := newBlockPutStateMemory(oldMD.bps.numBlocks())
3095		for _, ptr := range oldMD.bps.Ptrs() {
3096			if ptr == data.ZeroPtr {
3097				panic("Unexpected zero block ptr in an old sync MD revision")
3098			}
3099			if blocksSeen[ptr] {
3100				continue
3101			}
3102			blocksSeen[ptr] = true
3103			if refs[ptr] && bdType == blockDeleteAlways {
3104				continue
3105			}
3106			failedBps.blockStates[ptr] = blockState{}
			fbo.vlog.CLogf(
				ctx, libkb.VLog1, "Cleaning up block %v from a previous "+
					"failed revision %d (merged status %s, bdType=%d)", ptr,
				oldMD.md.Revision(), oldMD.md.MergedStatus(), bdType)
3111		}
3112
3113		if len(failedBps.blockStates) > 0 {
3114			fbm.cleanUpBlockState(oldMD.md, failedBps, bdType)
3115		}
3116	}
3117	return nil
3118}
3119
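// doDeferredWritesLocked replays any writes or truncates that were
// deferred on `oldPath` while a sync was in flight, applying each one
// against `newPath`.  Each deferred write is a closure captured at
// write time, so replaying it re-dirties the file on top of the
// newly-synced blocks.  It reports whether the file remains dirty
// because deferred writes were replayed.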
3120func (fbo *folderBlockOps) doDeferredWritesLocked(ctx context.Context,
3121	lState *kbfssync.LockState, kmd KeyMetadataWithRootDirEntry,
3122	oldPath, newPath data.Path) (stillDirty bool, err error) {
3123	fbo.blockLock.AssertLocked(lState)
3124
3125	// Redo any writes or truncates that happened to our file while
3126	// the sync was happening.
3127	ds := fbo.deferred[oldPath.TailRef()]
3128	stillDirty = len(ds.writes) != 0
3129	delete(fbo.deferred, oldPath.TailRef())
3130
3131	// Clear any dirty blocks that resulted from a write/truncate
3132	// happening during the sync, since we're redoing them below.
3133	dirtyBcache := fbo.config.DirtyBlockCache()
3134	for _, ptr := range ds.dirtyDeletes {
3135		fbo.vlog.CLogf(
3136			ctx, libkb.VLog1, "Deleting deferred dirty ptr %v", ptr)
3137		if err := dirtyBcache.Delete(fbo.id(), ptr, fbo.branch()); err != nil {
3138			return true, err
3139		}
3140	}
3141
3142	for _, f := range ds.writes {
3143		err = f(ctx, lState, kmd, newPath)
3144		if err != nil {
3145			// It's a little weird to return an error from a deferred
3146			// write here. Hopefully that will never happen.
3147			return true, err
3148		}
3149	}
3150	return stillDirty, nil
3151}
3152
3153// FinishSyncLocked finishes the sync process for a file, given the
3154// state from StartSync. Specifically, it re-applies any writes that
3155// happened since the call to StartSync.
3156func (fbo *folderBlockOps) FinishSyncLocked(
3157	ctx context.Context, lState *kbfssync.LockState,
3158	oldPath, newPath data.Path, md ReadOnlyRootMetadata,
3159	syncState fileSyncState, fbm *folderBlockManager) (
3160	stillDirty bool, err error) {
3161	fbo.blockLock.AssertLocked(lState)
3162
3163	dirtyBcache := fbo.config.DirtyBlockCache()
3164	for _, ptr := range syncState.oldFileBlockPtrs {
3165		fbo.vlog.CLogf(ctx, libkb.VLog1, "Deleting dirty ptr %v", ptr)
3166		if err := dirtyBcache.Delete(fbo.id(), ptr, fbo.branch()); err != nil {
3167			return true, err
3168		}
3169	}
3170
3171	bcache := fbo.config.BlockCache()
3172	for _, ptr := range syncState.newIndirectFileBlockPtrs {
3173		err := bcache.DeletePermanent(ptr.ID)
3174		if err != nil {
3175			fbo.log.CWarningf(ctx, "Error when deleting %v from cache: %v",
3176				ptr.ID, err)
3177		}
3178	}
3179
3180	stillDirty, err = fbo.doDeferredWritesLocked(
3181		ctx, lState, md, oldPath, newPath)
3182	if err != nil {
3183		return true, err
3184	}
3185
3186	// Clear cached info for the old path.  We are guaranteed that any
3187	// concurrent write to this file was deferred, even if it was to a
3188	// block that wasn't currently being sync'd, since the top-most
3189	// block is always in dirtyFiles and is always dirtied during a
3190	// write/truncate.
3191	//
3192	// Also, we can get rid of all the sync state that might have
3193	// happened during the sync, since we will replay the writes
3194	// below anyway.
3195	if err := fbo.clearCacheInfoLocked(lState, oldPath); err != nil {
3196		return true, err
3197	}
3198
3199	if err := fbo.cleanUpUnusedBlocks(ctx, md, syncState, fbm); err != nil {
3200		return true, err
3201	}
3202
3203	return stillDirty, nil
3204}
3205
// notifyErrListenersLocked notifies any write operations that are
// blocked on a file so that they can learn about unrecoverable sync
// errors.
3208func (fbo *folderBlockOps) notifyErrListenersLocked(
3209	lState *kbfssync.LockState, ptr data.BlockPointer, err error) {
3210	fbo.blockLock.AssertLocked(lState)
3211	if isRecoverableBlockError(err) {
3212		// Don't bother any listeners with this error, since the sync
3213		// will be retried.  Unless the sync has reached its retry
3214		// limit, but in that case the listeners will just proceed as
3215		// normal once the dirty block cache bytes are freed, and
3216		// that's ok since this error isn't fatal.
3217		return
3218	}
3219	df := fbo.dirtyFiles[ptr]
3220	if df != nil {
3221		df.NotifyErrListeners(err)
3222	}
3223}
3224
type searchWithOutOfDateCacheError struct{}
3227
3228func (e searchWithOutOfDateCacheError) Error() string {
	return "Search is using an out-of-date node cache; " +
		"try again with a clean cache."
3231}
3232
// searchForNodesInDirLocked recursively tries to find a path, and
// ultimately a node, for each searched-for pointer, given the set of
// pointers that were updated in a particular operation.  The keys in
// nodeMap make up the set of BlockPointers that are being searched
// for, and nodeMap is updated in place to include the corresponding
// discovered nodes.
//
// Returns the number of nodes found by this invocation.  If the
// error it returns is searchWithOutOfDateCacheError, the search
// should be retried by the caller with a clean cache.
3242func (fbo *folderBlockOps) searchForNodesInDirLocked(ctx context.Context,
3243	lState *kbfssync.LockState, cache NodeCache, newPtrs map[data.BlockPointer]bool,
3244	kmd libkey.KeyMetadata, rootNode Node, currDir data.Path, nodeMap map[data.BlockPointer]Node,
3245	numNodesFoundSoFar int) (int, error) {
3246	fbo.blockLock.AssertAnyLocked(lState)
3247
3248	chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
3249	if err != nil {
3250		return 0, err
3251	}
3252	dd := fbo.newDirDataLocked(lState, currDir, chargedTo, kmd)
3253	entries, err := dd.GetEntries(ctx)
3254	if err != nil {
3255		return 0, err
3256	}
3257
	// Fetching the entries may have unlocked blockLock, which means
	// the cache could have changed out from under us.  Verify that
	// didn't happen, so we can avoid messing it up with nodes from an
	// old MD version.  If it did happen, return a special error that
	// lets the caller know they should retry with a fresh cache.
3263	if currDir.Path[0].BlockPointer !=
3264		cache.PathFromNode(rootNode).TailPointer() {
3265		return 0, searchWithOutOfDateCacheError{}
3266	}
3267
3268	if numNodesFoundSoFar >= len(nodeMap) {
3269		return 0, nil
3270	}
3271
3272	numNodesFound := 0
3273	for name, de := range entries {
3274		childPath := currDir.ChildPath(name, de.BlockPointer, nil)
3275		if _, ok := nodeMap[de.BlockPointer]; ok {
3276			// make a node for every pathnode
3277			n := rootNode
3278			for i, pn := range childPath.Path[1:] {
3279				if !pn.BlockPointer.IsValid() {
3280					// Temporary debugging output for KBFS-1764 -- the
3281					// GetOrCreate call below will panic.
3282					fbo.log.CDebugf(ctx, "Invalid block pointer, path=%s, "+
3283						"path.path=%v (index %d), name=%s, de=%#v, "+
3284						"nodeMap=%v, newPtrs=%v, kmd=%#v",
3285						childPath, childPath.Path, i, name, de, nodeMap,
3286						newPtrs, kmd)
3287				}
3288				et := data.Dir
3289				if i == len(childPath.Path)-2 {
3290					et = de.Type
3291				}
3292				n, err = cache.GetOrCreate(pn.BlockPointer, pn.Name, n, et)
3293				if err != nil {
3294					return 0, err
3295				}
3296			}
3297			childPath.ChildObfuscator = n.Obfuscator()
3298			nodeMap[de.BlockPointer] = n
3299			numNodesFound++
3300			if numNodesFoundSoFar+numNodesFound >= len(nodeMap) {
3301				return numNodesFound, nil
3302			}
3303		}
3304
3305		// otherwise, recurse if this represents an updated block
3306		if _, ok := newPtrs[de.BlockPointer]; de.Type == data.Dir && ok {
3307			if childPath.Obfuscator() == nil {
3308				childPath.ChildObfuscator = fbo.nodeCache.ObfuscatorMaker()()
3309			}
3310			n, err := fbo.searchForNodesInDirLocked(ctx, lState, cache,
3311				newPtrs, kmd, rootNode, childPath, nodeMap,
3312				numNodesFoundSoFar+numNodesFound)
3313			if err != nil {
3314				return 0, err
3315			}
3316			numNodesFound += n
3317			if numNodesFoundSoFar+numNodesFound >= len(nodeMap) {
3318				return numNodesFound, nil
3319			}
3320		}
3321	}
3322
3323	return numNodesFound, nil
3324}
3325
3326func (fbo *folderBlockOps) trySearchWithCacheLocked(ctx context.Context,
3327	lState *kbfssync.LockState, cache NodeCache, ptrs []data.BlockPointer,
3328	newPtrs map[data.BlockPointer]bool, kmd libkey.KeyMetadata, rootPtr data.BlockPointer) (
3329	map[data.BlockPointer]Node, error) {
3330	fbo.blockLock.AssertAnyLocked(lState)
3331
3332	nodeMap := make(map[data.BlockPointer]Node)
3333	for _, ptr := range ptrs {
3334		nodeMap[ptr] = nil
3335	}
3336
3337	if len(ptrs) == 0 {
3338		return nodeMap, nil
3339	}
3340
3341	var node Node
3342	// The node cache used by the main part of KBFS is
3343	// fbo.nodeCache. This basically maps from BlockPointers to
3344	// Nodes. Nodes are used by the callers of the library, but
3345	// internally we need to know the series of BlockPointers and
3346	// file/dir names that make up the path of the corresponding
3347	// file/dir. fbo.nodeCache is long-lived and never invalidated.
3348	//
3349	// As folderBranchOps gets informed of new local or remote MD
3350	// updates, which change the BlockPointers of some subset of the
3351	// nodes in this TLF, it calls nodeCache.UpdatePointer for each
3352	// change. Then, when a caller passes some old Node they have
3353	// lying around into an FBO call, we can translate it to its
3354	// current path using fbo.nodeCache. Note that on every TLF
3355	// modification, we are guaranteed that the BlockPointer of the
3356	// root directory will change (because of the merkle-ish tree of
3357	// content hashes we use to assign BlockPointers).
3358	//
3359	// fbo.nodeCache needs to maintain the absolute latest mappings
3360	// for the TLF, or else FBO calls won't see up-to-date data. The
3361	// tension in search comes from the fact that we are trying to
3362	// discover the BlockPointers of certain files at a specific point
3363	// in the MD history, which is not necessarily the same as the
3364	// most-recently-seen MD update. Specifically, some callers
3365	// process a specific range of MDs, but folderBranchOps may have
3366	// heard about a newer one before, or during, when the caller
3367	// started processing. That means fbo.nodeCache may have been
3368	// updated to reflect the newest BlockPointers, and is no longer
3369	// correct as a cache for our search for the data at the old point
3370	// in time.
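	//
	// For example (a hypothetical scenario): a conflict-resolution
	// caller may be resolving pointers as of revision 10 while
	// folderBranchOps has already applied revision 12 to
	// fbo.nodeCache.  The revision-10 root pointer then no longer
	// matches the cache, so the checks below (and in
	// searchForNodesInDirLocked) fail the search with
	// searchWithOutOfDateCacheError, and the caller retries with a
	// throwaway cache.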
3371	if cache == fbo.nodeCache {
3372		// Root node should already exist if we have an up-to-date md.
3373		node = cache.Get(rootPtr.Ref())
3374		if node == nil {
3375			return nil, searchWithOutOfDateCacheError{}
3376		}
3377	} else {
3378		// Root node may or may not exist.
3379		var err error
3380		node, err = cache.GetOrCreate(rootPtr,
3381			data.NewPathPartString(
3382				string(kmd.GetTlfHandle().GetCanonicalName()), nil),
3383			nil, data.Dir)
3384		if err != nil {
3385			return nil, err
3386		}
3387	}
3388	if node == nil {
3389		return nil, fmt.Errorf("Cannot find root node corresponding to %v",
3390			rootPtr)
3391	}
3392
3393	// are they looking for the root directory?
3394	numNodesFound := 0
3395	if _, ok := nodeMap[rootPtr]; ok {
3396		nodeMap[rootPtr] = node
3397		numNodesFound++
3398		if numNodesFound >= len(nodeMap) {
3399			return nodeMap, nil
3400		}
3401	}
3402
3403	rootPath := cache.PathFromNode(node)
3404	if len(rootPath.Path) != 1 {
3405		return nil, fmt.Errorf("Invalid root path for %v: %s",
3406			rootPtr, rootPath)
3407	}
3408
3409	_, err := fbo.searchForNodesInDirLocked(ctx, lState, cache, newPtrs,
3410		kmd, node, rootPath, nodeMap, numNodesFound)
3411	if err != nil {
3412		return nil, err
3413	}
3414
3415	if rootPtr != cache.PathFromNode(node).TailPointer() {
3416		return nil, searchWithOutOfDateCacheError{}
3417	}
3418
3419	return nodeMap, nil
3420}
3421
3422func (fbo *folderBlockOps) searchForNodesLocked(ctx context.Context,
3423	lState *kbfssync.LockState, cache NodeCache, ptrs []data.BlockPointer,
3424	newPtrs map[data.BlockPointer]bool, kmd libkey.KeyMetadata,
3425	rootPtr data.BlockPointer) (map[data.BlockPointer]Node, NodeCache, error) {
3426	fbo.blockLock.AssertAnyLocked(lState)
3427
3428	// First try the passed-in cache.  If it doesn't work because the
3429	// cache is out of date, try again with a clean cache.
3430	nodeMap, err := fbo.trySearchWithCacheLocked(ctx, lState, cache, ptrs,
3431		newPtrs, kmd, rootPtr)
3432	if _, ok := err.(searchWithOutOfDateCacheError); ok {
3433		// The md is out-of-date, so use a throwaway cache so we
3434		// don't pollute the real node cache with stale nodes.
3435		fbo.vlog.CLogf(
3436			ctx, libkb.VLog1, "Root node %v doesn't exist in the node "+
3437				"cache; using a throwaway node cache instead",
3438			rootPtr)
3439		cache = newNodeCacheStandard(fbo.folderBranch)
3440		cache.SetObfuscatorMaker(fbo.nodeCache.ObfuscatorMaker())
3441		nodeMap, err = fbo.trySearchWithCacheLocked(ctx, lState, cache, ptrs,
3442			newPtrs, kmd, rootPtr)
3443	}
3444
3445	if err != nil {
3446		return nil, nil, err
3447	}
3448
3449	// Return the whole map even if some nodes weren't found.
3450	return nodeMap, cache, nil
3451}
3452
// SearchForNodes tries to resolve all the given pointers to a Node
// object, using only the updated pointers specified in newPtrs.
// Pointers that can't be resolved are left as nil in the returned
// map; it is the caller's responsibility to decide whether to error
// on particular unresolved nodes.  It also returns the cache that
// ultimately contains the nodes -- this might differ from the
// passed-in cache if another goroutine updated that cache and it no
// longer contains the root pointer specified in md.
3461func (fbo *folderBlockOps) SearchForNodes(ctx context.Context,
3462	cache NodeCache, ptrs []data.BlockPointer, newPtrs map[data.BlockPointer]bool,
3463	kmd libkey.KeyMetadata, rootPtr data.BlockPointer) (
3464	map[data.BlockPointer]Node, NodeCache, error) {
3465	lState := makeFBOLockState()
3466	fbo.blockLock.RLock(lState)
3467	defer fbo.blockLock.RUnlock(lState)
3468	return fbo.searchForNodesLocked(
3469		ctx, lState, cache, ptrs, newPtrs, kmd, rootPtr)
3470}
3471
3472// SearchForPaths is like SearchForNodes, except it returns a
3473// consistent view of all the paths of the searched-for pointers.
3474func (fbo *folderBlockOps) SearchForPaths(ctx context.Context,
3475	cache NodeCache, ptrs []data.BlockPointer, newPtrs map[data.BlockPointer]bool,
3476	kmd libkey.KeyMetadata, rootPtr data.BlockPointer) (map[data.BlockPointer]data.Path, error) {
3477	lState := makeFBOLockState()
3478	// Hold the lock while processing the paths so they can't be changed.
3479	fbo.blockLock.RLock(lState)
3480	defer fbo.blockLock.RUnlock(lState)
3481	nodeMap, cache, err :=
3482		fbo.searchForNodesLocked(
3483			ctx, lState, cache, ptrs, newPtrs, kmd, rootPtr)
3484	if err != nil {
3485		return nil, err
3486	}
3487
3488	paths := make(map[data.BlockPointer]data.Path)
3489	for ptr, n := range nodeMap {
3490		if n == nil {
3491			paths[ptr] = data.Path{}
3492			continue
3493		}
3494
3495		p := cache.PathFromNode(n)
3496		if p.TailPointer() != ptr {
3497			return nil, NodeNotFoundError{ptr}
3498		}
3499		paths[ptr] = p
3500	}
3501
3502	return paths, nil
3503}
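
// A minimal usage sketch for SearchForPaths (hypothetical caller;
// assumes `ptrs` and `newPtrs` were gathered from an op's updates):
//
//	paths, err := fbo.SearchForPaths(
//		ctx, fbo.nodeCache, ptrs, newPtrs, md, md.data.Dir.BlockPointer)
//	if err != nil {
//		return err
//	}
//	for ptr, p := range paths {
//		if !p.IsValid() {
//			continue // this pointer wasn't found under the root
//		}
//		// use p, e.g. to attach a path to an op during conflict
//		// resolution
//	}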
3504
3505// UpdateCachedEntryAttributesOnRemovedFile updates any cached entry
3506// for the given path of an unlinked file, according to the given op,
3507// and it makes a new dirty cache entry if one doesn't exist yet.  We
3508// assume Sync will be called eventually on the corresponding open
3509// file handle, which will clear out the entry.
3510func (fbo *folderBlockOps) UpdateCachedEntryAttributesOnRemovedFile(
3511	ctx context.Context, lState *kbfssync.LockState,
3512	kmd KeyMetadataWithRootDirEntry, op *setAttrOp, p data.Path, de data.DirEntry) error {
3513	fbo.blockLock.Lock(lState)
3514	defer fbo.blockLock.Unlock(lState)
3515	_, err := fbo.setCachedAttrLocked(
3516		ctx, lState, kmd, *p.ParentPath(), p.TailName(), op.Attr, de)
3517	return err
3518}
3519
3520func (fbo *folderBlockOps) getDeferredWriteCountForTest(
3521	lState *kbfssync.LockState) int {
3522	fbo.blockLock.RLock(lState)
3523	defer fbo.blockLock.RUnlock(lState)
3524	writes := 0
3525	for _, ds := range fbo.deferred {
3526		writes += len(ds.writes)
3527	}
3528	return writes
3529}
3530
func (fbo *folderBlockOps) updatePointer(
	kmd libkey.KeyMetadata, oldPtr data.BlockPointer,
	newPtr data.BlockPointer, shouldPrefetch bool) NodeID {
3532	updatedNode := fbo.nodeCache.UpdatePointer(oldPtr.Ref(), newPtr)
3533	if updatedNode == nil || oldPtr.ID == newPtr.ID {
3534		return nil
3535	}
3536
	// Only prefetch if the updated pointer is a new block ID.
	// TODO: Remove this log line when we're done debugging, since
	// it'll show up everywhere.
	ctx := context.TODO()
3540	fbo.vlog.CLogf(
3541		ctx, libkb.VLog1, "Updated reference for pointer %s to %s.",
3542		oldPtr.ID, newPtr.ID)
3543	if shouldPrefetch {
3544		// Prefetch the new ref, but only if the old ref already exists in
3545		// the block cache. Ideally we'd always prefetch it, but we need
3546		// the type of the block so that we can call `NewEmpty`.
3547		block, lifetime, err := fbo.config.BlockCache().GetWithLifetime(oldPtr)
3548		if err != nil {
3549			return updatedNode
3550		}
3551
3552		// No need to cache because it's already cached.
3553		action := fbo.config.Mode().DefaultBlockRequestAction()
3554		if fbo.branch() != data.MasterBranch {
3555			action = action.AddNonMasterBranch()
3556		}
3557		_ = fbo.config.BlockOps().BlockRetriever().Request(
3558			ctx, updatePointerPrefetchPriority, kmd, newPtr, block.NewEmpty(),
3559			lifetime, action)
3560	}
3561	// Cancel any prefetches for the old pointer from the prefetcher.
3562	fbo.config.BlockOps().Prefetcher().CancelPrefetch(oldPtr)
3563	return updatedNode
3564}
3565
3566// UpdatePointers updates all the pointers in the node cache
3567// atomically.  If `afterUpdateFn` is non-nil, it's called under the
3568// same block lock under which the pointers were updated.
3569func (fbo *folderBlockOps) UpdatePointers(
3570	kmd libkey.KeyMetadata, lState *kbfssync.LockState, op op, shouldPrefetch bool,
3571	afterUpdateFn func() error) (affectedNodeIDs []NodeID, err error) {
3572	fbo.blockLock.Lock(lState)
3573	defer fbo.blockLock.Unlock(lState)
3574	for _, update := range op.allUpdates() {
3575		updatedNode := fbo.updatePointer(
3576			kmd, update.Unref, update.Ref, shouldPrefetch)
3577		if updatedNode != nil {
3578			affectedNodeIDs = append(affectedNodeIDs, updatedNode)
3579		}
3580	}
3581
3582	// Cancel any prefetches for all unreferenced block pointers.
3583	for _, unref := range op.Unrefs() {
3584		fbo.config.BlockOps().Prefetcher().CancelPrefetch(unref)
3585	}
3586
3587	if afterUpdateFn == nil {
3588		return affectedNodeIDs, nil
3589	}
3590
3591	return affectedNodeIDs, afterUpdateFn()
3592}
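
// A rough sketch of how an MD-applying path might use UpdatePointers
// (hypothetical; the real callers live in folderBranchOps):
//
//	affected, err := fbo.UpdatePointers(
//		md, lState, op, true /* shouldPrefetch */, func() error {
//			// Runs under blockLock, after all of op's pointers have
//			// been updated atomically.
//			return nil
//		})
//	if err != nil {
//		return err
//	}
//	// ... send invalidation notifications for the nodes in `affected` ...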
3593
3594func (fbo *folderBlockOps) unlinkDuringFastForwardLocked(ctx context.Context,
3595	lState *kbfssync.LockState, kmd KeyMetadataWithRootDirEntry, ref data.BlockRef) (undoFn func()) {
3596	fbo.blockLock.AssertLocked(lState)
3597	oldNode := fbo.nodeCache.Get(ref)
3598	if oldNode == nil {
3599		return nil
3600	}
3601	oldPath := fbo.nodeCache.PathFromNode(oldNode)
3602	fbo.vlog.CLogf(
3603		ctx, libkb.VLog1, "Unlinking missing node %s/%v during "+
3604			"fast-forward", oldPath, ref)
3605	de, err := fbo.getEntryLocked(ctx, lState, kmd, oldPath, true)
3606	if err != nil {
3607		fbo.log.CDebugf(ctx, "Couldn't find old dir entry for %s/%v: %+v",
3608			oldPath, ref, err)
3609	}
3610	return fbo.nodeCache.Unlink(ref, oldPath, de)
3611}
3612
3613type nodeChildrenMap map[string]map[data.PathNode]bool
3614
3615func (ncm nodeChildrenMap) addDirChange(
3616	node Node, p data.Path, changes []NodeChange, affectedNodeIDs []NodeID) (
3617	[]NodeChange, []NodeID) {
3618	change := NodeChange{Node: node}
3619	for subchild := range ncm[p.String()] {
3620		change.DirUpdated = append(change.DirUpdated, subchild.Name)
3621	}
3622	changes = append(changes, change)
3623	affectedNodeIDs = append(affectedNodeIDs, node.GetID())
3624	return changes, affectedNodeIDs
3625}
3626
3627func (nodeChildrenMap) addFileChange(
3628	node Node, changes []NodeChange, affectedNodeIDs []NodeID) (
3629	[]NodeChange, []NodeID) {
3630	// Invalidate the entire file contents.
3631	changes = append(changes, NodeChange{
3632		Node:        node,
3633		FileUpdated: []WriteRange{{Len: 0, Off: 0}},
3634	})
3635	affectedNodeIDs = append(affectedNodeIDs, node.GetID())
3636	return changes, affectedNodeIDs
3637}
3638
3639func (fbo *folderBlockOps) fastForwardDirAndChildrenLocked(ctx context.Context,
3640	lState *kbfssync.LockState, currDir data.Path, children nodeChildrenMap,
3641	kmd KeyMetadataWithRootDirEntry,
3642	updates map[data.BlockPointer]data.BlockPointer) (
3643	changes []NodeChange, affectedNodeIDs []NodeID, undoFns []func(),
3644	err error) {
3645	fbo.blockLock.AssertLocked(lState)
3646
3647	chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
3648	if err != nil {
3649		return nil, nil, undoFns, err
3650	}
3651	dd := fbo.newDirDataLocked(lState, currDir, chargedTo, kmd)
3652	entries, err := dd.GetEntries(ctx)
3653	if err != nil {
3654		return nil, nil, undoFns, err
3655	}
3656
3657	prefix := currDir.String()
3658
3659	// TODO: parallelize me?
3660	for child := range children[prefix] {
3661		entry, ok := entries[child.Name]
3662		if !ok {
3663			undoFn := fbo.unlinkDuringFastForwardLocked(
3664				ctx, lState, kmd, child.BlockPointer.Ref())
3665			if undoFn != nil {
3666				undoFns = append(undoFns, undoFn)
3667			}
3668			continue
3669		}
3670
3671		fbo.vlog.CLogf(
3672			ctx, libkb.VLog1, "Fast-forwarding %v -> %v",
3673			child.BlockPointer, entry.BlockPointer)
3674		fbo.updatePointer(kmd, child.BlockPointer,
3675			entry.BlockPointer, true)
3676		updates[child.BlockPointer] = entry.BlockPointer
3677		node := fbo.nodeCache.Get(entry.BlockPointer.Ref())
3678		if node == nil {
3679			fbo.vlog.CLogf(
3680				ctx, libkb.VLog1, "Skipping missing node for %s",
3681				entry.BlockPointer)
3682			continue
3683		}
3684		if entry.Type == data.Dir {
3685			newPath := fbo.nodeCache.PathFromNode(node)
3686			changes, affectedNodeIDs = children.addDirChange(
3687				node, newPath, changes, affectedNodeIDs)
3688
3689			childChanges, childAffectedNodeIDs, childUndoFns, err :=
3690				fbo.fastForwardDirAndChildrenLocked(
3691					ctx, lState, newPath, children, kmd, updates)
3692			undoFns = append(undoFns, childUndoFns...)
3693			if err != nil {
3694				return nil, nil, undoFns, err
3695			}
3696			changes = append(changes, childChanges...)
3697			affectedNodeIDs = append(affectedNodeIDs, childAffectedNodeIDs...)
3698		} else {
3699			// File -- invalidate the entire file contents.
3700			changes, affectedNodeIDs = children.addFileChange(
3701				node, changes, affectedNodeIDs)
3702		}
3703	}
3704	delete(children, prefix)
3705	return changes, affectedNodeIDs, undoFns, nil
3706}
3707
3708func (fbo *folderBlockOps) makeChildrenTreeFromNodesLocked(
3709	lState *kbfssync.LockState, nodes []Node) (
3710	rootPath data.Path, children nodeChildrenMap) {
3711	fbo.blockLock.AssertLocked(lState)
3712
3713	// Build a "tree" representation for each interesting path prefix.
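	//
	// For example (hypothetical paths): cached nodes at "u1", "u1/a",
	// and "u1/a/b" produce roughly
	//
	//	children["u1"]   == {pathNode(a): true}
	//	children["u1/a"] == {pathNode(b): true}
	//
	// with rootPath set to the single-element path for "u1".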
3714	children = make(nodeChildrenMap)
3715	for _, n := range nodes {
3716		p := fbo.nodeCache.PathFromNode(n)
3717		if len(p.Path) == 1 {
3718			rootPath = p
3719		}
3720		prevPath := ""
3721		for _, pn := range p.Path {
3722			if prevPath != "" {
3723				childPNs := children[prevPath]
3724				if childPNs == nil {
3725					childPNs = make(map[data.PathNode]bool)
3726					children[prevPath] = childPNs
3727				}
3728				childPNs[pn] = true
3729			}
3730			prevPath = pathlib.Join(prevPath, pn.Name.Plaintext())
3731		}
3732	}
3733	return rootPath, children
3734}
3735
3736// FastForwardAllNodes attempts to update the block pointers
3737// associated with nodes in the cache by searching for their paths in
3738// the current version of the TLF.  If it can't find a corresponding
3739// node, it assumes it's been deleted and unlinks it.  Returns the set
3740// of node changes that resulted.  If there are no nodes, it returns a
3741// nil error because there's nothing to be done.
3742func (fbo *folderBlockOps) FastForwardAllNodes(ctx context.Context,
3743	lState *kbfssync.LockState, md ReadOnlyRootMetadata) (
3744	changes []NodeChange, affectedNodeIDs []NodeID, err error) {
3745	if fbo.nodeCache == nil {
3746		// Nothing needs to be done!
3747		return nil, nil, nil
3748	}
3749
3750	// Take a hard lock through this whole process.  TODO: is there
3751	// any way to relax this?  It could lead to file system operation
3752	// timeouts, even on reads, if we hold it too long.
3753	fbo.blockLock.Lock(lState)
3754	defer fbo.blockLock.Unlock(lState)
3755
3756	nodes := fbo.nodeCache.AllNodes()
3757	if len(nodes) == 0 {
3758		// Nothing needs to be done!
3759		return nil, nil, nil
3760	}
3761	fbo.vlog.CLogf(ctx, libkb.VLog1, "Fast-forwarding %d nodes", len(nodes))
3762	defer func() {
3763		fbo.vlog.CLogf(ctx, libkb.VLog1, "Fast-forward complete: %v", err)
3764	}()
3765
3766	rootPath, children := fbo.makeChildrenTreeFromNodesLocked(lState, nodes)
3767	if !rootPath.IsValid() {
3768		return nil, nil, errors.New("Couldn't find the root path")
3769	}
3770
3771	fbo.vlog.CLogf(
3772		ctx, libkb.VLog1, "Fast-forwarding root %v -> %v",
3773		rootPath.Path[0].BlockPointer, md.data.Dir.BlockPointer)
3774	fbo.updatePointer(md, rootPath.Path[0].BlockPointer,
3775		md.data.Dir.BlockPointer, false)
3776
3777	// Keep track of all the pointer updates done, and unwind them if
3778	// there's any error.
3779	updates := make(map[data.BlockPointer]data.BlockPointer)
3780	updates[rootPath.Path[0].BlockPointer] = md.data.Dir.BlockPointer
3781	var undoFns []func()
3782	defer func() {
3783		if err == nil {
3784			return
3785		}
		for oldPtr, newPtr := range updates {
			fbo.updatePointer(md, newPtr, oldPtr, false)
3788		}
3789		for _, f := range undoFns {
3790			f()
3791		}
3792	}()
3793
3794	rootPath.Path[0].BlockPointer = md.data.Dir.BlockPointer
3795	rootNode := fbo.nodeCache.Get(md.data.Dir.BlockPointer.Ref())
3796	if rootNode != nil {
3797		change := NodeChange{Node: rootNode}
3798		for child := range children[rootPath.String()] {
3799			change.DirUpdated = append(change.DirUpdated, child.Name)
3800		}
3801		changes = append(changes, change)
3802		affectedNodeIDs = append(affectedNodeIDs, rootNode.GetID())
3803	}
3804
3805	childChanges, childAffectedNodeIDs, undoFns, err :=
3806		fbo.fastForwardDirAndChildrenLocked(
3807			ctx, lState, rootPath, children, md, updates)
3808	if err != nil {
3809		return nil, nil, err
3810	}
3811	changes = append(changes, childChanges...)
3812	affectedNodeIDs = append(affectedNodeIDs, childAffectedNodeIDs...)
3813
3814	// Unlink any children that remain.
3815	for _, childPNs := range children {
3816		for child := range childPNs {
3817			fbo.unlinkDuringFastForwardLocked(
3818				ctx, lState, md, child.BlockPointer.Ref())
3819		}
3820	}
3821	return changes, affectedNodeIDs, nil
3822}
3823
3824func (fbo *folderBlockOps) getInvalidationChangesForNodes(
3825	ctx context.Context, lState *kbfssync.LockState, nodes []Node) (
3826	changes []NodeChange, affectedNodeIDs []NodeID, err error) {
3827	fbo.blockLock.AssertLocked(lState)
3828	if len(nodes) == 0 {
3829		// Nothing needs to be done!
3830		return nil, nil, nil
3831	}
3832
3833	_, children := fbo.makeChildrenTreeFromNodesLocked(lState, nodes)
3834	for _, node := range nodes {
3835		p := fbo.nodeCache.PathFromNode(node)
3836		prefix := p.String()
3837		childNodes := children[prefix]
3838		if len(childNodes) > 0 {
3839			// This must be a directory.  Invalidate all children.
3840			changes, affectedNodeIDs = children.addDirChange(
3841				node, p, changes, affectedNodeIDs)
3842			fbo.vlog.CLogf(
3843				ctx, libkb.VLog1, "Invalidating dir node %p/%s", node, prefix)
3844		} else {
3845			// This might be a file.  In any case, it doesn't have any
3846			// children that need invalidation, so just send the file
3847			// change.
3848			changes, affectedNodeIDs = children.addFileChange(
3849				node, changes, affectedNodeIDs)
3850			fbo.vlog.CLogf(
3851				ctx, libkb.VLog1, "Invalidating possible file node %p/%s",
3852				node, prefix)
3853		}
3854	}
3855	return changes, affectedNodeIDs, nil
3856}
3857
3858// GetInvalidationChangesForNode returns the list of invalidation
3859// notifications for all the nodes rooted at the given node.
3860func (fbo *folderBlockOps) GetInvalidationChangesForNode(
3861	ctx context.Context, lState *kbfssync.LockState, node Node) (
3862	changes []NodeChange, affectedNodeIDs []NodeID, err error) {
3863	if fbo.nodeCache == nil {
3864		// Nothing needs to be done!
3865		return nil, nil, nil
3866	}
3867
3868	fbo.blockLock.Lock(lState)
3869	defer fbo.blockLock.Unlock(lState)
3870	fbo.vlog.CLogf(
3871		ctx, libkb.VLog1, "About to get all children for node %p", node)
3872	childNodes := fbo.nodeCache.AllNodeChildren(node)
3873	fbo.vlog.CLogf(
3874		ctx, libkb.VLog1, "Found %d children for node %p", len(childNodes),
3875		node)
3876	return fbo.getInvalidationChangesForNodes(
3877		ctx, lState, append(childNodes, node))
3878}
3879
3880// GetInvalidationChangesForAll returns the list of invalidation
3881// notifications for the entire TLF.
3882func (fbo *folderBlockOps) GetInvalidationChangesForAll(
3883	ctx context.Context, lState *kbfssync.LockState) (
3884	changes []NodeChange, affectedNodeIDs []NodeID, err error) {
3885	if fbo.nodeCache == nil {
3886		// Nothing needs to be done!
3887		return nil, nil, nil
3888	}
3889
3890	fbo.blockLock.Lock(lState)
3891	defer fbo.blockLock.Unlock(lState)
3892	childNodes := fbo.nodeCache.AllNodes()
3893	fbo.vlog.CLogf(ctx, libkb.VLog1, "Found %d nodes", len(childNodes))
3894	return fbo.getInvalidationChangesForNodes(ctx, lState, childNodes)
3895}
3896
3897// MarkNode marks all the blocks in the node's block tree with the
3898// given tag.
3899func (fbo *folderBlockOps) MarkNode(
3900	ctx context.Context, lState *kbfssync.LockState, node Node, kmd libkey.KeyMetadata,
3901	tag string, cacheType DiskBlockCacheType) error {
3902	dbc := fbo.config.DiskBlockCache()
3903	if dbc == nil {
3904		return nil
3905	}
3906
3907	fbo.blockLock.RLock(lState)
3908	defer fbo.blockLock.RUnlock(lState)
3909
3910	chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
3911	if err != nil {
3912		return err
3913	}
3914	p := fbo.nodeCache.PathFromNode(node)
3915	err = dbc.Mark(ctx, p.TailPointer().ID, tag, cacheType)
3916	if err != nil {
3917		return err
3918	}
3919	var infos []data.BlockInfo
3920	if node.EntryType() == data.Dir {
3921		dd := fbo.newDirDataLocked(lState, p, chargedTo, kmd)
3922		infos, err = dd.GetIndirectDirBlockInfos(ctx)
3923	} else {
3924		fd := fbo.newFileData(lState, p, chargedTo, kmd)
3925		infos, err = fd.GetIndirectFileBlockInfos(ctx)
3926	}
3927	if err != nil {
3928		return err
3929	}
3930
3931	for _, info := range infos {
3932		err = dbc.Mark(ctx, info.BlockPointer.ID, tag, cacheType)
3933		switch errors.Cause(err).(type) {
3934		case nil:
3935		case data.NoSuchBlockError:
3936		default:
3937			return err
3938		}
3939	}
3940	return nil
3941}
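
// A minimal usage sketch for MarkNode (hypothetical; the tag scheme,
// `rev`, and the choice of `DiskBlockSyncCache` are assumptions about
// the caller):
//
//	tag := fmt.Sprintf("%s-%d", fbo.id(), rev) // hypothetical tag
//	if err := fbo.MarkNode(
//		ctx, lState, node, md, tag, DiskBlockSyncCache); err != nil {
//		return err
//	}
//	// ... later, evict any blocks in that cache not carrying `tag` ...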
3942
3943type chainsPathPopulator interface {
3944	populateChainPaths(context.Context, logger.Logger, *crChains, bool) error
3945	obfuscatorMaker() func() data.Obfuscator
3946}
3947
3948// populateChainPaths updates all the paths in all the ops tracked by
3949// `chains`, using the main nodeCache.
3950func (fbo *folderBlockOps) populateChainPaths(ctx context.Context,
3951	log logger.Logger, chains *crChains, includeCreates bool) error {
3952	_, err := chains.getPaths(
3953		ctx, fbo, log, fbo.nodeCache, includeCreates,
3954		fbo.config.Mode().IsTestMode())
3955	return err
3956}
3957
3958func (fbo *folderBlockOps) obfuscatorMaker() func() data.Obfuscator {
3959	return fbo.nodeCache.ObfuscatorMaker()
3960}
3961
3962var _ chainsPathPopulator = (*folderBlockOps)(nil)
3963