// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

//go:generate -command counterfeiter go run github.com/maxbrunsfeld/counterfeiter/v6
//go:generate counterfeiter -o mocks/model.go --fake-name Model . Model

package model

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net"
	"os"
	"path/filepath"
	"reflect"
	"runtime"
	"strings"
	stdsync "sync"
	"time"

	"github.com/pkg/errors"
	"github.com/thejerf/suture/v4"

	"github.com/syncthing/syncthing/lib/config"
	"github.com/syncthing/syncthing/lib/connections"
	"github.com/syncthing/syncthing/lib/db"
	"github.com/syncthing/syncthing/lib/events"
	"github.com/syncthing/syncthing/lib/fs"
	"github.com/syncthing/syncthing/lib/ignore"
	"github.com/syncthing/syncthing/lib/osutil"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/scanner"
	"github.com/syncthing/syncthing/lib/stats"
	"github.com/syncthing/syncthing/lib/svcutil"
	"github.com/syncthing/syncthing/lib/sync"
	"github.com/syncthing/syncthing/lib/ur/contract"
	"github.com/syncthing/syncthing/lib/util"
	"github.com/syncthing/syncthing/lib/versioner"
)

type service interface {
	suture.Service
	BringToFront(string)
	Override()
	Revert()
	DelayScan(d time.Duration)
	SchedulePull()                                    // something relevant changed, we should try a pull
	Jobs(page, perpage int) ([]string, []string, int) // In progress, Queued, skipped
	Scan(subs []string) error
	Errors() []FileError
	WatchError() error
	ScheduleForceRescan(path string)
	GetStatistics() (stats.FolderStatistics, error)

	getState() (folderState, time.Time, error)
}

type Availability struct {
	ID            protocol.DeviceID `json:"id"`
	FromTemporary bool              `json:"fromTemporary"`
}

type Model interface {
	suture.Service

	connections.Model

	ResetFolder(folder string)
	DelayScan(folder string, next time.Duration)
	ScanFolder(folder string) error
	ScanFolders() map[string]error
	ScanFolderSubdirs(folder string, subs []string) error
	State(folder string) (string, time.Time, error)
	FolderErrors(folder string) ([]FileError, error)
	WatchError(folder string) error
	Override(folder string)
	Revert(folder string)
	BringToFront(folder, file string)
	LoadIgnores(folder string) ([]string, []string, error)
	CurrentIgnores(folder string) ([]string, []string, error)
	SetIgnores(folder string, content []string) error

	GetFolderVersions(folder string) (map[string][]versioner.FileVersion, error)
	RestoreFolderVersions(folder string, versions map[string]time.Time) (map[string]error, error)

	DBSnapshot(folder string) (*db.Snapshot, error)
	NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, []db.FileInfoTruncated, []db.FileInfoTruncated, error)
	RemoteNeedFolderFiles(folder string, device protocol.DeviceID, page, perpage int) ([]db.FileInfoTruncated, error)
	LocalChangedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, error)
	FolderProgressBytesCompleted(folder string) int64

	CurrentFolderFile(folder string, file string) (protocol.FileInfo, bool, error)
	CurrentGlobalFile(folder string, file string) (protocol.FileInfo, bool, error)
	GetMtimeMapping(folder string, file string) (fs.MtimeMapping, error)
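	// Availability reports which devices (and whether via a temporary index)
	// can currently serve the given block. Illustrative only, with a
	// hypothetical folder ID and a file previously fetched from this model:
	//
	//	avail, err := m.Availability("default", file, file.Blocks[0])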
	Availability(folder string, file protocol.FileInfo, block protocol.BlockInfo) ([]Availability, error)

	Completion(device protocol.DeviceID, folder string) (FolderCompletion, error)
	ConnectionStats() map[string]interface{}
	DeviceStatistics() (map[protocol.DeviceID]stats.DeviceStatistics, error)
	FolderStatistics() (map[string]stats.FolderStatistics, error)
	UsageReportingStats(report *contract.Report, version int, preview bool)

	PendingDevices() (map[protocol.DeviceID]db.ObservedDevice, error)
	PendingFolders(device protocol.DeviceID) (map[string]db.PendingFolder, error)
	DismissPendingDevice(device protocol.DeviceID) error
	DismissPendingFolder(device protocol.DeviceID, folder string) error

	StartDeadlockDetector(timeout time.Duration)
	GlobalDirectoryTree(folder, prefix string, levels int, dirsOnly bool) ([]*TreeEntry, error)
}

type model struct {
	*suture.Supervisor

	// constructor parameters
	cfg            config.Wrapper
	id             protocol.DeviceID
	clientName     string
	clientVersion  string
	db             *db.Lowlevel
	protectedFiles []string
	evLogger       events.Logger

	// constant or concurrency safe fields
	finder          *db.BlockFinder
	progressEmitter *ProgressEmitter
	shortID         protocol.ShortID
	// globalRequestLimiter limits the amount of data in concurrent incoming
	// requests
	globalRequestLimiter *util.Semaphore
	// folderIOLimiter limits the number of concurrent I/O heavy operations,
	// such as scans and pulls.
	folderIOLimiter *util.Semaphore
	fatalChan       chan error
	started         chan struct{}

	// fields protected by fmut
	fmut                           sync.RWMutex
	folderCfgs                     map[string]config.FolderConfiguration                  // folder -> cfg
	folderFiles                    map[string]*db.FileSet                                 // folder -> files
	deviceStatRefs                 map[protocol.DeviceID]*stats.DeviceStatisticsReference // deviceID -> statsRef
	folderIgnores                  map[string]*ignore.Matcher                             // folder -> matcher object
	folderRunners                  map[string]service                                     // folder -> puller or scanner
	folderRunnerToken              map[string]suture.ServiceToken                         // folder -> token for folder runner
	folderRestartMuts              syncMutexMap                                           // folder -> restart mutex
	folderVersioners               map[string]versioner.Versioner                         // folder -> versioner (may be nil)
	folderEncryptionPasswordTokens map[string][]byte                                      // folder -> encryption token (may be missing, and only for encryption type folders)
	folderEncryptionFailures       map[string]map[protocol.DeviceID]error                 // folder -> device -> error regarding encryption consistency (may be missing)

	// fields protected by pmut
	pmut                sync.RWMutex
	conn                map[protocol.DeviceID]protocol.Connection
	connRequestLimiters map[protocol.DeviceID]*util.Semaphore
	closed              map[protocol.DeviceID]chan struct{}
	helloMessages       map[protocol.DeviceID]protocol.Hello
	deviceDownloads     map[protocol.DeviceID]*deviceDownloadState
	remotePausedFolders map[protocol.DeviceID]map[string]struct{} // deviceID -> folders
	indexHandlers       map[protocol.DeviceID]*indexHandlerRegistry

	// for testing only
	foldersRunning int32
}

type folderFactory func(*model, *db.FileSet, *ignore.Matcher, config.FolderConfiguration, versioner.Versioner, events.Logger, *util.Semaphore) service

var (
	folderFactories = make(map[config.FolderType]folderFactory)
)

var (
	errDeviceUnknown = errors.New("unknown device")
	errDevicePaused  = errors.New("device is paused")
	errDeviceIgnored = errors.New("device is ignored")
	errDeviceRemoved = errors.New("device has been removed")
	ErrFolderPaused      = errors.New("folder is paused")
	ErrFolderNotRunning  = errors.New("folder is not running")
	ErrFolderMissing     = errors.New("no such folder")
	errNetworkNotAllowed = errors.New("network not allowed")
	errNoVersioner       = errors.New("folder has no versioner")
	// errors about why a connection is closed
	errReplacingConnection                = errors.New("replacing connection")
	errStopped                            = errors.New("Syncthing is being stopped")
	errEncryptionInvConfigLocal           = errors.New("can't encrypt outgoing data because local data is encrypted (folder-type receive-encrypted)")
	errEncryptionInvConfigRemote          = errors.New("remote has encrypted data and encrypts that data for us - this is impossible")
	errEncryptionNotEncryptedLocal        = errors.New("remote expects to exchange encrypted data, but is configured for plain data")
	errEncryptionPlainForReceiveEncrypted = errors.New("remote expects to exchange plain data, but is configured to be encrypted")
	errEncryptionPlainForRemoteEncrypted  = errors.New("remote expects to exchange plain data, but local data is encrypted (folder-type receive-encrypted)")
	errEncryptionNotEncryptedUntrusted    = errors.New("device is untrusted, but configured to receive plain data")
	errEncryptionPassword                 = errors.New("different encryption passwords used")
	errEncryptionTokenRead                = errors.New("failed to read encryption token")
	errEncryptionTokenWrite               = errors.New("failed to write encryption token")
	errMissingRemoteInClusterConfig       = errors.New("remote device missing in cluster config")
	errMissingLocalInClusterConfig        = errors.New("local device missing in cluster config")
	errConnLimitReached                   = errors.New("connection limit reached")
)

// NewModel creates and starts a new model. The model starts in read-only mode,
// where it sends index information to connected peers and responds to requests
// for file data without altering the local folder in any way.
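//
// A minimal construction sketch (illustrative; assumes an already prepared
// config wrapper, device ID, database and event logger, and something that
// actually runs the returned service):
//
//	m := NewModel(cfgWrapper, myID, "syncthing", "v1.x.y", lowlevelDB, nil, evLogger)
//	go m.Serve(ctx)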
func NewModel(cfg config.Wrapper, id protocol.DeviceID, clientName, clientVersion string, ldb *db.Lowlevel, protectedFiles []string, evLogger events.Logger) Model {
	spec := svcutil.SpecWithDebugLogger(l)
	m := &model{
		Supervisor: suture.New("model", spec),

		// constructor parameters
		cfg:            cfg,
		id:             id,
		clientName:     clientName,
		clientVersion:  clientVersion,
		db:             ldb,
		protectedFiles: protectedFiles,
		evLogger:       evLogger,

		// constant or concurrency safe fields
		finder:               db.NewBlockFinder(ldb),
		progressEmitter:      NewProgressEmitter(cfg, evLogger),
		shortID:              id.Short(),
		globalRequestLimiter: util.NewSemaphore(1024 * cfg.Options().MaxConcurrentIncomingRequestKiB()),
		folderIOLimiter:      util.NewSemaphore(cfg.Options().MaxFolderConcurrency()),
		fatalChan:            make(chan error),
		started:              make(chan struct{}),

		// fields protected by fmut
		fmut:                           sync.NewRWMutex(),
		folderCfgs:                     make(map[string]config.FolderConfiguration),
		folderFiles:                    make(map[string]*db.FileSet),
		deviceStatRefs:                 make(map[protocol.DeviceID]*stats.DeviceStatisticsReference),
		folderIgnores:                  make(map[string]*ignore.Matcher),
		folderRunners:                  make(map[string]service),
		folderRunnerToken:              make(map[string]suture.ServiceToken),
		folderVersioners:               make(map[string]versioner.Versioner),
		folderEncryptionPasswordTokens: make(map[string][]byte),
		folderEncryptionFailures:       make(map[string]map[protocol.DeviceID]error),

		// fields protected by pmut
		pmut:                sync.NewRWMutex(),
		conn:                make(map[protocol.DeviceID]protocol.Connection),
		connRequestLimiters: make(map[protocol.DeviceID]*util.Semaphore),
		closed:              make(map[protocol.DeviceID]chan struct{}),
		helloMessages:       make(map[protocol.DeviceID]protocol.Hello),
		deviceDownloads:     make(map[protocol.DeviceID]*deviceDownloadState),
		remotePausedFolders: make(map[protocol.DeviceID]map[string]struct{}),
		indexHandlers:       make(map[protocol.DeviceID]*indexHandlerRegistry),
	}
	for devID := range cfg.Devices() {
		m.deviceStatRefs[devID] = stats.NewDeviceStatisticsReference(m.db, devID)
	}
	m.Add(m.progressEmitter)
	m.Add(svcutil.AsService(m.serve, m.String()))

	return m
}

func (m *model) serve(ctx context.Context) error {
	defer m.closeAllConnectionsAndWait()

	cfg := m.cfg.Subscribe(m)
	defer m.cfg.Unsubscribe(m)

	if err := m.initFolders(cfg); err != nil {
		close(m.started)
		return svcutil.AsFatalErr(err, svcutil.ExitError)
	}

	close(m.started)

	select {
	case <-ctx.Done():
		return ctx.Err()
	case err := <-m.fatalChan:
		return svcutil.AsFatalErr(err, svcutil.ExitError)
	}
}

func (m *model) initFolders(cfg config.Configuration) error {
	clusterConfigDevices := make(deviceIDSet, len(cfg.Devices))
	for _, folderCfg := range cfg.Folders {
		if folderCfg.Paused {
			folderCfg.CreateRoot()
			continue
		}
		err := m.newFolder(folderCfg, cfg.Options.CacheIgnoredFiles)
		if err != nil {
			return err
		}
		clusterConfigDevices.add(folderCfg.DeviceIDs())
	}

	ignoredDevices := observedDeviceSet(m.cfg.IgnoredDevices())
	m.cleanPending(cfg.DeviceMap(), cfg.FolderMap(), ignoredDevices, nil)

	m.sendClusterConfig(clusterConfigDevices.AsSlice())
	return nil
}

func (m *model) closeAllConnectionsAndWait() {
	m.pmut.RLock()
	closed := make([]chan struct{}, 0, len(m.conn))
	for id, conn := range m.conn {
		closed = append(closed, m.closed[id])
		go conn.Close(errStopped)
	}
	m.pmut.RUnlock()
	for _, c := range closed {
		<-c
	}
}

func (m *model) fatal(err error) {
	select {
	case m.fatalChan <- err:
	default:
	}
}

// StartDeadlockDetector starts a deadlock detector on the model's locks which
// causes panics in case the locks cannot be acquired in the given timeout
// period.
func (m *model) StartDeadlockDetector(timeout time.Duration) {
	l.Infof("Starting deadlock detector with %v timeout", timeout)
	detector := newDeadlockDetector(timeout)
	detector.Watch("fmut", m.fmut)
	detector.Watch("pmut", m.pmut)
}

// Need to hold lock on m.fmut when calling this.
func (m *model) addAndStartFolderLocked(cfg config.FolderConfiguration, fset *db.FileSet, cacheIgnoredFiles bool) {
	ignores := ignore.New(cfg.Filesystem(), ignore.WithCache(cacheIgnoredFiles))
	if cfg.Type != config.FolderTypeReceiveEncrypted {
		if err := ignores.Load(".stignore"); err != nil && !fs.IsNotExist(err) {
			l.Warnln("Loading ignores:", err)
		}
	}

	m.addAndStartFolderLockedWithIgnores(cfg, fset, ignores)
}

// Only needed for testing, use addAndStartFolderLocked instead.
func (m *model) addAndStartFolderLockedWithIgnores(cfg config.FolderConfiguration, fset *db.FileSet, ignores *ignore.Matcher) {
	m.folderCfgs[cfg.ID] = cfg
	m.folderFiles[cfg.ID] = fset
	m.folderIgnores[cfg.ID] = ignores

	_, ok := m.folderRunners[cfg.ID]
	if ok {
		l.Warnln("Cannot start already running folder", cfg.Description())
		panic("cannot start already running folder")
	}

	folderFactory, ok := folderFactories[cfg.Type]
	if !ok {
		panic(fmt.Sprintf("unknown folder type 0x%x", cfg.Type))
	}

	folder := cfg.ID

	// Find any devices for which we hold the index in the db, but the folder
	// is not shared, and drop it.
	expected := mapDevices(cfg.DeviceIDs())
	for _, available := range fset.ListDevices() {
		if _, ok := expected[available]; !ok {
			l.Debugln("dropping", folder, "state for", available)
			fset.Drop(available)
		}
	}

	v, ok := fset.Sequence(protocol.LocalDeviceID), true
	indexHasFiles := ok && v > 0
	if !indexHasFiles {
		// It's a blank folder, so this may be the first time we're looking at
		// it. Attempt to create and tag with our marker as appropriate. We
		// don't really do anything with errors at this point except warn -
		// if these things don't work, we still want to start the folder and
		// it'll show up as errored later.

		if err := cfg.CreateRoot(); err != nil {
			l.Warnln("Failed to create folder root directory", err)
		} else if err = cfg.CreateMarker(); err != nil {
			l.Warnln("Failed to create folder marker:", err)
		}
	}

	if cfg.Type == config.FolderTypeReceiveEncrypted {
		if encryptionToken, err := readEncryptionToken(cfg); err == nil {
			m.folderEncryptionPasswordTokens[folder] = encryptionToken
		} else if !fs.IsNotExist(err) {
			l.Warnf("Failed to read encryption token: %v", err)
		}
	}

	// These are our metadata files, and they should always be hidden.
	ffs := cfg.Filesystem()
	_ = ffs.Hide(config.DefaultMarkerName)
	_ = ffs.Hide(".stversions")
	_ = ffs.Hide(".stignore")

	var ver versioner.Versioner
	if cfg.Versioning.Type != "" {
		var err error
		ver, err = versioner.New(cfg)
		if err != nil {
			panic(fmt.Errorf("creating versioner: %w", err))
		}
	}
	m.folderVersioners[folder] = ver

	p := folderFactory(m, fset, ignores, cfg, ver, m.evLogger, m.folderIOLimiter)

	m.folderRunners[folder] = p

	m.warnAboutOverwritingProtectedFiles(cfg, ignores)

	m.folderRunnerToken[folder] = m.Add(p)

	l.Infof("Ready to synchronize %s (%s)", cfg.Description(), cfg.Type)
}

func (m *model) warnAboutOverwritingProtectedFiles(cfg config.FolderConfiguration, ignores *ignore.Matcher) {
	if cfg.Type == config.FolderTypeSendOnly {
		return
	}

	// This is a bit of a hack.
	ffs := cfg.Filesystem()
	if ffs.Type() != fs.FilesystemTypeBasic {
		return
	}
	folderLocation := ffs.URI()

	var filesAtRisk []string
	for _, protectedFilePath := range m.protectedFiles {
		// check if file is synced in this folder
		if protectedFilePath != folderLocation && !fs.IsParent(protectedFilePath, folderLocation) {
			continue
		}

		// check if file is ignored
		relPath, _ := filepath.Rel(folderLocation, protectedFilePath)
		if ignores.Match(relPath).IsIgnored() {
			continue
		}

		filesAtRisk = append(filesAtRisk, protectedFilePath)
	}

	if len(filesAtRisk) > 0 {
		l.Warnln("Some protected files may be overwritten and cause issues. See https://docs.syncthing.net/users/config.html#syncing-configuration-files for more information. The at risk files are:", strings.Join(filesAtRisk, ", "))
	}
}

func (m *model) removeFolder(cfg config.FolderConfiguration) {
	m.fmut.RLock()
	token, ok := m.folderRunnerToken[cfg.ID]
	m.fmut.RUnlock()
	if ok {
		m.RemoveAndWait(token, 0)
	}

	// We need to hold both fmut and pmut and must acquire locks in the same
	// order always. (The locks can be *released* in any order.)
	m.fmut.Lock()
	m.pmut.RLock()

	isPathUnique := true
	for folderID, folderCfg := range m.folderCfgs {
		if folderID != cfg.ID && folderCfg.Path == cfg.Path {
			isPathUnique = false
			break
		}
	}
	if isPathUnique {
		// Remove (if empty and removable) or move away (if non-empty or
		// otherwise not removable) Syncthing-specific marker files.
		fs := cfg.Filesystem()
		if err := fs.Remove(config.DefaultMarkerName); err != nil {
			moved := config.DefaultMarkerName + time.Now().Format(".removed-20060102-150405")
			_ = fs.Rename(config.DefaultMarkerName, moved)
		}
	}

	m.cleanupFolderLocked(cfg)
	for _, r := range m.indexHandlers {
		r.Remove(cfg.ID)
	}

	m.fmut.Unlock()
	m.pmut.RUnlock()

	// Remove it from the database
	db.DropFolder(m.db, cfg.ID)
}

// Need to hold lock on m.fmut when calling this.
func (m *model) cleanupFolderLocked(cfg config.FolderConfiguration) {
	// clear up our config maps
	delete(m.folderCfgs, cfg.ID)
	delete(m.folderFiles, cfg.ID)
	delete(m.folderIgnores, cfg.ID)
	delete(m.folderRunners, cfg.ID)
	delete(m.folderRunnerToken, cfg.ID)
	delete(m.folderVersioners, cfg.ID)
}

func (m *model) restartFolder(from, to config.FolderConfiguration, cacheIgnoredFiles bool) error {
	if len(to.ID) == 0 {
		panic("bug: cannot restart empty folder ID")
	}
	if to.ID != from.ID {
		l.Warnf("bug: folder restart cannot change ID %q -> %q", from.ID, to.ID)
		panic("bug: folder restart cannot change ID")
	}
	folder := to.ID

	// This mutex protects the entirety of the restart operation, preventing
	// there from being more than one folder restart operation in progress
	// at any given time. The usual fmut/pmut stuff doesn't cover this,
	// because those locks are released while we are waiting for the folder
	// to shut down (and must be so because the folder might need them as
	// part of its operations before shutting down).
	restartMut := m.folderRestartMuts.Get(folder)
	restartMut.Lock()
	defer restartMut.Unlock()

	m.fmut.RLock()
	token, ok := m.folderRunnerToken[from.ID]
	m.fmut.RUnlock()
	if ok {
		m.RemoveAndWait(token, 0)
	}

	m.fmut.Lock()
	defer m.fmut.Unlock()

	// Cache the (maybe) existing fset before it's removed by cleanupFolderLocked
	fset := m.folderFiles[folder]
	fsetNil := fset == nil

	m.cleanupFolderLocked(from)
	if !to.Paused {
		if fsetNil {
			// Create a new fset. Might take a while and we do it under
			// locking, but it's unsafe to create fset:s concurrently so
			// that's the price we pay.
			var err error
			fset, err = db.NewFileSet(folder, to.Filesystem(), m.db)
			if err != nil {
				return fmt.Errorf("restarting %v: %w", to.Description(), err)
			}
		}
		m.addAndStartFolderLocked(to, fset, cacheIgnoredFiles)
	}

	// Care needs to be taken because we already hold fmut and the lock order
	// must be the same everywhere. As fmut is acquired first, this is fine.
	m.pmut.RLock()
	for _, indexRegistry := range m.indexHandlers {
		indexRegistry.RegisterFolderState(to, fset, m.folderRunners[to.ID])
	}
	m.pmut.RUnlock()

	var infoMsg string
	switch {
	case to.Paused:
		infoMsg = "Paused"
	case from.Paused:
		infoMsg = "Unpaused"
	default:
		infoMsg = "Restarted"
	}
	l.Infof("%v folder %v (%v)", infoMsg, to.Description(), to.Type)

	return nil
}

func (m *model) newFolder(cfg config.FolderConfiguration, cacheIgnoredFiles bool) error {
	// Creating the fileset can take a long time (metadata calculation) so
	// we do it outside of the lock.
	fset, err := db.NewFileSet(cfg.ID, cfg.Filesystem(), m.db)
	if err != nil {
		return fmt.Errorf("adding %v: %w", cfg.Description(), err)
	}

	m.fmut.Lock()
	defer m.fmut.Unlock()

	m.addAndStartFolderLocked(cfg, fset, cacheIgnoredFiles)

	// Cluster configs might be received and processed before reaching this
	// point, i.e. before the folder is started. If that's the case, start
	// index senders here.
	// Care needs to be taken because we already hold fmut and the lock order
	// must be the same everywhere. As fmut is acquired first, this is fine.
	m.pmut.RLock()
	for _, indexRegistry := range m.indexHandlers {
		indexRegistry.RegisterFolderState(cfg, fset, m.folderRunners[cfg.ID])
	}
	m.pmut.RUnlock()

	return nil
}

func (m *model) UsageReportingStats(report *contract.Report, version int, preview bool) {
	if version >= 3 {
		// Block stats
		blockStatsMut.Lock()
		for k, v := range blockStats {
			switch k {
			case "total":
				report.BlockStats.Total = v
			case "renamed":
				report.BlockStats.Renamed = v
			case "reused":
				report.BlockStats.Reused = v
			case "pulled":
				report.BlockStats.Pulled = v
			case "copyOrigin":
				report.BlockStats.CopyOrigin = v
			case "copyOriginShifted":
				report.BlockStats.CopyOriginShifted = v
			case "copyElsewhere":
				report.BlockStats.CopyElsewhere = v
			}
			// Reset counts, as these are incremental
			if !preview {
				blockStats[k] = 0
			}
		}
		blockStatsMut.Unlock()

		// Transport stats
		m.pmut.RLock()
		for _, conn := range m.conn {
			report.TransportStats[conn.Transport()]++
		}
		m.pmut.RUnlock()

		// Ignore stats
		var seenPrefix [3]bool
		for folder := range m.cfg.Folders() {
			lines, _, err := m.CurrentIgnores(folder)
			if err != nil {
				continue
			}
			report.IgnoreStats.Lines += len(lines)

			for _, line := range lines {
				// Allow prefixes to be specified in any order, but only once.
				for {
					if strings.HasPrefix(line, "!") && !seenPrefix[0] {
						seenPrefix[0] = true
						line = line[1:]
						report.IgnoreStats.Inverts++
					} else if strings.HasPrefix(line, "(?i)") && !seenPrefix[1] {
						seenPrefix[1] = true
						line = line[4:]
						report.IgnoreStats.Folded++
					} else if strings.HasPrefix(line, "(?d)") && !seenPrefix[2] {
						seenPrefix[2] = true
						line = line[4:]
						report.IgnoreStats.Deletable++
					} else {
						seenPrefix[0] = false
						seenPrefix[1] = false
						seenPrefix[2] = false
						break
					}
				}

				// Noops, remove
				line = strings.TrimSuffix(line, "**")
				line = strings.TrimPrefix(line, "**/")

				if strings.HasPrefix(line, "/") {
					report.IgnoreStats.Rooted++
				} else if strings.HasPrefix(line, "#include ") {
					report.IgnoreStats.Includes++
					if strings.Contains(line, "..") {
						report.IgnoreStats.EscapedIncludes++
					}
				}

				if strings.Contains(line, "**") {
					report.IgnoreStats.DoubleStars++
					// Remove not to trip up star checks.
					line = strings.ReplaceAll(line, "**", "")
				}

				if strings.Contains(line, "*") {
					report.IgnoreStats.Stars++
				}
			}
		}
	}
}

type ConnectionInfo struct {
	protocol.Statistics
	Connected     bool
	Paused        bool
	Address       string
	ClientVersion string
	Type          string
	Crypto        string
}

func (info ConnectionInfo) MarshalJSON() ([]byte, error) {
	return json.Marshal(map[string]interface{}{
		"at":            info.At,
		"inBytesTotal":  info.InBytesTotal,
		"outBytesTotal": info.OutBytesTotal,
		"connected":     info.Connected,
		"paused":        info.Paused,
		"address":       info.Address,
		"clientVersion": info.ClientVersion,
		"type":          info.Type,
		"crypto":        info.Crypto,
	})
}

// NumConnections returns the current number of active connected devices.
func (m *model) NumConnections() int {
	m.pmut.RLock()
	defer m.pmut.RUnlock()
	return len(m.conn)
}

// ConnectionStats returns a map with connection statistics for each device.
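// The result has a "connections" entry mapping device ID strings to
// ConnectionInfo values (for all configured devices, connected or not), plus
// a "total" entry carrying the aggregate transfer statistics, roughly:
//
//	{"connections": {"<deviceID>": {...}, ...}, "total": {...}}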
func (m *model) ConnectionStats() map[string]interface{} {
	m.pmut.RLock()
	defer m.pmut.RUnlock()

	res := make(map[string]interface{})
	devs := m.cfg.Devices()
	conns := make(map[string]ConnectionInfo, len(devs))
	for device, deviceCfg := range devs {
		hello := m.helloMessages[device]
		versionString := hello.ClientVersion
		if hello.ClientName != "syncthing" {
			versionString = hello.ClientName + " " + hello.ClientVersion
		}
		ci := ConnectionInfo{
			ClientVersion: strings.TrimSpace(versionString),
			Paused:        deviceCfg.Paused,
		}
		if conn, ok := m.conn[device]; ok {
			ci.Type = conn.Type()
			ci.Crypto = conn.Crypto()
			ci.Connected = ok
			ci.Statistics = conn.Statistics()
			if addr := conn.RemoteAddr(); addr != nil {
				ci.Address = addr.String()
			}
		}

		conns[device.String()] = ci
	}

	res["connections"] = conns

	in, out := protocol.TotalInOut()
	res["total"] = ConnectionInfo{
		Statistics: protocol.Statistics{
			At:            time.Now().Truncate(time.Second),
			InBytesTotal:  in,
			OutBytesTotal: out,
		},
	}

	return res
}

// DeviceStatistics returns statistics about each device
func (m *model) DeviceStatistics() (map[protocol.DeviceID]stats.DeviceStatistics, error) {
	m.fmut.RLock()
	defer m.fmut.RUnlock()
	res := make(map[protocol.DeviceID]stats.DeviceStatistics, len(m.deviceStatRefs))
	for id, sr := range m.deviceStatRefs {
		stats, err := sr.GetStatistics()
		if err != nil {
			return nil, err
		}
		res[id] = stats
	}
	return res, nil
}

// FolderStatistics returns statistics about each folder
func (m *model) FolderStatistics() (map[string]stats.FolderStatistics, error) {
	res := make(map[string]stats.FolderStatistics)
	m.fmut.RLock()
	defer m.fmut.RUnlock()
	for id, runner := range m.folderRunners {
		stats, err := runner.GetStatistics()
		if err != nil {
			return nil, err
		}
		res[id] = stats
	}
	return res, nil
}

type FolderCompletion struct {
	CompletionPct float64
	GlobalBytes   int64
	NeedBytes     int64
	GlobalItems   int
	NeedItems     int
	NeedDeletes   int
	Sequence      int64
}

func newFolderCompletion(global, need db.Counts, sequence int64) FolderCompletion {
	comp := FolderCompletion{
		GlobalBytes: global.Bytes,
		NeedBytes:   need.Bytes,
		GlobalItems: global.Files + global.Directories + global.Symlinks,
		NeedItems:   need.Files + need.Directories + need.Symlinks,
		NeedDeletes: need.Deleted,
		Sequence:    sequence,
	}
	comp.setCompletionPct()
	return comp
}

func (comp *FolderCompletion) add(other FolderCompletion) {
	comp.GlobalBytes += other.GlobalBytes
	comp.NeedBytes += other.NeedBytes
	comp.GlobalItems += other.GlobalItems
	comp.NeedItems += other.NeedItems
	comp.NeedDeletes += other.NeedDeletes
	comp.setCompletionPct()
}

// setCompletionPct computes the completion percentage as the share of global
// bytes that are not needed; e.g. GlobalBytes=1000 and NeedBytes=250 gives 75%.
func (comp *FolderCompletion) setCompletionPct() {
	if comp.GlobalBytes == 0 {
		comp.CompletionPct = 100
	} else {
		needRatio := float64(comp.NeedBytes) / float64(comp.GlobalBytes)
		comp.CompletionPct = 100 * (1 - needRatio)
	}

	// If the completion is 100% but there are deletes we need to handle,
	// drop it down a notch. Hack for consumers that look only at the
	// percentage (our own GUI does the same calculation as here on its own
	// and needs the same fixup).
	if comp.NeedBytes == 0 && comp.NeedDeletes > 0 {
		comp.CompletionPct = 95 // chosen by fair dice roll
	}
}

// Map returns the members as a map, e.g. used in api to serialize as JSON.
func (comp FolderCompletion) Map() map[string]interface{} {
	return map[string]interface{}{
		"completion":  comp.CompletionPct,
		"globalBytes": comp.GlobalBytes,
		"needBytes":   comp.NeedBytes,
		"globalItems": comp.GlobalItems,
		"needItems":   comp.NeedItems,
		"needDeletes": comp.NeedDeletes,
		"sequence":    comp.Sequence,
	}
}

// Completion returns the completion status, in percent with some counters,
// for the given device and folder. The device can be any known device ID
// (including the local device) or explicitly protocol.LocalDeviceID. An
// empty folder string means the aggregate of all folders shared with the
// given device.
func (m *model) Completion(device protocol.DeviceID, folder string) (FolderCompletion, error) {
	// The user specifically asked for our own device ID. Internally that is
	// known as protocol.LocalDeviceID so translate.
	if device == m.id {
		device = protocol.LocalDeviceID
	}

	if folder != "" {
		// We want completion for a specific folder.
		return m.folderCompletion(device, folder)
	}

	// We want completion for all (shared) folders as an aggregate.
	var comp FolderCompletion
	for _, fcfg := range m.cfg.FolderList() {
		if device == protocol.LocalDeviceID || fcfg.SharedWith(device) {
			folderComp, err := m.folderCompletion(device, fcfg.ID)
			if err != nil {
				return FolderCompletion{}, err
			}
			comp.add(folderComp)
		}
	}
	return comp, nil
}

func (m *model) folderCompletion(device protocol.DeviceID, folder string) (FolderCompletion, error) {
	m.fmut.RLock()
	err := m.checkFolderRunningLocked(folder)
	rf := m.folderFiles[folder]
	m.fmut.RUnlock()
	if err != nil {
		return FolderCompletion{}, err
	}

	snap, err := rf.Snapshot()
	if err != nil {
		return FolderCompletion{}, err
	}
	defer snap.Release()

	m.pmut.RLock()
	downloaded := m.deviceDownloads[device].BytesDownloaded(folder)
	m.pmut.RUnlock()

	need := snap.NeedSize(device)
	need.Bytes -= downloaded
	// This might be more than it really is, because some blocks can be of a smaller size.
	if need.Bytes < 0 {
		need.Bytes = 0
	}

	comp := newFolderCompletion(snap.GlobalSize(), need, snap.Sequence(device))

	l.Debugf("%v Completion(%s, %q): %v", m, device, folder, comp.Map())
	return comp, nil
}

// DBSnapshot returns a snapshot of the database content relevant to the given folder.
func (m *model) DBSnapshot(folder string) (*db.Snapshot, error) {
	m.fmut.RLock()
	err := m.checkFolderRunningLocked(folder)
	rf := m.folderFiles[folder]
	m.fmut.RUnlock()
	if err != nil {
		return nil, err
	}
	return rf.Snapshot()
}

func (m *model) FolderProgressBytesCompleted(folder string) int64 {
	return m.progressEmitter.BytesCompleted(folder)
}

// NeedFolderFiles returns a paginated list of currently needed files in
// progress, queued, and to be queued on next puller iteration.
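// Pagination is 1-based; for instance (illustrative folder ID):
//
//	progress, queued, rest, err := m.NeedFolderFiles("default", 1, 100)
//
// returns at most the first hundred needed entries across the three lists.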
func (m *model) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, []db.FileInfoTruncated, []db.FileInfoTruncated, error) {
	m.fmut.RLock()
	rf, rfOk := m.folderFiles[folder]
	runner, runnerOk := m.folderRunners[folder]
	cfg := m.folderCfgs[folder]
	m.fmut.RUnlock()

	if !rfOk {
		return nil, nil, nil, ErrFolderMissing
	}

	snap, err := rf.Snapshot()
	if err != nil {
		return nil, nil, nil, err
	}
	defer snap.Release()
	var progress, queued, rest []db.FileInfoTruncated
	var seen map[string]struct{}

	p := newPager(page, perpage)

	if runnerOk {
		progressNames, queuedNames, skipped := runner.Jobs(page, perpage)

		progress = make([]db.FileInfoTruncated, len(progressNames))
		queued = make([]db.FileInfoTruncated, len(queuedNames))
		seen = make(map[string]struct{}, len(progressNames)+len(queuedNames))

		for i, name := range progressNames {
			if f, ok := snap.GetGlobalTruncated(name); ok {
				progress[i] = f
				seen[name] = struct{}{}
			}
		}

		for i, name := range queuedNames {
			if f, ok := snap.GetGlobalTruncated(name); ok {
				queued[i] = f
				seen[name] = struct{}{}
			}
		}

		p.get -= len(seen)
		if p.get == 0 {
			return progress, queued, nil, nil
		}
		p.toSkip -= skipped
	}

	rest = make([]db.FileInfoTruncated, 0, perpage)
	snap.WithNeedTruncated(protocol.LocalDeviceID, func(f protocol.FileIntf) bool {
		if cfg.IgnoreDelete && f.IsDeleted() {
			return true
		}

		if p.skip() {
			return true
		}
		ft := f.(db.FileInfoTruncated)
		if _, ok := seen[ft.Name]; !ok {
			rest = append(rest, ft)
			p.get--
		}
		return p.get > 0
	})

	return progress, queued, rest, nil
}

// RemoteNeedFolderFiles returns a paginated list of files that the given
// remote device currently needs for the given folder.
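// For example (illustrative folder and device IDs):
//
//	needed, err := m.RemoteNeedFolderFiles("default", remoteID, 1, 100)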
func (m *model) RemoteNeedFolderFiles(folder string, device protocol.DeviceID, page, perpage int) ([]db.FileInfoTruncated, error) {
	m.fmut.RLock()
	rf, ok := m.folderFiles[folder]
	m.fmut.RUnlock()

	if !ok {
		return nil, ErrFolderMissing
	}

	snap, err := rf.Snapshot()
	if err != nil {
		return nil, err
	}
	defer snap.Release()

	files := make([]db.FileInfoTruncated, 0, perpage)
	p := newPager(page, perpage)
	snap.WithNeedTruncated(device, func(f protocol.FileIntf) bool {
		if p.skip() {
			return true
		}
		files = append(files, f.(db.FileInfoTruncated))
		return !p.done()
	})
	return files, nil
}

func (m *model) LocalChangedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, error) {
	m.fmut.RLock()
	rf, ok := m.folderFiles[folder]
	cfg := m.folderCfgs[folder]
	m.fmut.RUnlock()

	if !ok {
		return nil, ErrFolderMissing
	}

	snap, err := rf.Snapshot()
	if err != nil {
		return nil, err
	}
	defer snap.Release()

	if snap.ReceiveOnlyChangedSize().TotalItems() == 0 {
		return nil, nil
	}

	p := newPager(page, perpage)
	recvEnc := cfg.Type == config.FolderTypeReceiveEncrypted
	files := make([]db.FileInfoTruncated, 0, perpage)

	snap.WithHaveTruncated(protocol.LocalDeviceID, func(f protocol.FileIntf) bool {
		if !f.IsReceiveOnlyChanged() || (recvEnc && f.IsDeleted()) {
			return true
		}
		if p.skip() {
			return true
		}
		ft := f.(db.FileInfoTruncated)
		files = append(files, ft)
		return !p.done()
	})

	return files, nil
}

type pager struct {
	toSkip, get int
}

func newPager(page, perpage int) *pager {
	return &pager{
		toSkip: (page - 1) * perpage,
		get:    perpage,
	}
}

func (p *pager) skip() bool {
	if p.toSkip == 0 {
		return false
	}
	p.toSkip--
	return true
}

func (p *pager) done() bool {
	if p.get > 0 {
		p.get--
	}
	return p.get == 0
}

// Index is called when a new device is connected and we receive their full index.
// Implements the protocol.Model interface.
func (m *model) Index(deviceID protocol.DeviceID, folder string, fs []protocol.FileInfo) error {
	return m.handleIndex(deviceID, folder, fs, false)
}

// IndexUpdate is called for incremental updates to connected devices' indexes.
// Implements the protocol.Model interface.
func (m *model) IndexUpdate(deviceID protocol.DeviceID, folder string, fs []protocol.FileInfo) error {
	return m.handleIndex(deviceID, folder, fs, true)
}

func (m *model) handleIndex(deviceID protocol.DeviceID, folder string, fs []protocol.FileInfo, update bool) error {
	op := "Index"
	if update {
		op += " update"
	}

	l.Debugf("%v (in): %s / %q: %d files", op, deviceID, folder, len(fs))

	if cfg, ok := m.cfg.Folder(folder); !ok || !cfg.SharedWith(deviceID) {
		l.Infof("%v for unexpected folder ID %q sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration.", op, folder, deviceID)
		return errors.Wrap(ErrFolderMissing, folder)
	} else if cfg.Paused {
		l.Debugf("%v for paused folder (ID %q) sent from device %q.", op, folder, deviceID)
		return errors.Wrap(ErrFolderPaused, folder)
	}

	m.pmut.RLock()
	indexHandler, ok := m.indexHandlers[deviceID]
	m.pmut.RUnlock()
	if !ok {
		// This should be impossible, as an index handler always exists for an
		// open connection, and this method can't be called on a closed
		// connection
		m.evLogger.Log(events.Failure, "index sender does not exist for connection on which indexes were received")
		l.Debugf("%v for folder (ID %q) sent from device %q: missing index handler", op, folder, deviceID)
		return errors.Wrap(errors.New("index handler missing"), folder)
	}

	return indexHandler.ReceiveIndex(folder, fs, update, op)
}

type clusterConfigDeviceInfo struct {
	local, remote protocol.Device
}

func (m *model) ClusterConfig(deviceID protocol.DeviceID, cm protocol.ClusterConfig) error {
	// Check the peer device's announced folders against our own. Emits events
	// for folders that we don't expect (unknown or not shared).
	// Also, collect a list of folders we do share, and if the remote is
	// interested in temporary indexes, subscribe the connection.

	l.Debugf("Handling ClusterConfig from %v", deviceID.Short())

	m.pmut.RLock()
	indexHandlerRegistry, ok := m.indexHandlers[deviceID]
	m.pmut.RUnlock()
	if !ok {
		panic("bug: ClusterConfig called on closed or nonexistent connection")
	}

	deviceCfg, ok := m.cfg.Device(deviceID)
	if !ok {
		l.Debugln("Device disappeared from config while processing cluster-config")
		return errDeviceUnknown
	}

	// Assemble the device information from the connected device about
	// themselves and us for all folders.
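	// The result maps folder ID to the announcing device's view of itself
	// (remote) and of us (local); both entries are required further down for
	// the encryption consistency checks and index handling.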
	ccDeviceInfos := make(map[string]*clusterConfigDeviceInfo, len(cm.Folders))
	for _, folder := range cm.Folders {
		info := &clusterConfigDeviceInfo{}
		for _, dev := range folder.Devices {
			if dev.ID == m.id {
				info.local = dev
			} else if dev.ID == deviceID {
				info.remote = dev
			}
			if info.local.ID != protocol.EmptyDeviceID && info.remote.ID != protocol.EmptyDeviceID {
				break
			}
		}
		if info.remote.ID == protocol.EmptyDeviceID {
			l.Infof("Device %v sent cluster-config without the device info for the remote on folder %v", deviceID, folder.Description())
			return errMissingRemoteInClusterConfig
		}
		if info.local.ID == protocol.EmptyDeviceID {
			l.Infof("Device %v sent cluster-config without the device info for us locally on folder %v", deviceID, folder.Description())
			return errMissingLocalInClusterConfig
		}
		ccDeviceInfos[folder.ID] = info
	}

	// Needs to happen outside of the fmut, as it can cause CommitConfiguration to run.
	if deviceCfg.AutoAcceptFolders {
		w, _ := m.cfg.Modify(func(cfg *config.Configuration) {
			changedFcfg := make(map[string]config.FolderConfiguration)
			haveFcfg := cfg.FolderMap()
			for _, folder := range cm.Folders {
				from, ok := haveFcfg[folder.ID]
				if to, changed := m.handleAutoAccepts(deviceID, folder, ccDeviceInfos[folder.ID], from, ok, cfg.Defaults.Folder.Path); changed {
					changedFcfg[folder.ID] = to
				}
			}
			if len(changedFcfg) == 0 {
				return
			}
			for i := range cfg.Folders {
				if fcfg, ok := changedFcfg[cfg.Folders[i].ID]; ok {
					cfg.Folders[i] = fcfg
					delete(changedFcfg, cfg.Folders[i].ID)
				}
			}
			for _, fcfg := range changedFcfg {
				cfg.Folders = append(cfg.Folders, fcfg)
			}
		})
		// Need to wait for the waiter, as this calls CommitConfiguration,
		// which sets up the folder and as we return from this call,
		// ClusterConfig starts poking at m.folderFiles and other things
		// that might not exist until the config is committed.
		w.Wait()
	}

	tempIndexFolders, paused, err := m.ccHandleFolders(cm.Folders, deviceCfg, ccDeviceInfos, indexHandlerRegistry)
	if err != nil {
		return err
	}

	m.pmut.Lock()
	m.remotePausedFolders[deviceID] = paused
	m.pmut.Unlock()

	if len(tempIndexFolders) > 0 {
		m.pmut.RLock()
		conn, ok := m.conn[deviceID]
		m.pmut.RUnlock()
		// In case we've got ClusterConfig, and the connection disappeared
		// from in front of our nose.
		if ok {
			m.progressEmitter.temporaryIndexSubscribe(conn, tempIndexFolders)
		}
	}

	if deviceCfg.Introducer {
		m.cfg.Modify(func(cfg *config.Configuration) {
			folders, devices, foldersDevices, introduced := m.handleIntroductions(deviceCfg, cm, cfg.FolderMap(), cfg.DeviceMap())
			folders, devices, deintroduced := m.handleDeintroductions(deviceCfg, foldersDevices, folders, devices)
			if !introduced && !deintroduced {
				return
			}
			cfg.Folders = make([]config.FolderConfiguration, 0, len(folders))
			for _, fcfg := range folders {
				cfg.Folders = append(cfg.Folders, fcfg)
			}
			cfg.Devices = make([]config.DeviceConfiguration, 0, len(devices))
			for _, dcfg := range devices {
				cfg.Devices = append(cfg.Devices, dcfg)
			}
		})
	}

	return nil
}

func (m *model) ccHandleFolders(folders []protocol.Folder, deviceCfg config.DeviceConfiguration, ccDeviceInfos map[string]*clusterConfigDeviceInfo, indexHandlers *indexHandlerRegistry) ([]string, map[string]struct{}, error) {
	var folderDevice config.FolderDeviceConfiguration
	tempIndexFolders := make([]string, 0, len(folders))
	paused := make(map[string]struct{}, len(folders))
	seenFolders := make(map[string]struct{}, len(folders))
	updatedPending := make([]updatedPendingFolder, 0, len(folders))
	deviceID := deviceCfg.DeviceID
	expiredPending, err := m.db.PendingFoldersForDevice(deviceID)
	if err != nil {
		l.Infof("Could not get pending folders for cleanup: %v", err)
	}
	of := db.ObservedFolder{Time: time.Now().Truncate(time.Second)}
	for _, folder := range folders {
		seenFolders[folder.ID] = struct{}{}

		cfg, ok := m.cfg.Folder(folder.ID)
		if ok {
			folderDevice, ok = cfg.Device(deviceID)
		}
		if !ok {
			indexHandlers.Remove(folder.ID)
			if deviceCfg.IgnoredFolder(folder.ID) {
				l.Infof("Ignoring folder %s from device %s since we are configured to", folder.Description(), deviceID)
				continue
			}
			delete(expiredPending, folder.ID)
			of.Label = folder.Label
			of.ReceiveEncrypted = len(ccDeviceInfos[folder.ID].local.EncryptionPasswordToken) > 0
			of.RemoteEncrypted = len(ccDeviceInfos[folder.ID].remote.EncryptionPasswordToken) > 0
			if err := m.db.AddOrUpdatePendingFolder(folder.ID, of, deviceID); err != nil {
				l.Warnf("Failed to persist pending folder entry to database: %v", err)
			}
			if !folder.Paused {
				indexHandlers.AddIndexInfo(folder.ID, ccDeviceInfos[folder.ID])
			}
			updatedPending = append(updatedPending, updatedPendingFolder{
				FolderID:         folder.ID,
				FolderLabel:      folder.Label,
				DeviceID:         deviceID,
				ReceiveEncrypted: of.ReceiveEncrypted,
				RemoteEncrypted:  of.RemoteEncrypted,
			})
			// DEPRECATED: Only for backwards compatibility, should be removed.
			m.evLogger.Log(events.FolderRejected, map[string]string{
				"folder":      folder.ID,
				"folderLabel": folder.Label,
				"device":      deviceID.String(),
			})
			l.Infof("Unexpected folder %s sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration.", folder.Description(), deviceID)
			continue
		}

		if folder.Paused {
			indexHandlers.Remove(folder.ID)
			paused[cfg.ID] = struct{}{}
			continue
		}

		if cfg.Paused {
			indexHandlers.AddIndexInfo(folder.ID, ccDeviceInfos[folder.ID])
			continue
		}

		if err := m.ccCheckEncryption(cfg, folderDevice, ccDeviceInfos[folder.ID], deviceCfg.Untrusted); err != nil {
			sameError := false
			m.fmut.Lock()
			if devs, ok := m.folderEncryptionFailures[folder.ID]; ok {
				sameError = devs[deviceID] == err
			} else {
				m.folderEncryptionFailures[folder.ID] = make(map[protocol.DeviceID]error)
			}
			m.folderEncryptionFailures[folder.ID][deviceID] = err
			m.fmut.Unlock()
			msg := fmt.Sprintf("Failure checking encryption consistency with device %v for folder %v: %v", deviceID, cfg.Description(), err)
			if sameError {
				l.Debugln(msg)
			} else {
				if rerr, ok := err.(*redactedError); ok {
					err = rerr.redacted
				}
				m.evLogger.Log(events.Failure, err.Error())
				l.Warnln(msg)
			}
			return tempIndexFolders, paused, err
		}
		m.fmut.Lock()
		if devErrs, ok := m.folderEncryptionFailures[folder.ID]; ok {
			if len(devErrs) == 1 {
				delete(m.folderEncryptionFailures, folder.ID)
			} else {
				delete(m.folderEncryptionFailures[folder.ID], deviceID)
			}
		}
		m.fmut.Unlock()

		// Handle indexes

		if !folder.DisableTempIndexes {
			tempIndexFolders = append(tempIndexFolders, folder.ID)
		}

		indexHandlers.AddIndexInfo(folder.ID, ccDeviceInfos[folder.ID])
	}

	indexHandlers.RemoveAllExcept(seenFolders)
	expiredPendingList := make([]map[string]string, 0, len(expiredPending))
	for folder := range expiredPending {
		if err = m.db.RemovePendingFolderForDevice(folder, deviceID); err != nil {
			msg := "Failed to remove pending folder-device entry"
			l.Warnf("%v (%v, %v): %v", msg, folder, deviceID, err)
			m.evLogger.Log(events.Failure, msg)
			continue
		}
		expiredPendingList = append(expiredPendingList, map[string]string{
			"folderID": folder,
			"deviceID": deviceID.String(),
		})
	}
	if len(updatedPending) > 0 || len(expiredPendingList) > 0 {
		m.evLogger.Log(events.PendingFoldersChanged, map[string]interface{}{
			"added":   updatedPending,
			"removed": expiredPendingList,
		})
	}

	return tempIndexFolders, paused, nil
}

func (m *model) ccCheckEncryption(fcfg config.FolderConfiguration, folderDevice config.FolderDeviceConfiguration, ccDeviceInfos *clusterConfigDeviceInfo, deviceUntrusted bool) error {
	hasTokenRemote := len(ccDeviceInfos.remote.EncryptionPasswordToken) > 0
	hasTokenLocal := len(ccDeviceInfos.local.EncryptionPasswordToken) > 0
	isEncryptedRemote := folderDevice.EncryptionPassword != ""
	isEncryptedLocal := fcfg.Type == config.FolderTypeReceiveEncrypted

	if !isEncryptedRemote && !isEncryptedLocal && deviceUntrusted {
		return errEncryptionNotEncryptedUntrusted
	}

	if !(hasTokenRemote || hasTokenLocal || isEncryptedRemote || isEncryptedLocal) {
		// No one cares about encryption here
		return nil
	}

	if isEncryptedRemote && isEncryptedLocal {
		// Should never happen, but be safe against config raciness.
		return errEncryptionInvConfigLocal
	}

	if hasTokenRemote && hasTokenLocal {
		return errEncryptionInvConfigRemote
	}

	if !(hasTokenRemote || hasTokenLocal) {
		if isEncryptedRemote {
			return errEncryptionPlainForRemoteEncrypted
		} else {
			return errEncryptionPlainForReceiveEncrypted
		}
	}

	if !(isEncryptedRemote || isEncryptedLocal) {
		return errEncryptionNotEncryptedLocal
	}

	if isEncryptedRemote {
		passwordToken := protocol.PasswordToken(fcfg.ID, folderDevice.EncryptionPassword)
		match := false
		if hasTokenLocal {
			match = bytes.Equal(passwordToken, ccDeviceInfos.local.EncryptionPasswordToken)
		} else {
			// hasTokenRemote == true
			match = bytes.Equal(passwordToken, ccDeviceInfos.remote.EncryptionPasswordToken)
		}
		if !match {
			return errEncryptionPassword
		}
		return nil
	}

	// isEncryptedLocal == true

	var ccToken []byte
	if hasTokenLocal {
		ccToken = ccDeviceInfos.local.EncryptionPasswordToken
	} else {
		// hasTokenRemote == true
		ccToken = ccDeviceInfos.remote.EncryptionPasswordToken
	}
	m.fmut.RLock()
	token, ok := m.folderEncryptionPasswordTokens[fcfg.ID]
	m.fmut.RUnlock()
	if !ok {
		var err error
		token, err = readEncryptionToken(fcfg)
		if err != nil && !fs.IsNotExist(err) {
			if rerr, ok := redactPathError(err); ok {
				return rerr
			}
			return &redactedError{
				error:    err,
				redacted: errEncryptionTokenRead,
			}
		}
		if err == nil {
			m.fmut.Lock()
			m.folderEncryptionPasswordTokens[fcfg.ID] = token
			m.fmut.Unlock()
		} else {
			if err := writeEncryptionToken(ccToken, fcfg); err != nil {
				if rerr, ok := redactPathError(err); ok {
					return rerr
				} else {
					return &redactedError{
						error:    err,
						redacted: errEncryptionTokenWrite,
					}
				}
			}
			m.fmut.Lock()
			m.folderEncryptionPasswordTokens[fcfg.ID] = ccToken
			m.fmut.Unlock()
			// We can only announce ourselves once we have the token,
			// thus we need to resend CCs now that we have it.
			m.sendClusterConfig(fcfg.DeviceIDs())
			return nil
		}
	}
	if !bytes.Equal(token, ccToken) {
		return errEncryptionPassword
	}
	return nil
}

func (m *model) sendClusterConfig(ids []protocol.DeviceID) {
	if len(ids) == 0 {
		return
	}
	ccConns := make([]protocol.Connection, 0, len(ids))
	m.pmut.RLock()
	for _, id := range ids {
		if conn, ok := m.conn[id]; ok {
			ccConns = append(ccConns, conn)
		}
	}
	m.pmut.RUnlock()
	// Generating cluster-configs acquires fmut -> must happen outside of pmut.
	for _, conn := range ccConns {
		cm, passwords := m.generateClusterConfig(conn.ID())
		conn.SetFolderPasswords(passwords)
		go conn.ClusterConfig(cm)
	}
}

// handleIntroductions handles adding devices/folders that are shared by an introducer device
func (m *model) handleIntroductions(introducerCfg config.DeviceConfiguration, cm protocol.ClusterConfig, folders map[string]config.FolderConfiguration, devices map[protocol.DeviceID]config.DeviceConfiguration) (map[string]config.FolderConfiguration, map[protocol.DeviceID]config.DeviceConfiguration, folderDeviceSet, bool) {
	changed := false

	foldersDevices := make(folderDeviceSet)

	for _, folder := range cm.Folders {
		// Adds devices which we do not have, but the introducer has
		// for the folders that we have in common. Also, shares folders
		// with devices that we have in common, yet are currently not sharing
		// the folder.

		fcfg, ok := folders[folder.ID]
		if !ok {
			// Don't have this folder, carry on.
			continue
		}

		folderChanged := false

		for _, device := range folder.Devices {
			// No need to share with self.
			if device.ID == m.id {
				continue
			}

			foldersDevices.set(device.ID, folder.ID)

			if _, ok := devices[device.ID]; !ok {
				// The device is currently unknown. Add it to the config.
				devices[device.ID] = m.introduceDevice(device, introducerCfg)
			} else if fcfg.SharedWith(device.ID) {
				// We already share the folder with this device, so
				// nothing to do.
				continue
			}

			if fcfg.Type != config.FolderTypeReceiveEncrypted && device.EncryptionPasswordToken != nil {
				l.Infof("Cannot share folder %s with %v because the introducer %v encrypts data, which requires a password", folder.Description(), device.ID, introducerCfg.DeviceID)
				continue
			}

			// We don't yet share this folder with this device. Add the device
			// to the sharing list of the folder.
			l.Infof("Sharing folder %s with %v (vouched for by introducer %v)", folder.Description(), device.ID, introducerCfg.DeviceID)
			fcfg.Devices = append(fcfg.Devices, config.FolderDeviceConfiguration{
				DeviceID:     device.ID,
				IntroducedBy: introducerCfg.DeviceID,
			})
			folderChanged = true
		}

		if folderChanged {
			folders[fcfg.ID] = fcfg
			changed = true
		}
	}

	return folders, devices, foldersDevices, changed
}

// handleDeintroductions handles removals of devices/shares that are removed by an introducer device
func (m *model) handleDeintroductions(introducerCfg config.DeviceConfiguration, foldersDevices folderDeviceSet, folders map[string]config.FolderConfiguration, devices map[protocol.DeviceID]config.DeviceConfiguration) (map[string]config.FolderConfiguration, map[protocol.DeviceID]config.DeviceConfiguration, bool) {
	if introducerCfg.SkipIntroductionRemovals {
		return folders, devices, false
	}

	changed := false
	devicesNotIntroduced := make(map[protocol.DeviceID]struct{})

	// Check if we should unshare some folders, if the introducer has unshared them.
	for folderID, folderCfg := range folders {
		for k := 0; k < len(folderCfg.Devices); k++ {
			if folderCfg.Devices[k].IntroducedBy != introducerCfg.DeviceID {
				devicesNotIntroduced[folderCfg.Devices[k].DeviceID] = struct{}{}
				continue
			}
			if !foldersDevices.has(folderCfg.Devices[k].DeviceID, folderCfg.ID) {
				// We could not find that folder shared on the
				// introducer with the device that was introduced to us.
				// We should follow and unshare as well.
				l.Infof("Unsharing folder %s with %v as introducer %v no longer shares the folder with that device", folderCfg.Description(), folderCfg.Devices[k].DeviceID, folderCfg.Devices[k].IntroducedBy)
				folderCfg.Devices = append(folderCfg.Devices[:k], folderCfg.Devices[k+1:]...)
				folders[folderID] = folderCfg
				k--
				changed = true
			}
		}
	}

	// Check if we should remove some devices, if the introducer no longer
	// shares any folder with them. Yet do not remove if we share other
	// folders that haven't been introduced by the introducer.
	for deviceID, device := range devices {
		if device.IntroducedBy == introducerCfg.DeviceID {
			if !foldersDevices.hasDevice(deviceID) {
				if _, ok := devicesNotIntroduced[deviceID]; !ok {
					// The introducer no longer shares any folder with the
					// device, remove the device.
					l.Infof("Removing device %v as introducer %v no longer shares any folders with that device", deviceID, device.IntroducedBy)
					changed = true
					delete(devices, deviceID)
					continue
				}
				l.Infof("Would have removed %v as %v no longer shares any folders, yet there are other folders that are shared with this device that haven't been introduced by this introducer.", deviceID, device.IntroducedBy)
			}
		}
	}

	return folders, devices, changed
}

// handleAutoAccepts handles adding and sharing folders for devices that have
// AutoAcceptFolders set to true.
func (m *model) handleAutoAccepts(deviceID protocol.DeviceID, folder protocol.Folder, ccDeviceInfos *clusterConfigDeviceInfo, cfg config.FolderConfiguration, haveCfg bool, defaultPath string) (config.FolderConfiguration, bool) {
	if !haveCfg {
		defaultPathFs := fs.NewFilesystem(fs.FilesystemTypeBasic, defaultPath)
		pathAlternatives := []string{
			fs.SanitizePath(folder.Label),
			fs.SanitizePath(folder.ID),
		}
		for _, path := range pathAlternatives {
			if _, err := defaultPathFs.Lstat(path); !fs.IsNotExist(err) {
				continue
			}

			fcfg := newFolderConfiguration(m.cfg, folder.ID, folder.Label, fs.FilesystemTypeBasic, filepath.Join(defaultPath, path))
			fcfg.Devices = append(fcfg.Devices, config.FolderDeviceConfiguration{
				DeviceID: deviceID,
			})

			if len(ccDeviceInfos.remote.EncryptionPasswordToken) > 0 || len(ccDeviceInfos.local.EncryptionPasswordToken) > 0 {
				fcfg.Type = config.FolderTypeReceiveEncrypted
			}

			l.Infof("Auto-accepted %s folder %s at path %s", deviceID, folder.Description(), fcfg.Path)
			return fcfg, true
		}
		l.Infof("Failed to auto-accept folder %s from %s due to path conflict", folder.Description(), deviceID)
		return config.FolderConfiguration{}, false
	} else {
		for _, device := range cfg.DeviceIDs() {
			if device == deviceID {
				// Already shared, nothing to do.
1678 return config.FolderConfiguration{}, false
1679 }
1680 }
1681 if cfg.Type == config.FolderTypeReceiveEncrypted {
1682 if len(ccDeviceInfos.remote.EncryptionPasswordToken) == 0 && len(ccDeviceInfos.local.EncryptionPasswordToken) == 0 {
1683 l.Infof("Failed to auto-accept device %s on existing folder %s as the remote wants to send us unencrypted data, but the folder type is receive-encrypted", deviceID, folder.Description())
1684 return config.FolderConfiguration{}, false
1685 }
1686 } else {
1687 if len(ccDeviceInfos.remote.EncryptionPasswordToken) > 0 || len(ccDeviceInfos.local.EncryptionPasswordToken) > 0 {
1688 l.Infof("Failed to auto-accept device %s on existing folder %s as the remote wants to send us encrypted data, but the folder type is not receive-encrypted", deviceID, folder.Description())
1689 return config.FolderConfiguration{}, false
1690 }
1691 }
1692 cfg.Devices = append(cfg.Devices, config.FolderDeviceConfiguration{
1693 DeviceID: deviceID,
1694 })
1695 l.Infof("Shared %s with %s due to auto-accept", folder.ID, deviceID)
1696 return cfg, true
1697 }
1698}
1699
1700func (m *model) introduceDevice(device protocol.Device, introducerCfg config.DeviceConfiguration) config.DeviceConfiguration {
1701 addresses := []string{"dynamic"}
1702 for _, addr := range device.Addresses {
1703 if addr != "dynamic" {
1704 addresses = append(addresses, addr)
1705 }
1706 }
1707
1708 l.Infof("Adding device %v to config (vouched for by introducer %v)", device.ID, introducerCfg.DeviceID)
1709 newDeviceCfg := m.cfg.DefaultDevice()
1710 newDeviceCfg.DeviceID = device.ID
1711 newDeviceCfg.Name = device.Name
1712 newDeviceCfg.Compression = introducerCfg.Compression
1713 newDeviceCfg.Addresses = addresses
1714 newDeviceCfg.CertName = device.CertName
1715 newDeviceCfg.IntroducedBy = introducerCfg.DeviceID
1716
1717 // The introducers' introducers are also our introducers.
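// An introduced device that is itself flagged as an introducer is trusted
// transitively, including its preference for skipping introduction removals.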
1718 if device.Introducer { 1719 l.Infof("Device %v is now also an introducer", device.ID) 1720 newDeviceCfg.Introducer = true 1721 newDeviceCfg.SkipIntroductionRemovals = device.SkipIntroductionRemovals 1722 } 1723 1724 return newDeviceCfg 1725} 1726 1727// Closed is called when a connection has been closed 1728func (m *model) Closed(device protocol.DeviceID, err error) { 1729 m.pmut.Lock() 1730 conn, ok := m.conn[device] 1731 if !ok { 1732 m.pmut.Unlock() 1733 return 1734 } 1735 1736 delete(m.conn, device) 1737 delete(m.connRequestLimiters, device) 1738 delete(m.helloMessages, device) 1739 delete(m.deviceDownloads, device) 1740 delete(m.remotePausedFolders, device) 1741 closed := m.closed[device] 1742 delete(m.closed, device) 1743 delete(m.indexHandlers, device) 1744 m.pmut.Unlock() 1745 1746 m.progressEmitter.temporaryIndexUnsubscribe(conn) 1747 m.deviceDidClose(device, time.Since(conn.EstablishedAt())) 1748 1749 l.Infof("Connection to %s at %s closed: %v", device, conn, err) 1750 m.evLogger.Log(events.DeviceDisconnected, map[string]string{ 1751 "id": device.String(), 1752 "error": err.Error(), 1753 }) 1754 close(closed) 1755} 1756 1757// Implements protocol.RequestResponse 1758type requestResponse struct { 1759 data []byte 1760 closed chan struct{} 1761 once stdsync.Once 1762} 1763 1764func newRequestResponse(size int) *requestResponse { 1765 return &requestResponse{ 1766 data: protocol.BufferPool.Get(size), 1767 closed: make(chan struct{}), 1768 } 1769} 1770 1771func (r *requestResponse) Data() []byte { 1772 return r.data 1773} 1774 1775func (r *requestResponse) Close() { 1776 r.once.Do(func() { 1777 protocol.BufferPool.Put(r.data) 1778 close(r.closed) 1779 }) 1780} 1781 1782func (r *requestResponse) Wait() { 1783 <-r.closed 1784} 1785 1786// Request returns the specified data segment by reading it from local disk. 1787// Implements the protocol.Model interface. 1788func (m *model) Request(deviceID protocol.DeviceID, folder, name string, blockNo, size int32, offset int64, hash []byte, weakHash uint32, fromTemporary bool) (out protocol.RequestResponse, err error) { 1789 if size < 0 || offset < 0 { 1790 return nil, protocol.ErrInvalid 1791 } 1792 1793 m.fmut.RLock() 1794 folderCfg, ok := m.folderCfgs[folder] 1795 folderIgnores := m.folderIgnores[folder] 1796 m.fmut.RUnlock() 1797 if !ok { 1798 // The folder might be already unpaused in the config, but not yet 1799 // in the model. 
1800 l.Debugf("Request from %s for file %s in unstarted folder %q", deviceID, name, folder) 1801 return nil, protocol.ErrGeneric 1802 } 1803 1804 if !folderCfg.SharedWith(deviceID) { 1805 l.Warnf("Request from %s for file %s in unshared folder %q", deviceID, name, folder) 1806 return nil, protocol.ErrGeneric 1807 } 1808 if folderCfg.Paused { 1809 l.Debugf("Request from %s for file %s in paused folder %q", deviceID, name, folder) 1810 return nil, protocol.ErrGeneric 1811 } 1812 1813 // Make sure the path is valid and in canonical form 1814 if name, err = fs.Canonicalize(name); err != nil { 1815 l.Debugf("Request from %s in folder %q for invalid filename %s", deviceID, folder, name) 1816 return nil, protocol.ErrGeneric 1817 } 1818 1819 if deviceID != protocol.LocalDeviceID { 1820 l.Debugf("%v REQ(in): %s: %q / %q o=%d s=%d t=%v", m, deviceID, folder, name, offset, size, fromTemporary) 1821 } 1822 1823 if fs.IsInternal(name) { 1824 l.Debugf("%v REQ(in) for internal file: %s: %q / %q o=%d s=%d", m, deviceID, folder, name, offset, size) 1825 return nil, protocol.ErrInvalid 1826 } 1827 1828 if folderIgnores.Match(name).IsIgnored() { 1829 l.Debugf("%v REQ(in) for ignored file: %s: %q / %q o=%d s=%d", m, deviceID, folder, name, offset, size) 1830 return nil, protocol.ErrInvalid 1831 } 1832 1833 // Restrict parallel requests by connection/device 1834 1835 m.pmut.RLock() 1836 limiter := m.connRequestLimiters[deviceID] 1837 m.pmut.RUnlock() 1838 1839 // The requestResponse releases the bytes to the buffer pool and the 1840 // limiters when its Close method is called. 1841 res := newLimitedRequestResponse(int(size), limiter, m.globalRequestLimiter) 1842 1843 defer func() { 1844 // Close it ourselves if it isn't returned due to an error 1845 if err != nil { 1846 res.Close() 1847 } 1848 }() 1849 1850 // Grab the FS after limiting, as it causes I/O and we want to minimize 1851 // the race time between the symlink check and the read. 1852 1853 folderFs := folderCfg.Filesystem() 1854 1855 if err := osutil.TraversesSymlink(folderFs, filepath.Dir(name)); err != nil { 1856 l.Debugf("%v REQ(in) traversal check: %s - %s: %q / %q o=%d s=%d", m, err, deviceID, folder, name, offset, size) 1857 return nil, protocol.ErrNoSuchFile 1858 } 1859 1860 // Only check temp files if the flag is set, and if we are set to advertise 1861 // the temp indexes. 1862 if fromTemporary && !folderCfg.DisableTempIndexes { 1863 tempFn := fs.TempName(name) 1864 1865 if info, err := folderFs.Lstat(tempFn); err != nil || !info.IsRegular() { 1866 // Reject reads for anything that doesn't exist or is something 1867 // other than a regular file. 1868 l.Debugf("%v REQ(in) failed stating temp file (%v): %s: %q / %q o=%d s=%d", m, err, deviceID, folder, name, offset, size) 1869 return nil, protocol.ErrNoSuchFile 1870 } 1871 _, err := readOffsetIntoBuf(folderFs, tempFn, offset, res.data) 1872 if err == nil && scanner.Validate(res.data, hash, weakHash) { 1873 return res, nil 1874 } 1875 // Fall through to reading from a non-temp file, just incase the temp 1876 // file has finished downloading. 1877 } 1878 1879 if info, err := folderFs.Lstat(name); err != nil || !info.IsRegular() { 1880 // Reject reads for anything that doesn't exist or is something 1881 // other than a regular file. 
1882 l.Debugf("%v REQ(in) failed stating file (%v): %s: %q / %q o=%d s=%d", m, err, deviceID, folder, name, offset, size) 1883 return nil, protocol.ErrNoSuchFile 1884 } 1885 1886 n, err := readOffsetIntoBuf(folderFs, name, offset, res.data) 1887 if fs.IsNotExist(err) { 1888 l.Debugf("%v REQ(in) file doesn't exist: %s: %q / %q o=%d s=%d", m, deviceID, folder, name, offset, size) 1889 return nil, protocol.ErrNoSuchFile 1890 } else if err == io.EOF { 1891 // Read beyond end of file. This might indicate a problem, or it 1892 // might be a short block that gets padded when read for encrypted 1893 // folders. We ignore the error and let the hash validation in the 1894 // next step take care of it, by only hashing the part we actually 1895 // managed to read. 1896 } else if err != nil { 1897 l.Debugf("%v REQ(in) failed reading file (%v): %s: %q / %q o=%d s=%d", m, err, deviceID, folder, name, offset, size) 1898 return nil, protocol.ErrGeneric 1899 } 1900 1901 if folderCfg.Type != config.FolderTypeReceiveEncrypted && len(hash) > 0 && !scanner.Validate(res.data[:n], hash, weakHash) { 1902 m.recheckFile(deviceID, folder, name, offset, hash, weakHash) 1903 l.Debugf("%v REQ(in) failed validating data: %s: %q / %q o=%d s=%d", m, deviceID, folder, name, offset, size) 1904 return nil, protocol.ErrNoSuchFile 1905 } 1906 1907 return res, nil 1908} 1909 1910// newLimitedRequestResponse takes size bytes from the limiters in order, 1911// skipping nil limiters, then returns a requestResponse of the given size. 1912// When the requestResponse is closed the limiters are given back the bytes, 1913// in reverse order. 1914func newLimitedRequestResponse(size int, limiters ...*util.Semaphore) *requestResponse { 1915 multi := util.MultiSemaphore(limiters) 1916 multi.Take(size) 1917 1918 res := newRequestResponse(size) 1919 1920 go func() { 1921 res.Wait() 1922 multi.Give(size) 1923 }() 1924 1925 return res 1926} 1927 1928func (m *model) recheckFile(deviceID protocol.DeviceID, folder, name string, offset int64, hash []byte, weakHash uint32) { 1929 cf, ok, err := m.CurrentFolderFile(folder, name) 1930 if err != nil { 1931 l.Debugf("%v recheckFile: %s: %q / %q: current file error: %v", m, deviceID, folder, name, err) 1932 return 1933 } 1934 if !ok { 1935 l.Debugf("%v recheckFile: %s: %q / %q: no current file", m, deviceID, folder, name) 1936 return 1937 } 1938 1939 if cf.IsDeleted() || cf.IsInvalid() || cf.IsSymlink() || cf.IsDirectory() { 1940 l.Debugf("%v recheckFile: %s: %q / %q: not a regular file", m, deviceID, folder, name) 1941 return 1942 } 1943 1944 blockIndex := int(offset / int64(cf.BlockSize())) 1945 if blockIndex >= len(cf.Blocks) { 1946 l.Debugf("%v recheckFile: %s: %q / %q i=%d: block index too far", m, deviceID, folder, name, blockIndex) 1947 return 1948 } 1949 1950 block := cf.Blocks[blockIndex] 1951 1952 // Seems to want a different version of the file, whatever. 
1953 if !bytes.Equal(block.Hash, hash) {
1954 l.Debugf("%v recheckFile: %s: %q / %q i=%d: hash mismatch %x != %x", m, deviceID, folder, name, blockIndex, block.Hash, hash)
1955 return
1956 }
1957 if weakHash != 0 && block.WeakHash != weakHash {
1958 l.Debugf("%v recheckFile: %s: %q / %q i=%d: weak hash mismatch %v != %v", m, deviceID, folder, name, blockIndex, block.WeakHash, weakHash)
1959 return
1960 }
1961
1962 // The hashes provided as part of the request match what we expect to find according
1963 // to what we have in the database, yet the content we've read off the filesystem doesn't.
1964 // Something is fishy; invalidate the file and rescan it.
1965 // The file will temporarily become invalid, which is ok as the content is messed up.
1966 m.fmut.RLock()
1967 runner, ok := m.folderRunners[folder]
1968 m.fmut.RUnlock()
1969 if !ok {
1970 l.Debugf("%v recheckFile: %s: %q / %q: Folder stopped before rescan could be scheduled", m, deviceID, folder, name)
1971 return
1972 }
1973
1974 runner.ScheduleForceRescan(name)
1975
1976 l.Debugf("%v recheckFile: %s: %q / %q", m, deviceID, folder, name)
1977}
1978
1979func (m *model) CurrentFolderFile(folder string, file string) (protocol.FileInfo, bool, error) {
1980 m.fmut.RLock()
1981 fs, ok := m.folderFiles[folder]
1982 m.fmut.RUnlock()
1983 if !ok {
1984 return protocol.FileInfo{}, false, ErrFolderMissing
1985 }
1986 snap, err := fs.Snapshot()
1987 if err != nil {
1988 return protocol.FileInfo{}, false, err
1989 }
1990 f, ok := snap.Get(protocol.LocalDeviceID, file)
1991 snap.Release()
1992 return f, ok, nil
1993}
1994
1995func (m *model) CurrentGlobalFile(folder string, file string) (protocol.FileInfo, bool, error) {
1996 m.fmut.RLock()
1997 ffs, ok := m.folderFiles[folder]
1998 m.fmut.RUnlock()
1999 if !ok {
2000 return protocol.FileInfo{}, false, ErrFolderMissing
2001 }
2002 snap, err := ffs.Snapshot()
2003 if err != nil {
2004 return protocol.FileInfo{}, false, err
2005 }
2006 f, ok := snap.GetGlobal(file)
2007 snap.Release()
2008 return f, ok, nil
2009}
2010
2011func (m *model) GetMtimeMapping(folder string, file string) (fs.MtimeMapping, error) {
2012 m.fmut.RLock()
2013 ffs, ok := m.folderFiles[folder]
2014 m.fmut.RUnlock()
2015 if !ok {
2016 return fs.MtimeMapping{}, ErrFolderMissing
2017 }
2018 return fs.GetMtimeMapping(ffs.MtimeFS(), file)
2019}
2020
2021// Connection returns the current connection for device, and a boolean whether a connection was found.
2022func (m *model) Connection(deviceID protocol.DeviceID) (protocol.Connection, bool) {
2023 m.pmut.RLock()
2024 cn, ok := m.conn[deviceID]
2025 m.pmut.RUnlock()
2026 if ok {
2027 m.deviceWasSeen(deviceID)
2028 }
2029 return cn, ok
2030}
2031
2032// LoadIgnores loads or refreshes the ignore patterns from disk, if the
2033// folder is healthy, and returns the refreshed lines and patterns.
2034func (m *model) LoadIgnores(folder string) ([]string, []string, error) {
2035 m.fmut.RLock()
2036 cfg, cfgOk := m.folderCfgs[folder]
2037 ignores, ignoresOk := m.folderIgnores[folder]
2038 m.fmut.RUnlock()
2039
2040 if !cfgOk {
2041 cfg, cfgOk = m.cfg.Folder(folder)
2042 if !cfgOk {
2043 return nil, nil, fmt.Errorf("folder %s does not exist", folder)
2044 }
2045 }
2046
2047 if cfg.Type == config.FolderTypeReceiveEncrypted {
2048 return nil, nil, nil
2049 }
2050
2051 // On creation, a new folder with ignore patterns validly has no marker yet.
2052 if err := cfg.CheckPath(); err != nil && err != config.ErrMarkerMissing { 2053 return nil, nil, err 2054 } 2055 2056 if !ignoresOk { 2057 ignores = ignore.New(cfg.Filesystem()) 2058 } 2059 2060 err := ignores.Load(".stignore") 2061 if fs.IsNotExist(err) { 2062 // Having no ignores is not an error. 2063 return nil, nil, nil 2064 } 2065 2066 // Return lines and patterns, which may have some meaning even when err 2067 // != nil, depending on the specific error. 2068 return ignores.Lines(), ignores.Patterns(), err 2069} 2070 2071// CurrentIgnores returns the currently loaded set of ignore patterns, 2072// whichever it may be. No attempt is made to load or refresh ignore 2073// patterns from disk. 2074func (m *model) CurrentIgnores(folder string) ([]string, []string, error) { 2075 m.fmut.RLock() 2076 _, cfgOk := m.folderCfgs[folder] 2077 ignores, ignoresOk := m.folderIgnores[folder] 2078 m.fmut.RUnlock() 2079 2080 if !cfgOk { 2081 return nil, nil, fmt.Errorf("folder %s does not exist", folder) 2082 } 2083 2084 if !ignoresOk { 2085 // Empty ignore patterns 2086 return []string{}, []string{}, nil 2087 } 2088 2089 return ignores.Lines(), ignores.Patterns(), nil 2090} 2091 2092func (m *model) SetIgnores(folder string, content []string) error { 2093 cfg, ok := m.cfg.Folder(folder) 2094 if !ok { 2095 return fmt.Errorf("folder %s does not exist", cfg.Description()) 2096 } 2097 2098 err := cfg.CheckPath() 2099 if err == config.ErrPathMissing { 2100 if err = cfg.CreateRoot(); err != nil { 2101 return errors.Wrap(err, "failed to create folder root") 2102 } 2103 err = cfg.CheckPath() 2104 } 2105 if err != nil && err != config.ErrMarkerMissing { 2106 return err 2107 } 2108 2109 if err := ignore.WriteIgnores(cfg.Filesystem(), ".stignore", content); err != nil { 2110 l.Warnln("Saving .stignore:", err) 2111 return err 2112 } 2113 2114 m.fmut.RLock() 2115 runner, ok := m.folderRunners[folder] 2116 m.fmut.RUnlock() 2117 if ok { 2118 return runner.Scan(nil) 2119 } 2120 return nil 2121} 2122 2123// OnHello is called when an device connects to us. 2124// This allows us to extract some information from the Hello message 2125// and add it to a list of known devices ahead of any checks. 2126func (m *model) OnHello(remoteID protocol.DeviceID, addr net.Addr, hello protocol.Hello) error { 2127 if m.cfg.IgnoredDevice(remoteID) { 2128 return errDeviceIgnored 2129 } 2130 2131 cfg, ok := m.cfg.Device(remoteID) 2132 if !ok { 2133 if err := m.db.AddOrUpdatePendingDevice(remoteID, hello.DeviceName, addr.String()); err != nil { 2134 l.Warnf("Failed to persist pending device entry to database: %v", err) 2135 } 2136 m.evLogger.Log(events.PendingDevicesChanged, map[string][]interface{}{ 2137 "added": {map[string]string{ 2138 "deviceID": remoteID.String(), 2139 "name": hello.DeviceName, 2140 "address": addr.String(), 2141 }}, 2142 }) 2143 // DEPRECATED: Only for backwards compatibility, should be removed. 2144 m.evLogger.Log(events.DeviceRejected, map[string]string{ 2145 "name": hello.DeviceName, 2146 "device": remoteID.String(), 2147 "address": addr.String(), 2148 }) 2149 return errDeviceUnknown 2150 } 2151 2152 if cfg.Paused { 2153 return errDevicePaused 2154 } 2155 2156 if len(cfg.AllowedNetworks) > 0 && !connections.IsAllowedNetwork(addr.String(), cfg.AllowedNetworks) { 2157 // The connection is not from an allowed network. 2158 return errNetworkNotAllowed 2159 } 2160 2161 if max := m.cfg.Options().ConnectionLimitMax; max > 0 && m.NumConnections() >= max { 2162 // We're not allowed to accept any more connections. 
2163 return errConnLimitReached 2164 } 2165 2166 return nil 2167} 2168 2169// GetHello is called when we are about to connect to some remote device. 2170func (m *model) GetHello(id protocol.DeviceID) protocol.HelloIntf { 2171 name := "" 2172 if _, ok := m.cfg.Device(id); ok { 2173 // Set our name (from the config of our device ID) only if we already know about the other side device ID. 2174 if myCfg, ok := m.cfg.Device(m.id); ok { 2175 name = myCfg.Name 2176 } 2177 } 2178 return &protocol.Hello{ 2179 DeviceName: name, 2180 ClientName: m.clientName, 2181 ClientVersion: m.clientVersion, 2182 } 2183} 2184 2185// AddConnection adds a new peer connection to the model. An initial index will 2186// be sent to the connected peer, thereafter index updates whenever the local 2187// folder changes. 2188func (m *model) AddConnection(conn protocol.Connection, hello protocol.Hello) { 2189 deviceID := conn.ID() 2190 device, ok := m.cfg.Device(deviceID) 2191 if !ok { 2192 l.Infoln("Trying to add connection to unknown device") 2193 return 2194 } 2195 2196 // The slightly unusual locking sequence here is because we must acquire 2197 // fmut before pmut. (The locks can be *released* in any order.) 2198 m.fmut.RLock() 2199 m.pmut.Lock() 2200 if oldConn, ok := m.conn[deviceID]; ok { 2201 l.Infoln("Replacing old connection", oldConn, "with", conn, "for", deviceID) 2202 // There is an existing connection to this device that we are 2203 // replacing. We must close the existing connection and wait for the 2204 // close to complete before adding the new connection. We do the 2205 // actual close without holding pmut as the connection will call 2206 // back into Closed() for the cleanup. 2207 closed := m.closed[deviceID] 2208 m.fmut.RUnlock() 2209 m.pmut.Unlock() 2210 oldConn.Close(errReplacingConnection) 2211 <-closed 2212 // Again, lock fmut before pmut. 2213 m.fmut.RLock() 2214 m.pmut.Lock() 2215 } 2216 2217 m.conn[deviceID] = conn 2218 closed := make(chan struct{}) 2219 m.closed[deviceID] = closed 2220 m.deviceDownloads[deviceID] = newDeviceDownloadState() 2221 indexRegistry := newIndexHandlerRegistry(conn, m.deviceDownloads[deviceID], closed, m.Supervisor, m.evLogger) 2222 for id, fcfg := range m.folderCfgs { 2223 indexRegistry.RegisterFolderState(fcfg, m.folderFiles[id], m.folderRunners[id]) 2224 } 2225 m.indexHandlers[deviceID] = indexRegistry 2226 m.fmut.RUnlock() 2227 // 0: default, <0: no limiting 2228 switch { 2229 case device.MaxRequestKiB > 0: 2230 m.connRequestLimiters[deviceID] = util.NewSemaphore(1024 * device.MaxRequestKiB) 2231 case device.MaxRequestKiB == 0: 2232 m.connRequestLimiters[deviceID] = util.NewSemaphore(1024 * defaultPullerPendingKiB) 2233 } 2234 2235 m.helloMessages[deviceID] = hello 2236 2237 event := map[string]string{ 2238 "id": deviceID.String(), 2239 "deviceName": hello.DeviceName, 2240 "clientName": hello.ClientName, 2241 "clientVersion": hello.ClientVersion, 2242 "type": conn.Type(), 2243 } 2244 2245 addr := conn.RemoteAddr() 2246 if addr != nil { 2247 event["addr"] = addr.String() 2248 } 2249 2250 m.evLogger.Log(events.DeviceConnected, event) 2251 2252 l.Infof(`Device %s client is "%s %s" named "%s" at %s`, deviceID, hello.ClientName, hello.ClientVersion, hello.DeviceName, conn) 2253 2254 conn.Start() 2255 m.pmut.Unlock() 2256 2257 // Acquires fmut, so has to be done outside of pmut. 
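// The initial cluster config announces which folders we share with this
// device, together with our index IDs and sequence numbers, so the peer
// knows what index data to expect from us.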
2258 cm, passwords := m.generateClusterConfig(deviceID) 2259 conn.SetFolderPasswords(passwords) 2260 conn.ClusterConfig(cm) 2261 2262 if (device.Name == "" || m.cfg.Options().OverwriteRemoteDevNames) && hello.DeviceName != "" { 2263 m.cfg.Modify(func(cfg *config.Configuration) { 2264 for i := range cfg.Devices { 2265 if cfg.Devices[i].DeviceID == deviceID { 2266 if cfg.Devices[i].Name == "" || cfg.Options.OverwriteRemoteDevNames { 2267 cfg.Devices[i].Name = hello.DeviceName 2268 } 2269 return 2270 } 2271 } 2272 }) 2273 } 2274 2275 m.deviceWasSeen(deviceID) 2276} 2277 2278func (m *model) DownloadProgress(device protocol.DeviceID, folder string, updates []protocol.FileDownloadProgressUpdate) error { 2279 m.fmut.RLock() 2280 cfg, ok := m.folderCfgs[folder] 2281 m.fmut.RUnlock() 2282 2283 if !ok || cfg.DisableTempIndexes || !cfg.SharedWith(device) { 2284 return nil 2285 } 2286 2287 m.pmut.RLock() 2288 downloads := m.deviceDownloads[device] 2289 m.pmut.RUnlock() 2290 downloads.Update(folder, updates) 2291 state := downloads.GetBlockCounts(folder) 2292 2293 m.evLogger.Log(events.RemoteDownloadProgress, map[string]interface{}{ 2294 "device": device.String(), 2295 "folder": folder, 2296 "state": state, 2297 }) 2298 2299 return nil 2300} 2301 2302func (m *model) deviceWasSeen(deviceID protocol.DeviceID) { 2303 m.fmut.RLock() 2304 sr, ok := m.deviceStatRefs[deviceID] 2305 m.fmut.RUnlock() 2306 if ok { 2307 _ = sr.WasSeen() 2308 } 2309} 2310 2311func (m *model) deviceDidClose(deviceID protocol.DeviceID, duration time.Duration) { 2312 m.fmut.RLock() 2313 sr, ok := m.deviceStatRefs[deviceID] 2314 m.fmut.RUnlock() 2315 if ok { 2316 _ = sr.LastConnectionDuration(duration) 2317 } 2318} 2319 2320func (m *model) requestGlobal(ctx context.Context, deviceID protocol.DeviceID, folder, name string, blockNo int, offset int64, size int, hash []byte, weakHash uint32, fromTemporary bool) ([]byte, error) { 2321 m.pmut.RLock() 2322 nc, ok := m.conn[deviceID] 2323 m.pmut.RUnlock() 2324 2325 if !ok { 2326 return nil, fmt.Errorf("requestGlobal: no such device: %s", deviceID) 2327 } 2328 2329 l.Debugf("%v REQ(out): %s: %q / %q b=%d o=%d s=%d h=%x wh=%x ft=%t", m, deviceID, folder, name, blockNo, offset, size, hash, weakHash, fromTemporary) 2330 2331 return nc.Request(ctx, folder, name, blockNo, offset, size, hash, weakHash, fromTemporary) 2332} 2333 2334func (m *model) ScanFolders() map[string]error { 2335 m.fmut.RLock() 2336 folders := make([]string, 0, len(m.folderCfgs)) 2337 for folder := range m.folderCfgs { 2338 folders = append(folders, folder) 2339 } 2340 m.fmut.RUnlock() 2341 2342 errors := make(map[string]error, len(m.folderCfgs)) 2343 errorsMut := sync.NewMutex() 2344 2345 wg := sync.NewWaitGroup() 2346 wg.Add(len(folders)) 2347 for _, folder := range folders { 2348 folder := folder 2349 go func() { 2350 err := m.ScanFolder(folder) 2351 if err != nil { 2352 errorsMut.Lock() 2353 errors[folder] = err 2354 errorsMut.Unlock() 2355 } 2356 wg.Done() 2357 }() 2358 } 2359 wg.Wait() 2360 return errors 2361} 2362 2363func (m *model) ScanFolder(folder string) error { 2364 return m.ScanFolderSubdirs(folder, nil) 2365} 2366 2367func (m *model) ScanFolderSubdirs(folder string, subs []string) error { 2368 m.fmut.RLock() 2369 err := m.checkFolderRunningLocked(folder) 2370 runner := m.folderRunners[folder] 2371 m.fmut.RUnlock() 2372 2373 if err != nil { 2374 return err 2375 } 2376 2377 return runner.Scan(subs) 2378} 2379 2380func (m *model) DelayScan(folder string, next time.Duration) { 2381 m.fmut.RLock() 2382 runner, ok := 
m.folderRunners[folder] 2383 m.fmut.RUnlock() 2384 if !ok { 2385 return 2386 } 2387 runner.DelayScan(next) 2388} 2389 2390// numHashers returns the number of hasher routines to use for a given folder, 2391// taking into account configuration and available CPU cores. 2392func (m *model) numHashers(folder string) int { 2393 m.fmut.RLock() 2394 folderCfg := m.folderCfgs[folder] 2395 numFolders := len(m.folderCfgs) 2396 m.fmut.RUnlock() 2397 2398 if folderCfg.Hashers > 0 { 2399 // Specific value set in the config, use that. 2400 return folderCfg.Hashers 2401 } 2402 2403 if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { 2404 // Interactive operating systems; don't load the system too heavily by 2405 // default. 2406 return 1 2407 } 2408 2409 // For other operating systems and architectures, lets try to get some 2410 // work done... Divide the available CPU cores among the configured 2411 // folders. 2412 if perFolder := runtime.GOMAXPROCS(-1) / numFolders; perFolder > 0 { 2413 return perFolder 2414 } 2415 2416 return 1 2417} 2418 2419// generateClusterConfig returns a ClusterConfigMessage that is correct and the 2420// set of folder passwords for the given peer device 2421func (m *model) generateClusterConfig(device protocol.DeviceID) (protocol.ClusterConfig, map[string]string) { 2422 var message protocol.ClusterConfig 2423 2424 m.fmut.RLock() 2425 defer m.fmut.RUnlock() 2426 2427 folders := m.cfg.FolderList() 2428 passwords := make(map[string]string, len(folders)) 2429 for _, folderCfg := range folders { 2430 if !folderCfg.SharedWith(device) { 2431 continue 2432 } 2433 2434 encryptionToken, hasEncryptionToken := m.folderEncryptionPasswordTokens[folderCfg.ID] 2435 if folderCfg.Type == config.FolderTypeReceiveEncrypted && !hasEncryptionToken { 2436 // We haven't gotten a token for us yet and without one the other 2437 // side can't validate us - pretend we don't have the folder yet. 2438 continue 2439 } 2440 2441 protocolFolder := protocol.Folder{ 2442 ID: folderCfg.ID, 2443 Label: folderCfg.Label, 2444 ReadOnly: folderCfg.Type == config.FolderTypeSendOnly, 2445 IgnorePermissions: folderCfg.IgnorePerms, 2446 IgnoreDelete: folderCfg.IgnoreDelete, 2447 DisableTempIndexes: folderCfg.DisableTempIndexes, 2448 } 2449 2450 fs := m.folderFiles[folderCfg.ID] 2451 2452 // Even if we aren't paused, if we haven't started the folder yet 2453 // pretend we are. Otherwise the remote might get confused about 2454 // the missing index info (and drop all the info). We will send 2455 // another cluster config once the folder is started. 
2456 protocolFolder.Paused = folderCfg.Paused || fs == nil 2457 2458 for _, folderDevice := range folderCfg.Devices { 2459 deviceCfg, _ := m.cfg.Device(folderDevice.DeviceID) 2460 2461 protocolDevice := protocol.Device{ 2462 ID: deviceCfg.DeviceID, 2463 Name: deviceCfg.Name, 2464 Addresses: deviceCfg.Addresses, 2465 Compression: deviceCfg.Compression, 2466 CertName: deviceCfg.CertName, 2467 Introducer: deviceCfg.Introducer, 2468 } 2469 2470 if deviceCfg.DeviceID == m.id && hasEncryptionToken { 2471 protocolDevice.EncryptionPasswordToken = encryptionToken 2472 } else if folderDevice.EncryptionPassword != "" { 2473 protocolDevice.EncryptionPasswordToken = protocol.PasswordToken(folderCfg.ID, folderDevice.EncryptionPassword) 2474 if folderDevice.DeviceID == device { 2475 passwords[folderCfg.ID] = folderDevice.EncryptionPassword 2476 } 2477 } 2478 2479 if fs != nil { 2480 if deviceCfg.DeviceID == m.id { 2481 protocolDevice.IndexID = fs.IndexID(protocol.LocalDeviceID) 2482 protocolDevice.MaxSequence = fs.Sequence(protocol.LocalDeviceID) 2483 } else { 2484 protocolDevice.IndexID = fs.IndexID(deviceCfg.DeviceID) 2485 protocolDevice.MaxSequence = fs.Sequence(deviceCfg.DeviceID) 2486 } 2487 } 2488 2489 protocolFolder.Devices = append(protocolFolder.Devices, protocolDevice) 2490 } 2491 2492 message.Folders = append(message.Folders, protocolFolder) 2493 } 2494 2495 return message, passwords 2496} 2497 2498func (m *model) State(folder string) (string, time.Time, error) { 2499 m.fmut.RLock() 2500 runner, ok := m.folderRunners[folder] 2501 m.fmut.RUnlock() 2502 if !ok { 2503 // The returned error should be an actual folder error, so returning 2504 // errors.New("does not exist") or similar here would be 2505 // inappropriate. 2506 return "", time.Time{}, nil 2507 } 2508 state, changed, err := runner.getState() 2509 return state.String(), changed, err 2510} 2511 2512func (m *model) FolderErrors(folder string) ([]FileError, error) { 2513 m.fmut.RLock() 2514 err := m.checkFolderRunningLocked(folder) 2515 runner := m.folderRunners[folder] 2516 m.fmut.RUnlock() 2517 if err != nil { 2518 return nil, err 2519 } 2520 return runner.Errors(), nil 2521} 2522 2523func (m *model) WatchError(folder string) error { 2524 m.fmut.RLock() 2525 err := m.checkFolderRunningLocked(folder) 2526 runner := m.folderRunners[folder] 2527 m.fmut.RUnlock() 2528 if err != nil { 2529 return nil // If the folder isn't running, there's no error to report. 2530 } 2531 return runner.WatchError() 2532} 2533 2534func (m *model) Override(folder string) { 2535 // Grab the runner and the file set. 2536 2537 m.fmut.RLock() 2538 runner, ok := m.folderRunners[folder] 2539 m.fmut.RUnlock() 2540 if !ok { 2541 return 2542 } 2543 2544 // Run the override, taking updates as if they came from scanning. 2545 2546 runner.Override() 2547} 2548 2549func (m *model) Revert(folder string) { 2550 // Grab the runner and the file set. 2551 2552 m.fmut.RLock() 2553 runner, ok := m.folderRunners[folder] 2554 m.fmut.RUnlock() 2555 if !ok { 2556 return 2557 } 2558 2559 // Run the revert, taking updates as if they came from scanning. 
2560 2561 runner.Revert() 2562} 2563 2564type TreeEntry struct { 2565 Name string `json:"name"` 2566 ModTime time.Time `json:"modTime"` 2567 Size int64 `json:"size"` 2568 Type protocol.FileInfoType `json:"type"` 2569 Children []*TreeEntry `json:"children,omitempty"` 2570} 2571 2572func findByName(slice []*TreeEntry, name string) *TreeEntry { 2573 for _, child := range slice { 2574 if child.Name == name { 2575 return child 2576 } 2577 } 2578 return nil 2579} 2580 2581func (m *model) GlobalDirectoryTree(folder, prefix string, levels int, dirsOnly bool) ([]*TreeEntry, error) { 2582 m.fmut.RLock() 2583 files, ok := m.folderFiles[folder] 2584 m.fmut.RUnlock() 2585 if !ok { 2586 return nil, ErrFolderMissing 2587 } 2588 2589 root := &TreeEntry{ 2590 Children: make([]*TreeEntry, 0), 2591 } 2592 sep := string(filepath.Separator) 2593 prefix = osutil.NativeFilename(prefix) 2594 2595 if prefix != "" && !strings.HasSuffix(prefix, sep) { 2596 prefix = prefix + sep 2597 } 2598 2599 snap, err := files.Snapshot() 2600 if err != nil { 2601 return nil, err 2602 } 2603 defer snap.Release() 2604 snap.WithPrefixedGlobalTruncated(prefix, func(fi protocol.FileIntf) bool { 2605 f := fi.(db.FileInfoTruncated) 2606 2607 // Don't include the prefix itself. 2608 if f.IsInvalid() || f.IsDeleted() || strings.HasPrefix(prefix, f.Name) { 2609 return true 2610 } 2611 2612 f.Name = strings.Replace(f.Name, prefix, "", 1) 2613 2614 dir := filepath.Dir(f.Name) 2615 base := filepath.Base(f.Name) 2616 2617 if levels > -1 && strings.Count(f.Name, sep) > levels { 2618 return true 2619 } 2620 2621 parent := root 2622 if dir != "." { 2623 for _, path := range strings.Split(dir, sep) { 2624 child := findByName(parent.Children, path) 2625 if child == nil { 2626 err = fmt.Errorf("could not find child '%s' for path '%s' in parent '%s'", path, f.Name, parent.Name) 2627 return false 2628 } 2629 parent = child 2630 } 2631 } 2632 2633 if dirsOnly && !f.IsDirectory() { 2634 return true 2635 } 2636 2637 parent.Children = append(parent.Children, &TreeEntry{ 2638 Name: base, 2639 Type: f.Type, 2640 ModTime: f.ModTime(), 2641 Size: f.FileSize(), 2642 }) 2643 2644 return true 2645 }) 2646 if err != nil { 2647 return nil, err 2648 } 2649 2650 return root.Children, nil 2651} 2652 2653func (m *model) GetFolderVersions(folder string) (map[string][]versioner.FileVersion, error) { 2654 m.fmut.RLock() 2655 err := m.checkFolderRunningLocked(folder) 2656 ver := m.folderVersioners[folder] 2657 m.fmut.RUnlock() 2658 if err != nil { 2659 return nil, err 2660 } 2661 if ver == nil { 2662 return nil, errNoVersioner 2663 } 2664 2665 return ver.GetVersions() 2666} 2667 2668func (m *model) RestoreFolderVersions(folder string, versions map[string]time.Time) (map[string]error, error) { 2669 m.fmut.RLock() 2670 err := m.checkFolderRunningLocked(folder) 2671 fcfg := m.folderCfgs[folder] 2672 ver := m.folderVersioners[folder] 2673 m.fmut.RUnlock() 2674 if err != nil { 2675 return nil, err 2676 } 2677 if ver == nil { 2678 return nil, errNoVersioner 2679 } 2680 2681 restoreErrors := make(map[string]error) 2682 2683 for file, version := range versions { 2684 if err := ver.Restore(file, version); err != nil { 2685 restoreErrors[file] = err 2686 } 2687 } 2688 2689 // Trigger scan 2690 if !fcfg.FSWatcherEnabled { 2691 go func() { _ = m.ScanFolder(folder) }() 2692 } 2693 2694 return restoreErrors, nil 2695} 2696 2697func (m *model) Availability(folder string, file protocol.FileInfo, block protocol.BlockInfo) ([]Availability, error) { 2698 // The slightly unusual locking 
sequence here is because we need to hold 2699 // pmut for the duration (as the value returned from foldersFiles can 2700 // get heavily modified on Close()), but also must acquire fmut before 2701 // pmut. (The locks can be *released* in any order.) 2702 m.fmut.RLock() 2703 m.pmut.RLock() 2704 defer m.pmut.RUnlock() 2705 2706 fs, ok := m.folderFiles[folder] 2707 cfg := m.folderCfgs[folder] 2708 m.fmut.RUnlock() 2709 2710 if !ok { 2711 return nil, ErrFolderMissing 2712 } 2713 2714 snap, err := fs.Snapshot() 2715 if err != nil { 2716 return nil, err 2717 } 2718 defer snap.Release() 2719 2720 return m.availabilityInSnapshotPRlocked(cfg, snap, file, block), nil 2721} 2722 2723func (m *model) availabilityInSnapshot(cfg config.FolderConfiguration, snap *db.Snapshot, file protocol.FileInfo, block protocol.BlockInfo) []Availability { 2724 m.pmut.RLock() 2725 defer m.pmut.RUnlock() 2726 return m.availabilityInSnapshotPRlocked(cfg, snap, file, block) 2727} 2728 2729func (m *model) availabilityInSnapshotPRlocked(cfg config.FolderConfiguration, snap *db.Snapshot, file protocol.FileInfo, block protocol.BlockInfo) []Availability { 2730 var availabilities []Availability 2731 for _, device := range snap.Availability(file.Name) { 2732 if _, ok := m.remotePausedFolders[device]; !ok { 2733 continue 2734 } 2735 if _, ok := m.remotePausedFolders[device][cfg.ID]; ok { 2736 continue 2737 } 2738 _, ok := m.conn[device] 2739 if ok { 2740 availabilities = append(availabilities, Availability{ID: device, FromTemporary: false}) 2741 } 2742 } 2743 2744 for _, device := range cfg.Devices { 2745 if m.deviceDownloads[device.DeviceID].Has(cfg.ID, file.Name, file.Version, int(block.Offset/int64(file.BlockSize()))) { 2746 availabilities = append(availabilities, Availability{ID: device.DeviceID, FromTemporary: true}) 2747 } 2748 } 2749 2750 return availabilities 2751} 2752 2753// BringToFront bumps the given files priority in the job queue. 2754func (m *model) BringToFront(folder, file string) { 2755 m.fmut.RLock() 2756 runner, ok := m.folderRunners[folder] 2757 m.fmut.RUnlock() 2758 2759 if ok { 2760 runner.BringToFront(file) 2761 } 2762} 2763 2764func (m *model) ResetFolder(folder string) { 2765 l.Infof("Cleaning data for folder %q", folder) 2766 db.DropFolder(m.db, folder) 2767} 2768 2769func (m *model) String() string { 2770 return fmt.Sprintf("model@%p", m) 2771} 2772 2773func (m *model) VerifyConfiguration(from, to config.Configuration) error { 2774 toFolders := to.FolderMap() 2775 for _, from := range from.Folders { 2776 to, ok := toFolders[from.ID] 2777 if ok && from.Type != to.Type && (from.Type == config.FolderTypeReceiveEncrypted || to.Type == config.FolderTypeReceiveEncrypted) { 2778 return errors.New("folder type must not be changed from/to receive-encrypted") 2779 } 2780 } 2781 return nil 2782} 2783 2784func (m *model) CommitConfiguration(from, to config.Configuration) bool { 2785 // TODO: This should not use reflect, and should take more care to try to handle stuff without restart. 2786 2787 // Delay processing config changes until after the initial setup 2788 <-m.started 2789 2790 // Go through the folder configs and figure out if we need to restart or not. 2791 2792 // Tracks devices affected by any configuration change to resend ClusterConfig. 
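// closeDevices collects devices whose connections should be dropped, for
// example because the device got paused or because we need the remote to
// re-send its cluster config.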
2793 clusterConfigDevices := make(deviceIDSet, len(from.Devices)+len(to.Devices))
2794 closeDevices := make([]protocol.DeviceID, 0, len(to.Devices))
2795
2796 fromFolders := mapFolders(from.Folders)
2797 toFolders := mapFolders(to.Folders)
2798 for folderID, cfg := range toFolders {
2799 if _, ok := fromFolders[folderID]; !ok {
2800 // A folder was added.
2801 if cfg.Paused {
2802 l.Infoln("Paused folder", cfg.Description())
2803 } else {
2804 l.Infoln("Adding folder", cfg.Description())
2805 if err := m.newFolder(cfg, to.Options.CacheIgnoredFiles); err != nil {
2806 m.fatal(err)
2807 return true
2808 }
2809 }
2810 clusterConfigDevices.add(cfg.DeviceIDs())
2811 }
2812 }
2813
2814 removedFolders := make(map[string]struct{})
2815 for folderID, fromCfg := range fromFolders {
2816 toCfg, ok := toFolders[folderID]
2817 if !ok {
2818 // The folder was removed.
2819 m.removeFolder(fromCfg)
2820 clusterConfigDevices.add(fromCfg.DeviceIDs())
2821 removedFolders[fromCfg.ID] = struct{}{}
2822 continue
2823 }
2824
2825 if fromCfg.Paused && toCfg.Paused {
2826 continue
2827 }
2828
2829 // This folder exists on both sides. Settings might have changed.
2830 // Check if anything differs that requires a restart.
2831 if !reflect.DeepEqual(fromCfg.RequiresRestartOnly(), toCfg.RequiresRestartOnly()) || from.Options.CacheIgnoredFiles != to.Options.CacheIgnoredFiles {
2832 if err := m.restartFolder(fromCfg, toCfg, to.Options.CacheIgnoredFiles); err != nil {
2833 m.fatal(err)
2834 return true
2835 }
2836 clusterConfigDevices.add(fromCfg.DeviceIDs())
2837 if toCfg.Type != config.FolderTypeReceiveEncrypted {
2838 clusterConfigDevices.add(toCfg.DeviceIDs())
2839 } else {
2840 // If we don't have the encryption token yet, we need to drop
2841 // the connection to make the remote re-send the cluster-config
2842 // and with it the token.
2843 m.fmut.RLock()
2844 _, ok := m.folderEncryptionPasswordTokens[toCfg.ID]
2845 m.fmut.RUnlock()
2846 if !ok {
2847 for _, id := range toCfg.DeviceIDs() {
2848 closeDevices = append(closeDevices, id)
2849 }
2850 } else {
2851 clusterConfigDevices.add(toCfg.DeviceIDs())
2852 }
2853 }
2854 }
2855
2856 // Emit the folder pause/resume event
2857 if fromCfg.Paused != toCfg.Paused {
2858 eventType := events.FolderResumed
2859 if toCfg.Paused {
2860 eventType = events.FolderPaused
2861 }
2862 m.evLogger.Log(eventType, map[string]string{"id": toCfg.ID, "label": toCfg.Label})
2863 }
2864 }
2865
2866 // Removing a device: we don't actually need to do anything here, because
2867 // the folder config has changed (the device lists no longer match), so the
2868 // folders that had that device got "restarted", which involves killing
2869 // connections to all devices that we were sharing the folder with.
2870 // At some point model.Close() will get called for that device, which will
2871 // clean up residual device state that is not part of any folder.
2872
2873 // Pausing a device; unpausing is handled by the connection service.
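// Walk the device configurations: devices that are new to the config get a
// statistics reference, devices that became paused are disconnected below.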
2874 fromDevices := from.DeviceMap()
2875 toDevices := to.DeviceMap()
2876 for deviceID, toCfg := range toDevices {
2877 fromCfg, ok := fromDevices[deviceID]
2878 if !ok {
2879 sr := stats.NewDeviceStatisticsReference(m.db, deviceID)
2880 m.fmut.Lock()
2881 m.deviceStatRefs[deviceID] = sr
2882 m.fmut.Unlock()
2883 continue
2884 }
2885 delete(fromDevices, deviceID)
2886 if fromCfg.Paused == toCfg.Paused {
2887 continue
2888 }
2889
2890 if toCfg.Paused {
2891 l.Infoln("Pausing", deviceID)
2892 closeDevices = append(closeDevices, deviceID)
2893 m.evLogger.Log(events.DevicePaused, map[string]string{"device": deviceID.String()})
2894 } else {
2895 // Ignored folder was removed, reconnect to retrigger the prompt.
2896 if len(fromCfg.IgnoredFolders) > len(toCfg.IgnoredFolders) {
2897 closeDevices = append(closeDevices, deviceID)
2898 }
2899
2900 l.Infoln("Resuming", deviceID)
2901 m.evLogger.Log(events.DeviceResumed, map[string]string{"device": deviceID.String()})
2902 }
2903 }
2904 // Clean up after removed devices
2905 removedDevices := make([]protocol.DeviceID, 0, len(fromDevices))
2906 m.fmut.Lock()
2907 for deviceID := range fromDevices {
2908 delete(m.deviceStatRefs, deviceID)
2909 removedDevices = append(removedDevices, deviceID)
2910 delete(clusterConfigDevices, deviceID)
2911 }
2912 m.fmut.Unlock()
2913
2914 m.pmut.RLock()
2915 for _, id := range closeDevices {
2916 delete(clusterConfigDevices, id)
2917 if conn, ok := m.conn[id]; ok {
2918 go conn.Close(errDevicePaused)
2919 }
2920 }
2921 for _, id := range removedDevices {
2922 delete(clusterConfigDevices, id)
2923 if conn, ok := m.conn[id]; ok {
2924 go conn.Close(errDeviceRemoved)
2925 }
2926 }
2927 m.pmut.RUnlock()
2928 // Generating cluster-configs acquires fmut -> must happen outside of pmut.
2929 m.sendClusterConfig(clusterConfigDevices.AsSlice())
2930
2931 ignoredDevices := observedDeviceSet(to.IgnoredDevices)
2932 m.cleanPending(toDevices, toFolders, ignoredDevices, removedFolders)
2933
2934 m.globalRequestLimiter.SetCapacity(1024 * to.Options.MaxConcurrentIncomingRequestKiB())
2935 m.folderIOLimiter.SetCapacity(to.Options.MaxFolderConcurrency())
2936
2937 // Some options don't require restart as those components handle it fine
2938 // by themselves. Compare the options structs containing only the
2939 // attributes that require restart and act appropriately.
2940 if !reflect.DeepEqual(from.Options.RequiresRestartOnly(), to.Options.RequiresRestartOnly()) {
2941 l.Debugln(m, "requires restart, options differ")
2942 return false
2943 }
2944
2945 return true
2946}
2947
2948func (m *model) cleanPending(existingDevices map[protocol.DeviceID]config.DeviceConfiguration, existingFolders map[string]config.FolderConfiguration, ignoredDevices deviceIDSet, removedFolders map[string]struct{}) {
2949 var removedPendingFolders []map[string]string
2950 pendingFolders, err := m.db.PendingFolders()
2951 if err != nil {
2952 msg := "Could not iterate through pending folder entries for cleanup"
2953 l.Warnf("%v: %v", msg, err)
2954 m.evLogger.Log(events.Failure, msg)
2955 // Continue with pending devices below; this loop is skipped.
2956 }
2957 for folderID, pf := range pendingFolders {
2958 if _, ok := removedFolders[folderID]; ok {
2959 // Forget pending folder device associations for recently removed
2960 // folders as well, assuming the folder is no longer of interest
2961 // at all (but might become pending again).
2962 l.Debugf("Discarding pending removed folder %v from all devices", folderID) 2963 if err := m.db.RemovePendingFolder(folderID); err != nil { 2964 msg := "Failed to remove pending folder entry" 2965 l.Warnf("%v (%v): %v", msg, folderID, err) 2966 m.evLogger.Log(events.Failure, msg) 2967 } else { 2968 removedPendingFolders = append(removedPendingFolders, map[string]string{ 2969 "folderID": folderID, 2970 }) 2971 } 2972 continue 2973 } 2974 for deviceID := range pf.OfferedBy { 2975 if dev, ok := existingDevices[deviceID]; !ok { 2976 l.Debugf("Discarding pending folder %v from unknown device %v", folderID, deviceID) 2977 goto removeFolderForDevice 2978 } else if dev.IgnoredFolder(folderID) { 2979 l.Debugf("Discarding now ignored pending folder %v for device %v", folderID, deviceID) 2980 goto removeFolderForDevice 2981 } 2982 if folderCfg, ok := existingFolders[folderID]; ok { 2983 if folderCfg.SharedWith(deviceID) { 2984 l.Debugf("Discarding now shared pending folder %v for device %v", folderID, deviceID) 2985 goto removeFolderForDevice 2986 } 2987 } 2988 continue 2989 removeFolderForDevice: 2990 if err := m.db.RemovePendingFolderForDevice(folderID, deviceID); err != nil { 2991 msg := "Failed to remove pending folder-device entry" 2992 l.Warnf("%v (%v, %v): %v", msg, folderID, deviceID, err) 2993 m.evLogger.Log(events.Failure, msg) 2994 continue 2995 } 2996 removedPendingFolders = append(removedPendingFolders, map[string]string{ 2997 "folderID": folderID, 2998 "deviceID": deviceID.String(), 2999 }) 3000 } 3001 } 3002 if len(removedPendingFolders) > 0 { 3003 m.evLogger.Log(events.PendingFoldersChanged, map[string]interface{}{ 3004 "removed": removedPendingFolders, 3005 }) 3006 } 3007 3008 var removedPendingDevices []map[string]string 3009 pendingDevices, err := m.db.PendingDevices() 3010 if err != nil { 3011 msg := "Could not iterate through pending device entries for cleanup" 3012 l.Warnf("%v: %v", msg, err) 3013 m.evLogger.Log(events.Failure, msg) 3014 return 3015 } 3016 for deviceID := range pendingDevices { 3017 if _, ok := ignoredDevices[deviceID]; ok { 3018 l.Debugf("Discarding now ignored pending device %v", deviceID) 3019 goto removeDevice 3020 } 3021 if _, ok := existingDevices[deviceID]; ok { 3022 l.Debugf("Discarding now added pending device %v", deviceID) 3023 goto removeDevice 3024 } 3025 continue 3026 removeDevice: 3027 if err := m.db.RemovePendingDevice(deviceID); err != nil { 3028 msg := "Failed to remove pending device entry" 3029 l.Warnf("%v: %v", msg, err) 3030 m.evLogger.Log(events.Failure, msg) 3031 continue 3032 } 3033 removedPendingDevices = append(removedPendingDevices, map[string]string{ 3034 "deviceID": deviceID.String(), 3035 }) 3036 } 3037 if len(removedPendingDevices) > 0 { 3038 m.evLogger.Log(events.PendingDevicesChanged, map[string]interface{}{ 3039 "removed": removedPendingDevices, 3040 }) 3041 } 3042} 3043 3044// checkFolderRunningLocked returns nil if the folder is up and running and a 3045// descriptive error if not. 3046// Need to hold (read) lock on m.fmut when calling this. 3047func (m *model) checkFolderRunningLocked(folder string) error { 3048 _, ok := m.folderRunners[folder] 3049 if ok { 3050 return nil 3051 } 3052 3053 if cfg, ok := m.cfg.Folder(folder); !ok { 3054 return ErrFolderMissing 3055 } else if cfg.Paused { 3056 return ErrFolderPaused 3057 } 3058 3059 return ErrFolderNotRunning 3060} 3061 3062// PendingDevices lists unknown devices that tried to connect. 
3063func (m *model) PendingDevices() (map[protocol.DeviceID]db.ObservedDevice, error) {
3064 return m.db.PendingDevices()
3065}
3066
3067// PendingFolders lists folders that we don't yet share with the offering devices. It
3068// returns the entries grouped by folder and filters for a given device unless the
3069// argument is specified as EmptyDeviceID.
3070func (m *model) PendingFolders(device protocol.DeviceID) (map[string]db.PendingFolder, error) {
3071 return m.db.PendingFoldersForDevice(device)
3072}
3073
3074// DismissPendingDevice removes the record of a specific pending device.
3075func (m *model) DismissPendingDevice(device protocol.DeviceID) error {
3076 l.Debugf("Discarding pending device %v", device)
3077 err := m.db.RemovePendingDevice(device)
3078 if err != nil {
3079 return err
3080 }
3081 removedPendingDevices := []map[string]string{
3082 {"deviceID": device.String()},
3083 }
3084 m.evLogger.Log(events.PendingDevicesChanged, map[string]interface{}{
3085 "removed": removedPendingDevices,
3086 })
3087 return nil
3088}
3089
3090// DismissPendingFolder removes records of pending folders: either a specific folder /
3091// device combination, or all matching a specific folder ID if the device argument is
3092// specified as EmptyDeviceID.
3093func (m *model) DismissPendingFolder(device protocol.DeviceID, folder string) error {
3094 var removedPendingFolders []map[string]string
3095 if device == protocol.EmptyDeviceID {
3096 l.Debugf("Discarding pending removed folder %s from all devices", folder)
3097 err := m.db.RemovePendingFolder(folder)
3098 if err != nil {
3099 return err
3100 }
3101 removedPendingFolders = []map[string]string{
3102 {"folderID": folder},
3103 }
3104 } else {
3105 l.Debugf("Discarding pending folder %s from device %v", folder, device)
3106 err := m.db.RemovePendingFolderForDevice(folder, device)
3107 if err != nil {
3108 return err
3109 }
3110 removedPendingFolders = []map[string]string{
3111 {
3112 "folderID": folder,
3113 "deviceID": device.String(),
3114 },
3115 }
3116 }
3117 if len(removedPendingFolders) > 0 {
3118 m.evLogger.Log(events.PendingFoldersChanged, map[string]interface{}{
3119 "removed": removedPendingFolders,
3120 })
3121 }
3122 return nil
3123}
3124
3125// mapFolders returns a map of folder ID to folder configuration for the given
3126// slice of folder configurations.
3127func mapFolders(folders []config.FolderConfiguration) map[string]config.FolderConfiguration {
3128 m := make(map[string]config.FolderConfiguration, len(folders))
3129 for _, cfg := range folders {
3130 m[cfg.ID] = cfg
3131 }
3132 return m
3133}
3134
3135// mapDevices returns a map of device ID to nothing for the given slice of
3136// device IDs.
3137func mapDevices(devices []protocol.DeviceID) map[protocol.DeviceID]struct{} { 3138 m := make(map[protocol.DeviceID]struct{}, len(devices)) 3139 for _, dev := range devices { 3140 m[dev] = struct{}{} 3141 } 3142 return m 3143} 3144 3145func observedDeviceSet(devices []config.ObservedDevice) deviceIDSet { 3146 res := make(deviceIDSet, len(devices)) 3147 for _, dev := range devices { 3148 res[dev.ID] = struct{}{} 3149 } 3150 return res 3151} 3152 3153func readOffsetIntoBuf(fs fs.Filesystem, file string, offset int64, buf []byte) (int, error) { 3154 fd, err := fs.Open(file) 3155 if err != nil { 3156 l.Debugln("readOffsetIntoBuf.Open", file, err) 3157 return 0, err 3158 } 3159 3160 defer fd.Close() 3161 n, err := fd.ReadAt(buf, offset) 3162 if err != nil { 3163 l.Debugln("readOffsetIntoBuf.ReadAt", file, err) 3164 } 3165 return n, err 3166} 3167 3168// folderDeviceSet is a set of (folder, deviceID) pairs 3169type folderDeviceSet map[string]map[protocol.DeviceID]struct{} 3170 3171// set adds the (dev, folder) pair to the set 3172func (s folderDeviceSet) set(dev protocol.DeviceID, folder string) { 3173 devs, ok := s[folder] 3174 if !ok { 3175 devs = make(map[protocol.DeviceID]struct{}) 3176 s[folder] = devs 3177 } 3178 devs[dev] = struct{}{} 3179} 3180 3181// has returns true if the (dev, folder) pair is in the set 3182func (s folderDeviceSet) has(dev protocol.DeviceID, folder string) bool { 3183 _, ok := s[folder][dev] 3184 return ok 3185} 3186 3187// hasDevice returns true if the device is set on any folder 3188func (s folderDeviceSet) hasDevice(dev protocol.DeviceID) bool { 3189 for _, devices := range s { 3190 if _, ok := devices[dev]; ok { 3191 return true 3192 } 3193 } 3194 return false 3195} 3196 3197// syncMutexMap is a type safe wrapper for a sync.Map that holds mutexes 3198type syncMutexMap struct { 3199 inner stdsync.Map 3200} 3201 3202func (m *syncMutexMap) Get(key string) sync.Mutex { 3203 v, _ := m.inner.LoadOrStore(key, sync.NewMutex()) 3204 return v.(sync.Mutex) 3205} 3206 3207type deviceIDSet map[protocol.DeviceID]struct{} 3208 3209func (s deviceIDSet) add(ids []protocol.DeviceID) { 3210 for _, id := range ids { 3211 if _, ok := s[id]; !ok { 3212 s[id] = struct{}{} 3213 } 3214 } 3215} 3216 3217func (s deviceIDSet) AsSlice() []protocol.DeviceID { 3218 ids := make([]protocol.DeviceID, 0, len(s)) 3219 for id := range s { 3220 ids = append(ids, id) 3221 } 3222 return ids 3223} 3224 3225func encryptionTokenPath(cfg config.FolderConfiguration) string { 3226 return filepath.Join(cfg.MarkerName, config.EncryptionTokenName) 3227} 3228 3229type storedEncryptionToken struct { 3230 FolderID string 3231 Token []byte 3232} 3233 3234func readEncryptionToken(cfg config.FolderConfiguration) ([]byte, error) { 3235 fd, err := cfg.Filesystem().Open(encryptionTokenPath(cfg)) 3236 if err != nil { 3237 return nil, err 3238 } 3239 defer fd.Close() 3240 var stored storedEncryptionToken 3241 if err := json.NewDecoder(fd).Decode(&stored); err != nil { 3242 return nil, err 3243 } 3244 return stored.Token, nil 3245} 3246 3247func writeEncryptionToken(token []byte, cfg config.FolderConfiguration) error { 3248 tokenName := encryptionTokenPath(cfg) 3249 fd, err := cfg.Filesystem().OpenFile(tokenName, fs.OptReadWrite|fs.OptCreate, 0666) 3250 if err != nil { 3251 return err 3252 } 3253 defer fd.Close() 3254 return json.NewEncoder(fd).Encode(storedEncryptionToken{ 3255 FolderID: cfg.ID, 3256 Token: token, 3257 }) 3258} 3259 3260func newFolderConfiguration(w config.Wrapper, id, label string, fsType 
fs.FilesystemType, path string) config.FolderConfiguration {
3261 fcfg := w.DefaultFolder()
3262 fcfg.ID = id
3263 fcfg.Label = label
3264 fcfg.FilesystemType = fsType
3265 fcfg.Path = path
3266 return fcfg
3267}
3268
3269type updatedPendingFolder struct {
3270 FolderID string `json:"folderID"`
3271 FolderLabel string `json:"folderLabel"`
3272 DeviceID protocol.DeviceID `json:"deviceID"`
3273 ReceiveEncrypted bool `json:"receiveEncrypted"`
3274 RemoteEncrypted bool `json:"remoteEncrypted"`
3275}
3276
3277// redactPathError checks if the error is actually an os.PathError, and if so
3278// returns a redactedError with the path removed.
3279func redactPathError(err error) (error, bool) {
3280 perr, ok := err.(*os.PathError)
3281 if !ok {
3282 return nil, false
3283 }
3284 return &redactedError{
3285 error: err,
3286 redacted: fmt.Errorf("%v: %w", perr.Op, perr.Err),
3287 }, true
3288}
3289
3290type redactedError struct {
3291 error
3292 redacted error
3293}
3294