1 // distribution boxbackup-0.11_trunk_2979 (svn version: 2979)
2 // Box Backup, http://www.boxbackup.org/
3 //
4 // Copyright (c) 2003-2010, Ben Summers and contributors.
5 // All rights reserved.
6 //
7 // Note that this project uses mixed licensing. Any file with this license
8 // attached, or where the code LICENSE-GPL appears on the first line, falls
9 // under the "Box Backup GPL" license. See the file COPYING.txt for more
10 // information about this license.
11 //
12 // ---------------------------------------------------------------------
13 // This program is free software; you can redistribute it and/or
14 // modify it under the terms of the GNU General Public License
15 // as published by the Free Software Foundation; either version 2
16 // of the License, or (at your option) any later version.
17 //
18 // This program is distributed in the hope that it will be useful,
19 // but WITHOUT ANY WARRANTY; without even the implied warranty of
20 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21 // GNU General Public License for more details.
22 //
23 // You should have received a copy of the GNU General Public License
24 // along with this program; if not, write to the Free Software
25 // Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
26 //
27 // [http://www.gnu.org/licenses/old-licenses/gpl-2.0.html#SEC4]
28 //
29 // As a special exception to the GPLv2, the Box Backup Project gives
30 // permission to link any code falling under this license (the Box Backup
31 // GPL) with any software that can be downloaded from
32 // the OpenSSL website [http://www.openssl.org] under either the
33 // "OpenSSL License" or the "Original SSLeay License", and to distribute
34 // the linked executables under the terms of the "Box Backup GPL" license.
35 //
36 // As a special exception to the GPLv2, the Box Backup Project gives
37 // permission to link any code falling under this license (the Box Backup
38 // GPL) with any version of Microsoft's Volume Shadow Copy Service 7.2 SDK
39 // or Microsoft Windows Software Development Kit (SDK), including
40 // vssapi.lib, that can be downloaded from the Microsoft website
41 // [*.microsoft.com], and to distribute the linked executables under the
42 // terms of the "Box Backup GPL" license.
43 // --------------------------------------------------------------------------
44 //
45 // File
46 //		Name:    BackupClientDirectoryRecord.cpp
47 //		Purpose: Implementation of a record about a directory for
48 //			 the backup client
49 //		Created: 2003/10/08
50 //
51 // --------------------------------------------------------------------------
52 
53 #include "Box.h"
54 
55 #ifdef HAVE_DIRENT_H
56 	#include <dirent.h>
57 #endif
58 
59 #include <errno.h>
60 #include <string.h>
61 
62 #include "autogen_BackupProtocolClient.h"
63 #include "Archive.h"
64 #include "BackupClientContext.h"
65 #include "BackupClientDirectoryRecord.h"
66 #include "BackupClientInodeToIDMap.h"
67 #include "BackupDaemon.h"
68 #include "BackupStoreException.h"
69 #include "BackupStoreFile.h"
70 #include "BackupStoreFileEncodeStream.h"
71 #include "CommonException.h"
72 #include "CollectInBufferStream.h"
73 #include "FileModificationTime.h"
74 #include "IOStream.h"
75 #include "Logging.h"
76 #include "MemBlockStream.h"
77 #include "PathUtils.h"
78 #include "RateLimitingStream.h"
79 #include "ReadLoggingStream.h"
80 
81 #include "MemLeakFindOn.h"
82 
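// Maps decrypted (clear-text) filenames to their corresponding entries in a
// downloaded store directory listing, so local names can be matched against
// what is already on the server.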
83 typedef std::map<std::string, BackupStoreDirectory::Entry *> DecryptedEntriesMap_t;
84 
85 // --------------------------------------------------------------------------
86 //
87 // Function
88 //		Name:    BackupClientDirectoryRecord::BackupClientDirectoryRecord()
89 //		Purpose: Constructor
90 //		Created: 2003/10/08
91 //
92 // --------------------------------------------------------------------------
93 BackupClientDirectoryRecord::BackupClientDirectoryRecord(int64_t ObjectID, const std::string &rSubDirName)
94 	: mObjectID(ObjectID),
95 	  mSubDirName(rSubDirName),
96 	  mInitialSyncDone(false),
97 	  mSyncDone(false),
98 	  mSuppressMultipleLinksWarning(false),
99 	  mpPendingEntries(0)
100 {
101 	::memset(mStateChecksum, 0, sizeof(mStateChecksum));
102 }
103 
104 // --------------------------------------------------------------------------
105 //
106 // Function
107 //		Name:    BackupClientDirectoryRecord::~BackupClientDirectoryRecord()
108 //		Purpose: Destructor
109 //		Created: 2003/10/08
110 //
111 // --------------------------------------------------------------------------
112 BackupClientDirectoryRecord::~BackupClientDirectoryRecord()
113 {
114 	// Make deletion recursive
115 	DeleteSubDirectories();
116 
117 	// Delete maps
118 	if(mpPendingEntries != 0)
119 	{
120 		delete mpPendingEntries;
121 		mpPendingEntries = 0;
122 	}
123 }
124 
125 // --------------------------------------------------------------------------
126 //
127 // Function
128 //		Name:    BackupClientDirectoryRecord::DeleteSubDirectories();
129 //		Purpose: Delete all sub directory entries
130 //		Created: 2003/10/09
131 //
132 // --------------------------------------------------------------------------
133 void BackupClientDirectoryRecord::DeleteSubDirectories()
134 {
135 	// Delete all pointers
136 	for(std::map<std::string, BackupClientDirectoryRecord *>::iterator i = mSubDirectories.begin();
137 		i != mSubDirectories.end(); ++i)
138 	{
139 		delete i->second;
140 	}
141 
142 	// Empty list
143 	mSubDirectories.clear();
144 }
145 
146 // --------------------------------------------------------------------------
147 //
148 // Function
149 //		Name:    BackupClientDirectoryRecord::SyncDirectory(
150 //			 BackupClientDirectoryRecord::SyncParams &,
151 //			 int64_t, const std::string &,
152 //			 const std::string &, bool)
153 //		Purpose: Recursively synchronise a local directory
154 //			 with the server.
155 //		Created: 2003/10/08
156 //
157 // --------------------------------------------------------------------------
158 void BackupClientDirectoryRecord::SyncDirectory(
159 	BackupClientDirectoryRecord::SyncParams &rParams,
160 	int64_t ContainingDirectoryID,
161 	const std::string &rLocalPath,
162 	const std::string &rRemotePath,
163 	bool ThisDirHasJustBeenCreated)
164 {
165 	BackupClientContext& rContext(rParams.mrContext);
166 	ProgressNotifier& rNotifier(rContext.GetProgressNotifier());
167 
168 	// Signal received by daemon?
169 	if(rParams.mrRunStatusProvider.StopRun())
170 	{
171 		// Yes. Stop now.
172 		THROW_EXCEPTION(BackupStoreException, SignalReceived)
173 	}
174 
175 	// Start by marking this sync as not done, both for this directory
176 	// and for its immediate sub directories.
177 	mSyncDone = false;
178 	for(std::map<std::string, BackupClientDirectoryRecord *>::iterator
179 		i  = mSubDirectories.begin();
180 		i != mSubDirectories.end(); ++i)
181 	{
182 		i->second->mSyncDone = false;
183 	}
184 
185 	// Work out the time in the future after which the file should
186 	// be uploaded regardless. This is a simple way to avoid having
187 	// too many problems with file servers when they have clients
188 	// with badly out of sync clocks.
189 	rParams.mUploadAfterThisTimeInTheFuture = GetCurrentBoxTime() +
190 		rParams.mMaxFileTimeInFuture;
191 
192 	// Build the current state checksum to compare against while
193 	// getting info from dirs. Note checksum is used locally only,
194 	// so byte order isn't considered.
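	// The digest covers the directory's own attributes (mode, uid, gid,
	// inode, extended attributes) plus, for each entry, its name,
	// modification times and size; if it matches the stored checksum,
	// the directory listing does not need to be fetched again.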
195 	MD5Digest currentStateChecksum;
196 
197 	EMU_STRUCT_STAT dest_st;
198 	// Stat the directory, to get attribute info
199 	// If it's a symbolic link, we want the link target here
200 	// (as we're about to back up the contents of the directory)
201 	{
202 		if(EMU_STAT(rLocalPath.c_str(), &dest_st) != 0)
203 		{
204 			// The directory has probably been deleted, so
205 			// just ignore this error. In a future scan, this
206 			// deletion will be noticed, deleted from server,
207 			// and this object deleted.
208 			rNotifier.NotifyDirStatFailed(this, rLocalPath,
209 				strerror(errno));
210 			return;
211 		}
212 
213 		BOX_TRACE("Stat dir '" << rLocalPath << "' "
214 			"found device/inode " <<
215 			dest_st.st_dev << "/" << dest_st.st_ino);
216 
217 		// Store inode number in map so directories are tracked
218 		// in case they're renamed
219 		{
220 			BackupClientInodeToIDMap &idMap(
221 				rParams.mrContext.GetNewIDMap());
222 			idMap.AddToMap(dest_st.st_ino, mObjectID,
223 				ContainingDirectoryID);
224 		}
225 		// Add attributes to checksum
226 		currentStateChecksum.Add(&dest_st.st_mode,
227 			sizeof(dest_st.st_mode));
228 		currentStateChecksum.Add(&dest_st.st_uid,
229 			sizeof(dest_st.st_uid));
230 		currentStateChecksum.Add(&dest_st.st_gid,
231 			sizeof(dest_st.st_gid));
232 		// Inode to be paranoid about things moving around
233 		currentStateChecksum.Add(&dest_st.st_ino,
234 			sizeof(dest_st.st_ino));
235 #ifdef HAVE_STRUCT_STAT_ST_FLAGS
236 		currentStateChecksum.Add(&dest_st.st_flags,
237 			sizeof(dest_st.st_flags));
238 #endif
239 
240 		StreamableMemBlock xattr;
241 		BackupClientFileAttributes::FillExtendedAttr(xattr,
242 			rLocalPath.c_str());
243 		currentStateChecksum.Add(xattr.GetBuffer(), xattr.GetSize());
244 	}
245 
246 	// Read directory entries, building arrays of names
247 	// First, need to read the contents of the directory.
248 	std::vector<std::string> dirs;
249 	std::vector<std::string> files;
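	// Set when an entry's modification time is beyond
	// mUploadAfterThisTimeInTheFuture: the listing is then fetched so the
	// future-dated file is not re-uploaded on every scan (see below).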
250 	bool downloadDirectoryRecordBecauseOfFutureFiles = false;
251 
252 	EMU_STRUCT_STAT link_st;
253 	if(EMU_LSTAT(rLocalPath.c_str(), &link_st) != 0)
254 	{
255 		// Report the error (logs and
256 		// eventual email to administrator)
257 		rNotifier.NotifyFileStatFailed(this, rLocalPath,
258 			strerror(errno));
259 
260 		// FIXME move to NotifyFileStatFailed()
261 		SetErrorWhenReadingFilesystemObject(rParams,
262 			rLocalPath.c_str());
263 
264 		// This shouldn't happen, so we'd better not continue
265 		THROW_EXCEPTION(CommonException, OSFileError)
266 	}
267 
268 	// BLOCK
269 	{
270 		// read the contents...
271 		DIR *dirHandle = 0;
272 		try
273 		{
274 			rNotifier.NotifyScanDirectory(this, rLocalPath);
275 
276 			dirHandle = ::opendir(rLocalPath.c_str());
277 			if(dirHandle == 0)
278 			{
279 				// Report the error (logs and
280 				// eventual email to administrator)
281 				if (errno == EACCES)
282 				{
283 					rNotifier.NotifyDirListFailed(this,
284 						rLocalPath, "Access denied");
285 				}
286 				else
287 				{
288 					rNotifier.NotifyDirListFailed(this,
289 						rLocalPath, strerror(errno));
290 				}
291 
292 				// Report the error (logs and eventual email
293 				// to administrator)
294 				SetErrorWhenReadingFilesystemObject(rParams,
295 					rLocalPath.c_str());
296 				// Ignore this directory for now.
297 				return;
298 			}
299 
300 			// Basic structure for checksum info
301 			struct {
302 				box_time_t mModificationTime;
303 				box_time_t mAttributeModificationTime;
304 				int64_t mSize;
305 				// And then the name follows
306 			} checksum_info;
307 			// Be paranoid about structure packing
308 			::memset(&checksum_info, 0, sizeof(checksum_info));
309 
310 			struct dirent *en = 0;
311 			EMU_STRUCT_STAT file_st;
312 			std::string filename;
313 			while((en = ::readdir(dirHandle)) != 0)
314 			{
315 				rParams.mrContext.DoKeepAlive();
316 
317 				// Don't need to use
318 				// LinuxWorkaround_FinishDirentStruct(en,
319 				// rLocalPath.c_str());
320 				// on Linux, as a stat is performed to
321 				// get all this info
322 
323 				if(en->d_name[0] == '.' &&
324 					(en->d_name[1] == '\0' || (en->d_name[1] == '.' && en->d_name[2] == '\0')))
325 				{
326 					// ignore, it's . or ..
327 					continue;
328 				}
329 
330 				// Stat file to get info
331 				filename = MakeFullPath(rLocalPath, en->d_name);
332 
333 				#ifdef WIN32
334 				// Don't stat the file just yet, to ensure
335 				// that users can exclude unreadable files
336 				// to suppress warnings that they are
337 				// not accessible.
338 				//
339 				// Our emulated readdir() abuses en->d_type,
340 				// which would normally contain DT_REG,
341 				// DT_DIR, etc, but we only use it here and
342 				// prefer S_IFREG, S_IFDIR...
343 				int type = en->d_type;
344 				#else
345 				if(EMU_LSTAT(filename.c_str(), &file_st) != 0)
346 				{
347 					if(!(rParams.mrContext.ExcludeDir(
348 						filename)))
349 					{
350 						// Report the error (logs and
351 						// eventual email to
352 						// administrator)
353  						rNotifier.NotifyFileStatFailed(
354 							this, filename,
355 							strerror(errno));
356 
357 						// FIXME move to
358 						// NotifyFileStatFailed()
359 						SetErrorWhenReadingFilesystemObject(
360 							rParams, filename.c_str());
361 					}
362 
363 					// Ignore this entry for now.
364 					continue;
365 				}
366 
367 				int type = file_st.st_mode & S_IFMT;
368 
369 				// ecryptfs reports nlink > 1 for directories
370 				// with contents, but no filesystem appears to
371 				// support hard-linking directories, so we can
372 				// ignore this if the entry is a directory.
373 				if(file_st.st_nlink != 1 && type == S_IFDIR)
374 				{
375 					BOX_INFO("Ignoring apparent hard link "
376 						"count on directory: " <<
377 						filename << ", nlink=" <<
378 						file_st.st_nlink);
379 				}
380 				else if(file_st.st_nlink != 1)
381 				{
382 					if(!mSuppressMultipleLinksWarning)
383 					{
384 						BOX_WARNING("File is hard linked, this may "
385 							"cause rename tracking to fail and "
386 							"move files incorrectly in your "
387 							"backup! " << filename <<
388 							", nlink=" << file_st.st_nlink <<
389 							" (suppressing further warnings)");
390 						mSuppressMultipleLinksWarning = true;
391 					}
392 					SetErrorWhenReadingFilesystemObject(
393 						rParams, filename.c_str());
394 				}
395 
396 				BOX_TRACE("Stat entry '" << filename << "' "
397 					"found device/inode " <<
398 					file_st.st_dev << "/" <<
399 					file_st.st_ino);
400 
401 				/* Workaround for apparent btrfs bug, where
402 				symlinks appear to be on a different filesystem
403 				than their containing directory, thanks to
404 				Toke Hoiland-Jorgensen */
405 				if(type == S_IFDIR &&
406 					file_st.st_dev != dest_st.st_dev)
407 				{
408 					if(!(rParams.mrContext.ExcludeDir(
409 						filename)))
410 					{
411 						rNotifier.NotifyMountPointSkipped(
412 							this, filename);
413 					}
414 					continue;
415 				}
416 				#endif
417 
418 				if(type == S_IFREG || type == S_IFLNK)
419 				{
420 					// File or symbolic link
421 
422 					// Exclude it?
423 					if(rParams.mrContext.ExcludeFile(filename))
424 					{
425  						rNotifier.NotifyFileExcluded(
426 								this,
427 								filename);
428 
429 						// Next item!
430 						continue;
431 					}
432 
433 					// Store on list
434 					files.push_back(std::string(en->d_name));
435 				}
436 				else if(type == S_IFDIR)
437 				{
438 					// Directory
439 
440 					// Exclude it?
441 					if(rParams.mrContext.ExcludeDir(filename))
442 					{
443  						rNotifier.NotifyDirExcluded(
444 								this,
445 								filename);
446 
447 						// Next item!
448 						continue;
449 					}
450 
451 					// Store on list
452 					dirs.push_back(std::string(en->d_name));
453 				}
454 				else
455 				{
456 					if (type == S_IFSOCK
457 #						ifndef WIN32
458 						|| type == S_IFIFO
459 #						endif
460 						)
461 					{
462 						// removed notification for these types
463 						// see Debian bug 479145, no objections
464 					}
465 					else if(rParams.mrContext.ExcludeFile(filename))
466 					{
467  						rNotifier.NotifyFileExcluded(
468 								this,
469 								filename);
470 					}
471 					else
472 					{
473  						rNotifier.NotifyUnsupportedFileType(
474 								this, filename);
475 						SetErrorWhenReadingFilesystemObject(
476 							rParams, filename.c_str());
477 					}
478 
479 					continue;
480 				}
481 
482 				// Here if the object is something to back up (file, symlink or dir, not excluded)
483 				// So make the information for adding to the checksum
484 
485 				#ifdef WIN32
486 				// We didn't stat the file before,
487 				// but now we need the information.
488 				if(emu_stat(filename.c_str(), &file_st) != 0)
489 				{
490  					rNotifier.NotifyFileStatFailed(this,
491  							filename,
492 							strerror(errno));
493 
494 					// Report the error (logs and
495 					// eventual email to administrator)
496 					SetErrorWhenReadingFilesystemObject(
497 						rParams, filename.c_str());
498 
499 					// Ignore this entry for now.
500 					continue;
501 				}
502 
503 				if(file_st.st_dev != link_st.st_dev)
504 				{
505  					rNotifier.NotifyMountPointSkipped(this,
506  							filename);
507 					continue;
508 				}
509 				#endif
510 
511 				checksum_info.mModificationTime = FileModificationTime(file_st);
512 				checksum_info.mAttributeModificationTime = FileAttrModificationTime(file_st);
513 				checksum_info.mSize = file_st.st_size;
514 				currentStateChecksum.Add(&checksum_info, sizeof(checksum_info));
515 				currentStateChecksum.Add(en->d_name, strlen(en->d_name));
516 
517 				// If the file has been modified madly into the future, download the
518 				// directory record anyway to ensure that it doesn't get uploaded
519 				// every single time the disc is scanned.
520 				if(checksum_info.mModificationTime > rParams.mUploadAfterThisTimeInTheFuture)
521 				{
522 					downloadDirectoryRecordBecauseOfFutureFiles = true;
523 					// Log that this has happened
524 					if(!rParams.mHaveLoggedWarningAboutFutureFileTimes)
525 					{
526 						rNotifier.NotifyFileModifiedInFuture(
527 							this, filename);
528 						rParams.mHaveLoggedWarningAboutFutureFileTimes = true;
529 					}
530 				}
531 			}
532 
533 			if(::closedir(dirHandle) != 0)
534 			{
535 				THROW_EXCEPTION(CommonException, OSFileError)
536 			}
537 			dirHandle = 0;
538 		}
539 		catch(...)
540 		{
541 			if(dirHandle != 0)
542 			{
543 				::closedir(dirHandle);
544 			}
545 			throw;
546 		}
547 	}
548 
549 	// Finish off the checksum, and compare with the one currently stored
550 	bool checksumDifferent = true;
551 	currentStateChecksum.Finish();
552 	if(mInitialSyncDone && currentStateChecksum.DigestMatches(mStateChecksum))
553 	{
554 		// The checksum is the same, and there was one to compare with
555 		checksumDifferent = false;
556 	}
557 
558 	// Pointer to potentially downloaded store directory info
559 	BackupStoreDirectory *pdirOnStore = 0;
560 
561 	try
562 	{
563 		// Want to get the directory listing?
564 		if(ThisDirHasJustBeenCreated)
565 		{
566 			// Avoid sending another command to the server when we know it's empty
567 			pdirOnStore = new BackupStoreDirectory(mObjectID, ContainingDirectoryID);
568 		}
569 		else
570 		{
571 			// Consider asking the store for it
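			// A fresh listing is needed on the very first sync, whenever
			// the local state checksum has changed, or when future-dated
			// files force a re-check.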
572 			if(!mInitialSyncDone || checksumDifferent || downloadDirectoryRecordBecauseOfFutureFiles)
573 			{
574 				pdirOnStore = FetchDirectoryListing(rParams);
575 			}
576 		}
577 
578 		// Make sure the attributes are up to date -- if there's space on the server
579 		// and this directory has not just been created (because its attributes will be correct in this case)
580 		// and the checksum is different, implying they *MIGHT* be different.
581 		if((!ThisDirHasJustBeenCreated) && checksumDifferent && (!rParams.mrContext.StorageLimitExceeded()))
582 		{
583 			UpdateAttributes(rParams, pdirOnStore, rLocalPath);
584 		}
585 
586 		// Create the list of pointers to directory entries
587 		std::vector<BackupStoreDirectory::Entry *> entriesLeftOver;
588 		if(pdirOnStore)
589 		{
590 			entriesLeftOver.resize(pdirOnStore->GetNumberOfEntries(), 0);
591 			BackupStoreDirectory::Iterator i(*pdirOnStore);
592 			// Copy in pointers to all the entries
593 			for(unsigned int l = 0; l < pdirOnStore->GetNumberOfEntries(); ++l)
594 			{
595 				entriesLeftOver[l] = i.Next();
596 			}
597 		}
598 
599 		// Do the directory reading
600 		bool updateCompleteSuccess = UpdateItems(rParams, rLocalPath,
601 			rRemotePath, pdirOnStore, entriesLeftOver, files, dirs);
602 
603 		// LAST THING! (think exception safety)
604 		// Store the new checksum -- don't fetch things unnecessarily in the future
605 		// But... only if 1) the storage limit isn't exceeded -- make sure things are done again if
606 		// the directory is modified later
607 		// and 2) All the objects within the directory were stored successfully.
608 		if(!rParams.mrContext.StorageLimitExceeded() && updateCompleteSuccess)
609 		{
610 			currentStateChecksum.CopyDigestTo(mStateChecksum);
611 		}
612 	}
613 	catch(...)
614 	{
615 		// Bad things have happened -- clean up
616 		if(pdirOnStore != 0)
617 		{
618 			delete pdirOnStore;
619 			pdirOnStore = 0;
620 		}
621 
622 		// Set things so that we get a full go at stuff later
623 		::memset(mStateChecksum, 0, sizeof(mStateChecksum));
624 
625 		throw;
626 	}
627 
628 	// Clean up directory on store
629 	if(pdirOnStore != 0)
630 	{
631 		delete pdirOnStore;
632 		pdirOnStore = 0;
633 	}
634 
635 	// Flag things as having happened.
636 	mInitialSyncDone = true;
637 	mSyncDone = true;
638 }
639 
640 // --------------------------------------------------------------------------
641 //
642 // Function
643 //		Name:    BackupClientDirectoryRecord::FetchDirectoryListing(BackupClientDirectoryRecord::SyncParams &)
644 //		Purpose: Fetch the directory listing of this directory from the store.
645 //		Created: 2003/10/09
646 //
647 // --------------------------------------------------------------------------
648 BackupStoreDirectory *BackupClientDirectoryRecord::FetchDirectoryListing(BackupClientDirectoryRecord::SyncParams &rParams)
649 {
650 	BackupStoreDirectory *pdir = 0;
651 
652 	try
653 	{
654 		// Get connection to store
655 		BackupProtocolClient &connection(rParams.mrContext.GetConnection());
656 
657 		// Query the directory
658 		std::auto_ptr<BackupProtocolClientSuccess> dirreply(connection.QueryListDirectory(
659 				mObjectID,
660 				BackupProtocolClientListDirectory::Flags_INCLUDE_EVERYTHING,	// both files and directories
661 				BackupProtocolClientListDirectory::Flags_Deleted | BackupProtocolClientListDirectory::Flags_OldVersion, // exclude old/deleted stuff
662 				true /* want attributes */));
663 
664 		// Retrieve the directory from the stream following
665 		pdir = new BackupStoreDirectory;
666 		ASSERT(pdir != 0);
667 		std::auto_ptr<IOStream> dirstream(connection.ReceiveStream());
668 		pdir->ReadFromStream(*dirstream, connection.GetTimeout());
669 	}
670 	catch(...)
671 	{
672 		delete pdir;
673 		pdir = 0;
674 		throw;
675 	}
676 
677 	return pdir;
678 }
679 
680 
681 // --------------------------------------------------------------------------
682 //
683 // Function
684 //		Name:    BackupClientDirectoryRecord::UpdateAttributes(BackupClientDirectoryRecord::SyncParams &, const std::string &)
685 //		Purpose: Sets the attributes of the directory on the store, if necessary
686 //		Created: 2003/10/09
687 //
688 // --------------------------------------------------------------------------
689 void BackupClientDirectoryRecord::UpdateAttributes(BackupClientDirectoryRecord::SyncParams &rParams, BackupStoreDirectory *pDirOnStore, const std::string &rLocalPath)
690 {
691 	// Get attributes for the directory
692 	BackupClientFileAttributes attr;
693 	box_time_t attrModTime = 0;
694 	attr.ReadAttributes(rLocalPath.c_str(), true /* directories have zero mod times */,
695 		0 /* no modification time */, &attrModTime);
696 
697 	// Assume attributes need updating, unless proved otherwise
698 	bool updateAttr = true;
699 
700 	// Got a listing to compare with?
701 	ASSERT(pDirOnStore == 0 || (pDirOnStore != 0 && pDirOnStore->HasAttributes()));
702 	if(pDirOnStore != 0 && pDirOnStore->HasAttributes())
703 	{
704 		const StreamableMemBlock &storeAttrEnc(pDirOnStore->GetAttributes());
705 		// Explicit decryption
706 		BackupClientFileAttributes storeAttr(storeAttrEnc);
707 
708 		// Compare the attributes
709 		if(attr.Compare(storeAttr, true,
710 			true /* ignore both modification times */))
711 		{
712 			// No update necessary
713 			updateAttr = false;
714 		}
715 	}
716 
717 	// Update them?
718 	if(updateAttr)
719 	{
720 		// Get connection to store
721 		BackupProtocolClient &connection(rParams.mrContext.GetConnection());
722 
723 		// Exception thrown if this doesn't work
724 		MemBlockStream attrStream(attr);
725 		connection.QueryChangeDirAttributes(mObjectID, attrModTime, attrStream);
726 	}
727 }
728 
729 
730 // --------------------------------------------------------------------------
731 //
732 // Function
733 //		Name:    BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncParams &, const std::string &, BackupStoreDirectory *, std::vector<BackupStoreDirectory::Entry *> &)
734 //		Purpose: Update the items stored on the server. The rFiles vector will be erased after it's used to save space.
735 //				 Returns true if all items were updated successfully. (If not, the failures will have been logged).
736 //		Created: 2003/10/09
737 //
738 // --------------------------------------------------------------------------
739 bool BackupClientDirectoryRecord::UpdateItems(
740 	BackupClientDirectoryRecord::SyncParams &rParams,
741 	const std::string &rLocalPath,
742 	const std::string &rRemotePath,
743 	BackupStoreDirectory *pDirOnStore,
744 	std::vector<BackupStoreDirectory::Entry *> &rEntriesLeftOver,
745 	std::vector<std::string> &rFiles,
746 	const std::vector<std::string> &rDirs)
747 {
748 	BackupClientContext& rContext(rParams.mrContext);
749 	ProgressNotifier& rNotifier(rContext.GetProgressNotifier());
750 
751 	bool allUpdatedSuccessfully = true;
752 
753 	// Decrypt all the directory entries.
754 	// It would be nice to be able to just compare the encrypted versions, however this doesn't work
755 	// in practice because there can be multiple encodings of the same filename using different
756 	// methods (although each method will result in the same string for the same filename). This
757 	// happens when the server fixes a broken store, and gives plain text generated filenames.
758 	// So if we didn't do things like this, then you wouldn't be able to recover from bad things
759 	// happening with the server.
760 	DecryptedEntriesMap_t decryptedEntries;
761 	if(pDirOnStore != 0)
762 	{
763 		BackupStoreDirectory::Iterator i(*pDirOnStore);
764 		BackupStoreDirectory::Entry *en = 0;
765 		while((en = i.Next()) != 0)
766 		{
767 			decryptedEntries[BackupStoreFilenameClear(en->GetName()).GetClearFilename()] = en;
768 		}
769 	}
770 
771 	// Do files
772 	for(std::vector<std::string>::const_iterator f = rFiles.begin();
773 		f != rFiles.end(); ++f)
774 	{
775 		// Send keep-alive message if needed
776 		rContext.DoKeepAlive();
777 
778 		// Filename of this file
779 		std::string filename(MakeFullPath(rLocalPath, *f));
780 
781 		// Get relevant info about file
782 		box_time_t modTime = 0;
783 		uint64_t attributesHash = 0;
784 		int64_t fileSize = 0;
785 		InodeRefType inodeNum = 0;
786 		bool hasMultipleHardLinks = true;
787 		// BLOCK
788 		{
789 			// Stat the file
790 			EMU_STRUCT_STAT st;
791 			if(EMU_LSTAT(filename.c_str(), &st) != 0)
792 			{
793 				rNotifier.NotifyFileStatFailed(this,
794 					filename, strerror(errno));
795 
796 				// Report the error (logs and
797 				// eventual email to administrator)
798 				SetErrorWhenReadingFilesystemObject(rParams,
799 					filename.c_str());
800 
801 				// Ignore this entry for now.
802 				continue;
803 			}
804 
805 			// Extract required data
806 			modTime = FileModificationTime(st);
807 			fileSize = st.st_size;
808 			inodeNum = st.st_ino;
809 			hasMultipleHardLinks = (st.st_nlink > 1);
810 			attributesHash = BackupClientFileAttributes::GenerateAttributeHash(st, filename, *f);
811 		}
812 
813 		// See if it's in the listing (if we have one)
814 		BackupStoreFilenameClear storeFilename(*f);
815 		BackupStoreDirectory::Entry *en = 0;
816 		int64_t latestObjectID = 0;
817 		if(pDirOnStore != 0)
818 		{
819 			DecryptedEntriesMap_t::iterator i(decryptedEntries.find(*f));
820 			if(i != decryptedEntries.end())
821 			{
822 				en = i->second;
823 				latestObjectID = en->GetObjectID();
824 			}
825 		}
826 
827 		// Check that the entry which might have been found is in fact a file
828 		if((en != 0) && ((en->GetFlags() & BackupStoreDirectory::Entry::Flags_File) == 0))
829 		{
830 			// Directory exists in the place of this file -- sort it out
831 			RemoveDirectoryInPlaceOfFile(rParams, pDirOnStore,
832 				en, *f);
833 			en = 0;
834 		}
835 
836 		// Check for renaming?
837 		if(pDirOnStore != 0 && en == 0)
838 		{
839 			// We now know...
840 			// 1) The file is not in the store listing
841 			// 2) So it has either just been added locally, or renamed
842 
843 			// Do we know about the inode number?
844 			const BackupClientInodeToIDMap &idMap(rContext.GetCurrentIDMap());
845 			int64_t renameObjectID = 0, renameInDirectory = 0;
846 			if(idMap.Lookup(inodeNum, renameObjectID, renameInDirectory))
847 			{
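				// The inode was backed up before under another name or
				// location, so this may be a rename: rather than uploading
				// the data again, try to move the existing object on the
				// server to the new name.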
848 				// Look up on the server to get the name, to build the local filename
849 				std::string localPotentialOldName;
850 				bool isDir = false;
851 				bool isCurrentVersion = false;
852 				box_time_t srvModTime = 0, srvAttributesHash = 0;
853 				BackupStoreFilenameClear oldLeafname;
854 				if(rContext.FindFilename(renameObjectID, renameInDirectory, localPotentialOldName, isDir, isCurrentVersion, &srvModTime, &srvAttributesHash, &oldLeafname))
855 				{
856 					// Only interested if it's a file and the latest version
857 					if(!isDir && isCurrentVersion)
858 					{
859 						// Check that the object we found in the ID map doesn't exist on disc
860 						EMU_STRUCT_STAT st;
861 						if(EMU_STAT(localPotentialOldName.c_str(), &st) != 0 && errno == ENOENT)
862 						{
863 							// Doesn't exist locally, but does exist on the server.
864 							// Therefore we can safely rename it to this new file.
865 
866 							// Get the connection to the server
867 							BackupProtocolClient &connection(rContext.GetConnection());
868 
869 							// Only do this step if there is room on the server.
870 							// This step will be repeated later when there is space available
871 							if(!rContext.StorageLimitExceeded())
872 							{
873 								// Rename the existing files (ie include old versions) on the server
874 								connection.QueryMoveObject(renameObjectID, renameInDirectory, mObjectID /* move to this directory */,
875 									BackupProtocolClientMoveObject::Flags_MoveAllWithSameName | BackupProtocolClientMoveObject::Flags_AllowMoveOverDeletedObject,
876 									storeFilename);
877 
878 								// Stop the attempt to delete the file in the original location
879 								BackupClientDeleteList &rdelList(rContext.GetDeleteList());
880 								rdelList.StopFileDeletion(renameInDirectory, oldLeafname);
881 
882 								// Create new entry in the directory for it
883 								// -- will be near enough what's actually on the server for the rest to work.
884 								en = pDirOnStore->AddEntry(storeFilename, srvModTime, renameObjectID, 0 /* size in blocks unknown, but not needed */,
885 									BackupStoreDirectory::Entry::Flags_File, srvAttributesHash);
886 
887 								// Store the object ID for the inode lookup map later
888 								latestObjectID = renameObjectID;
889 							}
890 						}
891 					}
892 				}
893 			}
894 		}
895 
896 		// Is it in the mPendingEntries list?
897 		box_time_t pendingFirstSeenTime = 0;		// ie not seen
898 		if(mpPendingEntries != 0)
899 		{
900 			std::map<std::string, box_time_t>::const_iterator i(mpPendingEntries->find(*f));
901 			if(i != mpPendingEntries->end())
902 			{
903 				// found it -- set flag
904 				pendingFirstSeenTime = i->second;
905 			}
906 		}
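		// A non-zero pendingFirstSeenTime means this file was previously
		// noticed as modified too recently to upload; once it has been
		// pending for longer than MaxUploadWait it is uploaded anyway
		// (see the "continually modified" check below).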
907 
908 		// If pDirOnStore == 0, then this must have been after an initial sync:
909 		ASSERT(pDirOnStore != 0 || mInitialSyncDone);
910 		// So, if pDirOnStore == 0, then we know that everything before syncPeriodStart
911 		// is either on the server, or in the toupload list. If the directory had changed,
912 		// we'd have got a directory listing.
913 		//
914 		// At this point, if (pDirOnStore == 0 && en == 0), we can assume it's on the server with a
915 		// mod time < syncPeriodStart, or didn't exist before that time.
916 		//
917 		// But if en != 0, then we need to compare modification times to avoid uploading it again.
918 
919 		// Need to update?
920 		//
921 		// Conditions for upload:
922 		//    the modification time is within the sync period;
923 		//    or it has been seen before but not uploaded, and the time since it was first seen exceeds MaxUploadWait;
924 		//    and, if we know about it from a directory listing, its modification time differs from the one on the store
925 
926 		bool doUpload = false;
927 
928 		// Only upload a file if the mod time locally is
929 		// different to that on the server.
930 
931 		if (en == 0 || en->GetModificationTime() != modTime)
932 		{
933 			// Check whether the file was modified within the acceptable time period we're checking.
934 			// If the file isn't on the server, the acceptable time starts at zero.
935 			// Check pDirOnStore and en, because if we didn't download a directory listing,
936 			// pDirOnStore will be zero, but we know it's on the server.
937 			if (modTime < rParams.mSyncPeriodEnd)
938 			{
939 				if (pDirOnStore != 0 && en == 0)
940 				{
941 					doUpload = true;
942 					BOX_TRACE("Upload decision: " <<
943 						filename << ": will upload "
944 						"(not on server)");
945 				}
946 				else if (modTime >= rParams.mSyncPeriodStart)
947 				{
948 					doUpload = true;
949 					BOX_TRACE("Upload decision: " <<
950 						filename << ": will upload "
951 						"(modified since last sync)");
952 				}
953 			}
954 
955 			// However, just in case things are continually
956 			// modified, we check the first seen time.
957 			// The two compares of syncPeriodEnd and
958 			// pendingFirstSeenTime are because the values
959 			// are unsigned.
960 
961 			if (!doUpload &&
962 				pendingFirstSeenTime != 0 &&
963 				rParams.mSyncPeriodEnd > pendingFirstSeenTime &&
964 				(rParams.mSyncPeriodEnd - pendingFirstSeenTime)
965 				> rParams.mMaxUploadWait)
966 			{
967 				doUpload = true;
968 				BOX_TRACE("Upload decision: " <<
969 					filename << ": will upload "
970 					"(continually modified)");
971 			}
972 
973 			// Then make sure that if a file is added with a
974 			// modification time earlier than the sync period start
975 			// (which can easily happen on a file server), it
976 			// still gets uploaded. The directory contents checksum
977 			// will pick up the fact it has been added, so the
978 			// store listing will be available when this happens.
979 
980 			if (!doUpload &&
981 				modTime <= rParams.mSyncPeriodStart &&
982 				en != 0 &&
983 				en->GetModificationTime() != modTime)
984 			{
985 				doUpload = true;
986 				BOX_TRACE("Upload decision: " <<
987 					filename << ": will upload "
988 					"(mod time changed)");
989 			}
990 
991 			// And just to catch clients whose clocks are
992 			// badly out of sync with the file server,
993 			// upload the file if its mod time is madly in the future.
994 
995 			if (!doUpload && modTime >
996 				rParams.mUploadAfterThisTimeInTheFuture)
997 			{
998 				doUpload = true;
999 				BOX_TRACE("Upload decision: " <<
1000 					filename << ": will upload "
1001 					"(mod time in the future)");
1002 			}
1003 		}
1004 
1005 		if (en != 0 && en->GetModificationTime() == modTime)
1006 		{
1007 			BOX_TRACE("Upload decision: " <<
1008 				filename << ": will not upload "
1009 				"(not modified since last upload)");
1010 		}
1011 		else if (!doUpload)
1012 		{
1013 			if (modTime > rParams.mSyncPeriodEnd)
1014 			{
1015 				box_time_t now = GetCurrentBoxTime();
1016 				int age = BoxTimeToSeconds(now -
1017 					modTime);
1018 				BOX_TRACE("Upload decision: " <<
1019 					filename << ": will not upload "
1020 					"(modified too recently: "
1021 					"only " << age << " seconds ago)");
1022 			}
1023 			else
1024 			{
1025 				BOX_TRACE("Upload decision: " <<
1026 					filename << ": will not upload "
1027 					"(mod time is " << modTime <<
1028 					" which is outside sync window, "
1029 					<< rParams.mSyncPeriodStart << " to "
1030 					<< rParams.mSyncPeriodEnd << ")");
1031 			}
1032 		}
1033 
1034 		bool fileSynced = true;
1035 
1036 		if (doUpload)
1037 		{
1038 			// Upload needed, don't mark sync success until
1039 			// we've actually done it
1040 			fileSynced = false;
1041 
1042 			// Make sure we're connected -- must connect here so we know whether
1043 			// the storage limit has been exceeded, and hence whether or not
1044 			// to actually upload the file.
1045 			rContext.GetConnection();
1046 
1047 			// Only do this step if there is room on the server.
1048 			// This step will be repeated later when there is space available
1049 			if(!rContext.StorageLimitExceeded())
1050 			{
1051 				// Upload the file to the server, recording the
1052 				// object ID it returns
1053 				bool noPreviousVersionOnServer =
1054 					((pDirOnStore != 0) && (en == 0));
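
				// If we have the listing and the file is not in it, there is
				// nothing on the server to diff against, so UploadFile() will
				// send the whole file rather than querying for a block index.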
1055 
1056 				// Surround this in a try/catch block, to
1057 				// catch errors, but still continue
1058 				bool uploadSuccess = false;
1059 				try
1060 				{
1061 					latestObjectID = UploadFile(rParams,
1062 						filename, storeFilename,
1063 						fileSize, modTime,
1064 						attributesHash,
1065 						noPreviousVersionOnServer);
1066 
1067 					if (latestObjectID == 0)
1068 					{
1069 						// storage limit exceeded
1070 						rParams.mrContext.SetStorageLimitExceeded();
1071 						uploadSuccess = false;
1072 						allUpdatedSuccessfully = false;
1073 					}
1074 					else
1075 					{
1076 						uploadSuccess = true;
1077 					}
1078 				}
1079 				catch(ConnectionException &e)
1080 				{
1081 					// Connection errors should just be
1082 					// passed on to the main handler,
1083 					// retries would probably just cause
1084 					// more problems.
1085 					rNotifier.NotifyFileUploadException(
1086 						this, filename, e);
1087 					throw;
1088 				}
1089 				catch(BoxException &e)
1090 				{
1091 					if (e.GetType() == BackupStoreException::ExceptionType &&
1092 						e.GetSubType() == BackupStoreException::SignalReceived)
1093 					{
1094 						// abort requested, pass the
1095 						// exception on up.
1096 						throw;
1097 					}
1098 
1099 					// an error occurred -- make the return
1100 					// code false, to show an error in the directory
1101 					allUpdatedSuccessfully = false;
1102 					// Log it.
1103 					SetErrorWhenReadingFilesystemObject(rParams, filename.c_str());
1104 					rNotifier.NotifyFileUploadException(
1105 						this, filename, e);
1106 				}
1107 
1108 				// Update structures if the file was uploaded
1109 				// successfully.
1110 				if(uploadSuccess)
1111 				{
1112 					fileSynced = true;
1113 
1114 					// delete from pending entries
1115 					if(pendingFirstSeenTime != 0 && mpPendingEntries != 0)
1116 					{
1117 						mpPendingEntries->erase(*f);
1118 					}
1119 				}
1120 			}
1121 			else
1122 			{
1123 				rNotifier.NotifyFileSkippedServerFull(this,
1124 					filename);
1125 			}
1126 		}
1127 		else if(en != 0 && en->GetAttributesHash() != attributesHash)
1128 		{
1129 			// Attributes have probably changed, upload them again.
1130 			// If the attributes have changed enough, the directory
1131 			// hash will have changed too, and so the dir will have
1132 			// been downloaded, and the entry will be available.
1133 
1134 			// Get connection
1135 			BackupProtocolClient &connection(rContext.GetConnection());
1136 
1137 			// Only do this step if there is room on the server.
1138 			// This step will be repeated later when there is
1139 			// space available
1140 			if(!rContext.StorageLimitExceeded())
1141 			{
1142 				try
1143 				{
1144 					rNotifier.NotifyFileUploadingAttributes(
1145 						this, filename);
1146 
1147 					// Update store
1148 					BackupClientFileAttributes attr;
1149 					attr.ReadAttributes(filename.c_str(), false /* put mod times in the attributes, please */);
1150 					MemBlockStream attrStream(attr);
1151 					connection.QuerySetReplacementFileAttributes(mObjectID, attributesHash, storeFilename, attrStream);
1152 					fileSynced = true;
1153 				}
1154 				catch (BoxException &e)
1155 				{
1156 					BOX_ERROR("Failed to read or store "
1157 						"file attributes for '" <<
1158 						filename << "', will try "
1159 						"again later");
1160 				}
1161 			}
1162 		}
1163 
1164 		if(modTime >= rParams.mSyncPeriodEnd)
1165 		{
1166 			// Allocate?
1167 			if(mpPendingEntries == 0)
1168 			{
1169 				mpPendingEntries = new std::map<std::string, box_time_t>;
1170 			}
1171 			// Adding to mPendingEntries list
1172 			if(pendingFirstSeenTime == 0)
1173 			{
1174 				// Haven't seen this before -- add to list!
1175 				(*mpPendingEntries)[*f] = modTime;
1176 			}
1177 		}
1178 
1179 		// Zero the corresponding pointer in rEntriesLeftOver, if we have one, so the entry is not deleted later
1180 		if(en != 0)
1181 		{
1182 			for(unsigned int l = 0; l < rEntriesLeftOver.size(); ++l)
1183 			{
1184 				if(rEntriesLeftOver[l] == en)
1185 				{
1186 					rEntriesLeftOver[l] = 0;
1187 					break;
1188 				}
1189 			}
1190 		}
1191 
1192 		// Does this file need an entry in the ID map?
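		// Only files at least FileTrackingSizeThreshold bytes long are
		// recorded; the map is what makes rename detection (above) possible
		// on later runs.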
1193 		if(fileSize >= rParams.mFileTrackingSizeThreshold)
1194 		{
1195 			// Get the map
1196 			BackupClientInodeToIDMap &idMap(rContext.GetNewIDMap());
1197 
1198 			// Need to get an ID from somewhere...
1199 			if(latestObjectID != 0)
1200 			{
1201 				// Use this one
1202 				BOX_TRACE("Storing uploaded file ID " <<
1203 					inodeNum << " (" << filename << ") "
1204 					"in ID map as object " <<
1205 					latestObjectID << " with parent " <<
1206 					mObjectID);
1207 				idMap.AddToMap(inodeNum, latestObjectID, mObjectID /* containing directory */);
1208 			}
1209 			else
1210 			{
1211 				// Don't know it -- haven't sent anything to the store, and didn't get a listing.
1212 				// Look it up in the current map, and if it's there, use that.
1213 				const BackupClientInodeToIDMap &currentIDMap(rContext.GetCurrentIDMap());
1214 				int64_t objid = 0, dirid = 0;
1215 				if(currentIDMap.Lookup(inodeNum, objid, dirid))
1216 				{
1217 					// Found
1218 					if (dirid != mObjectID)
1219 					{
1220 						BOX_WARNING("Found conflicting parent ID for file ID " << inodeNum << " (" << filename << "): expected " << mObjectID << " but found " << dirid << " (same directory used in two different locations?)");
1221 					}
1222 
1223 					ASSERT(dirid == mObjectID);
1224 
1225 					// NOTE: If the above assert fails, an inode number has been reused by the OS,
1226 					// or there is a problem somewhere. If this happened on a short test run, look
1227 					// into it. However, in a long running process this may happen occasionally and
1228 					// not indicate anything wrong.
1229 					// Run the release version for real life use, where this check is not made.
1230 					BOX_TRACE("Storing found file ID " <<
1231 						inodeNum << " (" << filename <<
1232 						") in ID map as object " <<
1233 						objid << " with parent " <<
1234 						mObjectID);
1235 					idMap.AddToMap(inodeNum, objid,
1236 						mObjectID /* containing directory */);
1237 				}
1238 			}
1239 		}
1240 
1241 		if (fileSynced)
1242 		{
1243 			rNotifier.NotifyFileSynchronised(this, filename,
1244 				fileSize);
1245 		}
1246 	}
1247 
1248 	// Erase contents of files to save space when recursing
1249 	rFiles.clear();
1250 
1251 	// Delete the pending entries map, if it is empty
1252 	if(mpPendingEntries != 0 && mpPendingEntries->size() == 0)
1253 	{
1254 		BOX_TRACE("Deleting mpPendingEntries from dir ID " <<
1255 			BOX_FORMAT_OBJECTID(mObjectID));
1256 		delete mpPendingEntries;
1257 		mpPendingEntries = 0;
1258 	}
1259 
1260 	// Do directories
1261 	for(std::vector<std::string>::const_iterator d = rDirs.begin();
1262 		d != rDirs.end(); ++d)
1263 	{
1264 		// Send keep-alive message if needed
1265 		rContext.DoKeepAlive();
1266 
1267 		// Get the local filename
1268 		std::string dirname(MakeFullPath(rLocalPath, *d));
1269 
1270 		// See if it's in the listing (if we have one)
1271 		BackupStoreFilenameClear storeFilename(*d);
1272 		BackupStoreDirectory::Entry *en = 0;
1273 		if(pDirOnStore != 0)
1274 		{
1275 			DecryptedEntriesMap_t::iterator i(decryptedEntries.find(*d));
1276 			if(i != decryptedEntries.end())
1277 			{
1278 				en = i->second;
1279 			}
1280 		}
1281 
1282 		// Check that the entry which might have been found is in fact a directory
1283 		if((en != 0) && ((en->GetFlags() & BackupStoreDirectory::Entry::Flags_Dir) == 0))
1284 		{
1285 			// Entry exists, but is not a directory. Bad.
1286 			// Get rid of it.
1287 			BackupProtocolClient &connection(rContext.GetConnection());
1288 			connection.QueryDeleteFile(mObjectID /* in directory */, storeFilename);
1289 			rNotifier.NotifyFileDeleted(en->GetObjectID(),
1290 				storeFilename.GetClearFilename());
1291 
1292 			// Nothing found
1293 			en = 0;
1294 		}
1295 
1296 		// Flag for having created directory, so can optimise the
1297 		// recursive call not to read it again, because we know
1298 		// it's empty.
1299 		bool haveJustCreatedDirOnServer = false;
1300 
1301 		// Next, see if it's in the list of sub directories
1302 		BackupClientDirectoryRecord *psubDirRecord = 0;
1303 		std::map<std::string, BackupClientDirectoryRecord *>::iterator
1304 			e(mSubDirectories.find(*d));
1305 
1306 		if(e != mSubDirectories.end())
1307 		{
1308 			// In the list, just use this pointer
1309 			psubDirRecord = e->second;
1310 		}
1311 		else
1312 		{
1313 			// Note: if we have exceeded our storage limit, then
1314 			// we should not upload any more data, nor create any
1315 			// DirectoryRecord representing data that would have
1316 			// been uploaded. This step will be repeated when
1317 			// there is some space available.
1318 			bool doCreateDirectoryRecord = true;
1319 
1320 			// Need to create the record. But do we need to create the directory on the server?
1321 			int64_t subDirObjectID = 0;
1322 			if(en != 0)
1323 			{
1324 				// No. Exists on the server, and we know about it from the listing.
1325 				subDirObjectID = en->GetObjectID();
1326 			}
1327 			else if(rContext.StorageLimitExceeded())
1328 			// know we've got a connection if we get this far,
1329 			// as dir will have been modified.
1330 			{
1331 				doCreateDirectoryRecord = false;
1332 			}
1333 			else
1334 			{
1335 				// Yes, creation required!
1336 				// It is known that it doesn't exist:
1337 				//   if pDirOnStore == 0, then the directory has had an initial sync, and hasn't been modified,
1338 				//	 so it has definitely been created already.
1339 				//   if en == 0 but pDirOnStore != 0, well... obviously it doesn't exist.
1340 
1341 				// Get attributes
1342 				box_time_t attrModTime = 0;
1343 				InodeRefType inodeNum = 0;
1344 				BackupClientFileAttributes attr;
1345 				bool failedToReadAttributes = false;
1346 
1347 				try
1348 				{
1349 					attr.ReadAttributes(dirname.c_str(),
1350 						true /* directories have zero mod times */,
1351 						0 /* not interested in mod time */,
1352 						&attrModTime, 0 /* not file size */,
1353 						&inodeNum);
1354 				}
1355 				catch (BoxException &e)
1356 				{
1357 					BOX_WARNING("Failed to read attributes "
1358 						"of directory, cannot check "
1359 						"for rename, assuming new: '"
1360 						<< dirname << "'");
1361 					failedToReadAttributes = true;
1362 				}
1363 
1364 				// Check to see if the directory has been renamed
1365 				// First, do we have a record in the ID map?
1366 				int64_t renameObjectID = 0, renameInDirectory = 0;
1367 				bool renameDir = false;
1368 				const BackupClientInodeToIDMap &idMap(
1369 					rContext.GetCurrentIDMap());
1370 
1371 				if(!failedToReadAttributes && idMap.Lookup(inodeNum,
1372 					renameObjectID, renameInDirectory))
1373 				{
1374 					// Look up on the server to get the name, to build the local filename
1375 					std::string localPotentialOldName;
1376 					bool isDir = false;
1377 					bool isCurrentVersion = false;
1378 					if(rContext.FindFilename(renameObjectID, renameInDirectory, localPotentialOldName, isDir, isCurrentVersion))
1379 					{
1380 						// Only interested if it's a directory
1381 						if(isDir && isCurrentVersion)
1382 						{
1383 							// Check that the object doesn't exist already
1384 							EMU_STRUCT_STAT st;
1385 							if(EMU_STAT(localPotentialOldName.c_str(), &st) != 0 && errno == ENOENT)
1386 							{
1387 								// Doesn't exist locally, but does exist on the server.
1388 								// Therefore we can safely rename it.
1389 								renameDir = true;
1390 							}
1391 						}
1392 					}
1393 				}
1394 
1395 				// Get connection
1396 				BackupProtocolClient &connection(rContext.GetConnection());
1397 
1398 				// Don't do a check for storage limit exceeded here, because if we get to this
1399 				// stage, a connection will have been opened, and the status known, so the check
1400 				// in the else if(...) above will be correct.
1401 
1402 				// Build attribute stream for sending
1403 				MemBlockStream attrStream(attr);
1404 
1405 				if(renameDir)
1406 				{
1407 					// Rename the existing directory on the server
1408 					connection.QueryMoveObject(renameObjectID, renameInDirectory, mObjectID /* move to this directory */,
1409 						BackupProtocolClientMoveObject::Flags_MoveAllWithSameName | BackupProtocolClientMoveObject::Flags_AllowMoveOverDeletedObject,
1410 						storeFilename);
1411 
1412 					// Put the latest attributes on it
1413 					connection.QueryChangeDirAttributes(renameObjectID, attrModTime, attrStream);
1414 
1415 					// Stop it being deleted later
1416 					BackupClientDeleteList &rdelList(
1417 						rContext.GetDeleteList());
1418 					rdelList.StopDirectoryDeletion(renameObjectID);
1419 
1420 					// This is the ID for the renamed directory
1421 					subDirObjectID = renameObjectID;
1422 				}
1423 				else
1424 				{
1425 					// Create a new directory
1426 					std::auto_ptr<BackupProtocolClientSuccess> dirCreate(connection.QueryCreateDirectory(
1427 						mObjectID, attrModTime, storeFilename, attrStream));
1428 					subDirObjectID = dirCreate->GetObjectID();
1429 
1430 					// Flag as having done this for optimisation later
1431 					haveJustCreatedDirOnServer = true;
1432 				}
1433 			}
1434 
1435 			if (doCreateDirectoryRecord)
1436 			{
1437 				// Create a new record object for this directory
1438 				psubDirRecord = new BackupClientDirectoryRecord(subDirObjectID, *d);
1439 
1440 				// Store in list
1441 				try
1442 				{
1443 					mSubDirectories[*d] = psubDirRecord;
1444 				}
1445 				catch(...)
1446 				{
1447 					delete psubDirRecord;
1448 					psubDirRecord = 0;
1449 					throw;
1450 				}
1451 			}
1452 		}
1453 
1454 		ASSERT(psubDirRecord != 0 || rContext.StorageLimitExceeded());
1455 
1456 		if(psubDirRecord)
1457 		{
1458 			// Sync this sub directory too
1459 			psubDirRecord->SyncDirectory(rParams, mObjectID,
1460 				dirname, rRemotePath + "/" + *d,
1461 				haveJustCreatedDirOnServer);
1462 		}
1463 
1464 		// Zero the corresponding pointer in rEntriesLeftOver, if we have one, so the entry is not deleted later
1465 		if(en != 0)
1466 		{
1467 			for(unsigned int l = 0; l < rEntriesLeftOver.size(); ++l)
1468 			{
1469 				if(rEntriesLeftOver[l] == en)
1470 				{
1471 					rEntriesLeftOver[l] = 0;
1472 					break;
1473 				}
1474 			}
1475 		}
1476 	}
1477 
1478 	// Delete everything which is on the store, but not on disc
1479 	for(unsigned int l = 0; l < rEntriesLeftOver.size(); ++l)
1480 	{
1481 		if(rEntriesLeftOver[l] != 0)
1482 		{
1483 			BackupStoreDirectory::Entry *en = rEntriesLeftOver[l];
1484 
1485 			// These entries can't be deleted immediately, as it would prevent
1486 			// renaming and moving of objects working properly. So we add them
1487 			// to a list, which is actually deleted at the very end of the session.
1488 			// If there's an error during the process, it doesn't matter if things
1489 			// aren't actually deleted, as the whole state will be reset anyway.
1490 			BackupClientDeleteList &rdel(rContext.GetDeleteList());
1491 
1492 			BackupStoreFilenameClear clear(en->GetName());
1493 			std::string localName = MakeFullPath(rLocalPath,
1494 				clear.GetClearFilename());
1495 
1496 			// Delete this entry -- file or directory?
1497 			if((en->GetFlags() & BackupStoreDirectory::Entry::Flags_File) != 0)
1498 			{
1499 				// Set a pending deletion for the file
1500 				rdel.AddFileDelete(mObjectID, en->GetName(),
1501 					localName);
1502 			}
1503 			else if((en->GetFlags() & BackupStoreDirectory::Entry::Flags_Dir) != 0)
1504 			{
1505 				// Set as a pending deletion for the directory
1506 				rdel.AddDirectoryDelete(en->GetObjectID(),
1507 					localName);
1508 
1509 				// If there's a directory record for it in
1510 				// the sub directory map, delete it now
1511 				BackupStoreFilenameClear dirname(en->GetName());
1512 				std::map<std::string, BackupClientDirectoryRecord *>::iterator e(mSubDirectories.find(dirname.GetClearFilename()));
1513 				if(e != mSubDirectories.end())
1514 				{
1515 					// Carefully delete the entry from the map
1516 					BackupClientDirectoryRecord *rec = e->second;
1517 					mSubDirectories.erase(e);
1518 					delete rec;
1519 
1520 					std::string name = MakeFullPath(
1521 						rLocalPath,
1522 						dirname.GetClearFilename());
1523 
1524 					BOX_TRACE("Deleted directory record "
1525 						"for " << name);
1526 				}
1527 			}
1528 		}
1529 	}
1530 
1531 	// Return success flag (will be false if some files failed)
1532 	return allUpdatedSuccessfully;
1533 }
1534 
1535 
1536 // --------------------------------------------------------------------------
1537 //
1538 // Function
1539 //		Name:    BackupClientDirectoryRecord::RemoveDirectoryInPlaceOfFile(SyncParams &, BackupStoreDirectory *, int64_t, const std::string &)
1540 //		Purpose: Called to resolve difficulties when a directory is found on the
1541 //				 store where a file is to be uploaded.
1542 //		Created: 9/7/04
1543 //
1544 // --------------------------------------------------------------------------
1545 void BackupClientDirectoryRecord::RemoveDirectoryInPlaceOfFile(
1546 	SyncParams &rParams,
1547 	BackupStoreDirectory* pDirOnStore,
1548 	BackupStoreDirectory::Entry* pEntry,
1549 	const std::string &rFilename)
1550 {
1551 	// First, delete the directory
1552 	BackupProtocolClient &connection(rParams.mrContext.GetConnection());
1553 	connection.QueryDeleteDirectory(pEntry->GetObjectID());
1554 
1555 	BackupStoreFilenameClear clear(pEntry->GetName());
1556 	rParams.mrContext.GetProgressNotifier().NotifyDirectoryDeleted(
1557 		pEntry->GetObjectID(), clear.GetClearFilename());
1558 
1559 	// Then, delete any directory record
1560 	std::map<std::string, BackupClientDirectoryRecord *>::iterator
1561 		e(mSubDirectories.find(rFilename));
1562 
1563 	if(e != mSubDirectories.end())
1564 	{
1565 		// A record exists for this, remove it
1566 		BackupClientDirectoryRecord *psubDirRecord = e->second;
1567 		mSubDirectories.erase(e);
1568 
1569 		// And delete the object
1570 		delete psubDirRecord;
1571 	}
1572 }
1573 
1574 
1575 
1576 // --------------------------------------------------------------------------
1577 //
1578 // Function
1579 //		Name:    BackupClientDirectoryRecord::UploadFile(
1580 //			 BackupClientDirectoryRecord::SyncParams &,
1581 //			 const std::string &,
1582 //			 const BackupStoreFilename &,
1583 //			 int64_t, box_time_t, box_time_t, bool)
1584 //		Purpose: Private. Upload a file to the server. May send
1585 //			 a patch instead of the whole thing
1586 //		Created: 20/1/04
1587 //
1588 // --------------------------------------------------------------------------
1589 int64_t BackupClientDirectoryRecord::UploadFile(
1590 	BackupClientDirectoryRecord::SyncParams &rParams,
1591 	const std::string &rFilename,
1592 	const BackupStoreFilename &rStoreFilename,
1593 	int64_t FileSize,
1594 	box_time_t ModificationTime,
1595 	box_time_t AttributesHash,
1596 	bool NoPreviousVersionOnServer)
1597 {
1598 	BackupClientContext& rContext(rParams.mrContext);
1599 	ProgressNotifier& rNotifier(rContext.GetProgressNotifier());
1600 
1601 	// Get the connection
1602 	BackupProtocolClient &connection(rContext.GetConnection());
1603 
1604 	// Info
1605 	int64_t objID = 0;
1606 	bool doNormalUpload = true;
1607 	int64_t uploadedSize = -1;
1608 
1609 	// Use a try block to catch store full errors
1610 	try
1611 	{
1612 		// Might an old version be on the server, and is the file
1613 		// size over the diffing threshold?
1614 		if(!NoPreviousVersionOnServer &&
1615 			FileSize >= rParams.mDiffingUploadSizeThreshold)
1616 		{
1617 			// YES -- try to do diff, if possible
1618 			// First, query the server to see if there's an old version available
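			// A non-zero ID in the reply identifies the previous
			// version, whose block index can then be received on
			// the stream; zero means there is nothing to diff
			// against.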
1619 			std::auto_ptr<BackupProtocolClientSuccess> getBlockIndex(connection.QueryGetBlockIndexByName(mObjectID, rStoreFilename));
1620 			int64_t diffFromID = getBlockIndex->GetObjectID();
1621 
1622 			if(diffFromID != 0)
1623 			{
1624 				// Found an old version
1625 				rNotifier.NotifyFileUploadingPatch(this,
1626 					rFilename);
1627 
1628 				// Get the index
1629 				std::auto_ptr<IOStream> blockIndexStream(connection.ReceiveStream());
1630 
1631 				//
1632 				// Diff the file
1633 				//
1634 
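				// The context acts as the DiffTimer for the
				// encode operation below, so bracket the
				// potentially slow diff with
				// Manage/UnManageDiffProcess.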
1635 				rContext.ManageDiffProcess();
1636 
1637 				bool isCompletelyDifferent = false;
1638 
1639 				std::auto_ptr<IOStream> patchStream(
1640 					BackupStoreFile::EncodeFileDiff(
1641 						rFilename.c_str(),
1642 						mObjectID,	/* containing directory */
1643 						rStoreFilename, diffFromID, *blockIndexStream,
1644 						connection.GetTimeout(),
1645 						&rContext, // DiffTimer implementation
1646 						0 /* not interested in the modification time */,
1647 						&isCompletelyDifferent));
1648 
1649 				rContext.UnManageDiffProcess();
1650 
1651 				RateLimitingStream rateLimit(*patchStream,
1652 					rParams.mMaxUploadRate);
1653 				IOStream* pStreamToUpload;
1654 
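				// Interpose the rate limiter only when an
				// upload rate cap is configured; otherwise
				// upload the encoded patch stream directly.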
1655 				if(rParams.mMaxUploadRate > 0)
1656 				{
1657 					pStreamToUpload = &rateLimit;
1658 				}
1659 				else
1660 				{
1661 					pStreamToUpload = patchStream.get();
1662 				}
1663 
1664 				//
1665 				// Upload the patch to the store
1666 				//
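				// If the diff found nothing in common with the
				// old version, send 0 as the diff-from ID so the
				// data is stored as a complete file rather than
				// as a patch.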
1667 				std::auto_ptr<BackupProtocolClientSuccess> stored(connection.QueryStoreFile(mObjectID, ModificationTime,
1668 						AttributesHash, isCompletelyDifferent?(0):(diffFromID), rStoreFilename, *pStreamToUpload));
1669 
1670 				// Get object ID from the result
1671 				objID = stored->GetObjectID();
1672 
1673 				// Don't attempt to upload it again!
1674 				doNormalUpload = false;
1675 
1676 				// Capture number of bytes sent
1677 				uploadedSize = ((BackupStoreFileEncodeStream &)
1678 					*patchStream).GetTotalBytesSent();
1679 			}
1680 		}
1681 
1682 		if(doNormalUpload)
1683 		{
1684 			// below threshold or nothing to diff from, so upload the whole file
1685 			rNotifier.NotifyFileUploading(this, rFilename);
1686 
1687 			// Prepare to upload, getting a stream which will encode the file as we go along
1688 			std::auto_ptr<IOStream> upload(
1689 				BackupStoreFile::EncodeFile(rFilename.c_str(),
1690 					mObjectID, rStoreFilename, NULL,
1691 					&rParams,
1692 					&(rParams.mrRunStatusProvider)));
1693 
1694 			RateLimitingStream rateLimit(*upload,
1695 				rParams.mMaxUploadRate);
1696 			IOStream* pStreamToUpload;
1697 
1698 			if(rParams.mMaxUploadRate > 0)
1699 			{
1700 				pStreamToUpload = &rateLimit;
1701 			}
1702 			else
1703 			{
1704 				pStreamToUpload = upload.get();
1705 			}
1706 
1707 			// Send to store
1708 			std::auto_ptr<BackupProtocolClientSuccess> stored(
1709 				connection.QueryStoreFile(
1710 					mObjectID, ModificationTime,
1711 					AttributesHash,
1712 					0 /* no diff from file ID */,
1713 					rStoreFilename, *pStreamToUpload));
1714 
1715 			// Get object ID from the result
1716 			objID = stored->GetObjectID();
1717 
1718 			uploadedSize = ((BackupStoreFileEncodeStream &)
1719 				*upload).GetTotalBytesSent();
1720 		}
1721 	}
1722 	catch(BoxException &e)
1723 	{
1724 		rContext.UnManageDiffProcess();
1725 
1726 		if(e.GetType() == ConnectionException::ExceptionType &&
1727 			e.GetSubType() == ConnectionException::Protocol_UnexpectedReply)
1728 		{
1729 			// Check what error the protocol reported;
1730 			// this is more useful to users than the exception.
1731 			int type, subtype;
1732 			if(connection.GetLastError(type, subtype))
1733 			{
1734 				if(type == BackupProtocolClientError::ErrorType
1735 				&& subtype == BackupProtocolClientError::Err_StorageLimitExceeded)
1736 				{
1737 					// The hard limit was exceeded on the server, notify!
1738 					rParams.mrSysadminNotifier.NotifySysadmin(
1739 						SysadminNotifier::StoreFull);
1740 					// return an error code instead of
1741 					// throwing an exception that we
1742 					// can't debug.
1743 					return 0;
1744 				}
1745 				rNotifier.NotifyFileUploadServerError(this,
1746 					rFilename, type, subtype);
1747 			}
1748 		}
1749 
1750 		// Send the error on its way
1751 		throw;
1752 	}
1753 
1754 	rNotifier.NotifyFileUploaded(this, rFilename, FileSize, uploadedSize);
1755 
1756 	// Return the new object ID of this file
1757 	return objID;
1758 }
1759 
1760 
1761 // --------------------------------------------------------------------------
1762 //
1763 // Function
1764 //		Name:    BackupClientDirectoryRecord::SetErrorWhenReadingFilesystemObject(SyncParams &, const char *)
1765 //		Purpose: Sets the error state when there were problems reading an object
1766 //				 from the filesystem.
1767 //		Created: 29/3/04
1768 //
1769 // --------------------------------------------------------------------------
1770 void BackupClientDirectoryRecord::SetErrorWhenReadingFilesystemObject(BackupClientDirectoryRecord::SyncParams &rParams, const char *Filename)
1771 {
1772 	// Zero hash, so it gets synced properly next time round.
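	// (forcing the directory to be re-examined rather than skipped as
	// unchanged on the next run)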
1773 	::memset(mStateChecksum, 0, sizeof(mStateChecksum));
1774 
1775 	// Log the error - already done by caller
1776 	/*
1777 	rParams.GetProgressNotifier().NotifyFileReadFailed(this,
1778 		Filename, strerror(errno));
1779 	*/
1780 
	// Mark that an error occurred in the parameters object
1782 	rParams.mReadErrorsOnFilesystemObjects = true;
1783 }
1784 
1785 
1786 
1787 // --------------------------------------------------------------------------
1788 //
1789 // Function
1790 //		Name:    BackupClientDirectoryRecord::SyncParams::SyncParams(RunStatusProvider &, SysadminNotifier &, ProgressNotifier &, BackupClientContext &)
1791 //		Purpose: Constructor
1792 //		Created: 8/3/04
1793 //
1794 // --------------------------------------------------------------------------
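// Most numeric members start as placeholder defaults (zero or effectively
// unlimited); the caller is expected to overwrite them from its
// configuration before the sync runs.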
1795 BackupClientDirectoryRecord::SyncParams::SyncParams(
1796 	RunStatusProvider &rRunStatusProvider,
1797 	SysadminNotifier &rSysadminNotifier,
1798 	ProgressNotifier &rProgressNotifier,
1799 	BackupClientContext &rContext)
1800 : mSyncPeriodStart(0),
1801   mSyncPeriodEnd(0),
1802   mMaxUploadWait(0),
1803   mMaxFileTimeInFuture(99999999999999999LL),
1804   mFileTrackingSizeThreshold(16*1024),
1805   mDiffingUploadSizeThreshold(16*1024),
1806   mrRunStatusProvider(rRunStatusProvider),
1807   mrSysadminNotifier(rSysadminNotifier),
1808   mrProgressNotifier(rProgressNotifier),
1809   mrContext(rContext),
1810   mReadErrorsOnFilesystemObjects(false),
1811   mMaxUploadRate(0),
1812   mUploadAfterThisTimeInTheFuture(99999999999999999LL),
1813   mHaveLoggedWarningAboutFutureFileTimes(false)
1814 {
1815 }
1816 
1817 
1818 // --------------------------------------------------------------------------
1819 //
1820 // Function
1821 //		Name:    BackupClientDirectoryRecord::SyncParams::~SyncParams()
1822 //		Purpose: Destructor
1823 //		Created: 8/3/04
1824 //
1825 // --------------------------------------------------------------------------
1826 BackupClientDirectoryRecord::SyncParams::~SyncParams()
1827 {
1828 }
1829 
1830 // --------------------------------------------------------------------------
1831 //
1832 // Function
1833 //		Name:    BackupClientDirectoryRecord::Deserialize(Archive & rArchive)
1834 //		Purpose: Deserializes this object instance from a stream of bytes, using an Archive abstraction.
1835 //
1836 //		Created: 2005/04/11
1837 //
1838 // --------------------------------------------------------------------------
1839 void BackupClientDirectoryRecord::Deserialize(Archive & rArchive)
1840 {
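	// The layout read here must stay in step with Serialize() below:
	// object ID, sub-directory name, sync flags, then three counted
	// sections (state checksum words, pending entries, sub-directories).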
1841 	// Delete any existing sub-directory records (recursively)
1842 	DeleteSubDirectories();
1843 
1844 	// Delete maps
1845 	if(mpPendingEntries != 0)
1846 	{
1847 		delete mpPendingEntries;
1848 		mpPendingEntries = 0;
1849 	}
1850 
1851 	//
1852 	// Basic identification and sync state
1853 	//
1854 	rArchive.Read(mObjectID);
1855 	rArchive.Read(mSubDirName);
1856 	rArchive.Read(mInitialSyncDone);
1857 	rArchive.Read(mSyncDone);
1858 
1859 	//
1860 	// State checksum: element count followed by the checksum words
1861 	//
1862 	int64_t iCount = 0;
1863 	rArchive.Read(iCount);
1864 
1865 	if (iCount != sizeof(mStateChecksum)/sizeof(mStateChecksum[0]))
1866 	{
1867 		// the stored count does not match our checksum array size: the internal representation has changed, so throw for now
1868 		THROW_EXCEPTION(CommonException, Internal)
1869 	}
1870 
1871 	for (int v = 0; v < iCount; v++)
1872 	{
1873 		// Load each checksum entry
1874 		rArchive.Read(mStateChecksum[v]);
1875 	}
1876 
1877 	//
1878 	// Pending entries: count, then (name, time) pairs
1879 	//
1880 	iCount = 0;
1881 	rArchive.Read(iCount);
1882 
1883 	if (iCount > 0)
1884 	{
1885 		// load each pending entry
1886 		mpPendingEntries = new std::map<std::string, box_time_t>;
1887 		if (!mpPendingEntries)
1888 		{
1889 			throw std::bad_alloc();
1890 		}
1891 
1892 		for (int v = 0; v < iCount; v++)
1893 		{
1894 			std::string strItem;
1895 			box_time_t btItem;
1896 
1897 			rArchive.Read(strItem);
1898 			rArchive.Read(btItem);
1899 			(*mpPendingEntries)[strItem] = btItem;
1900 		}
1901 	}
1902 
1903 	//
1904 	// Sub-directory records: count, then name plus recursively serialized record
1905 	//
1906 	iCount = 0;
1907 	rArchive.Read(iCount);
1908 
1909 	if (iCount > 0)
1910 	{
1911 		for (int v = 0; v < iCount; v++)
1912 		{
1913 			std::string strItem;
1914 			rArchive.Read(strItem);
1915 
1916 			BackupClientDirectoryRecord* pSubDirRecord =
1917 				new BackupClientDirectoryRecord(0, "");
1918 			// will be deserialized anyway, give it id 0 for now
1919 
1920 			if (!pSubDirRecord)
1921 			{
1922 				throw std::bad_alloc();
1923 			}
1924 
1925 			/***** RECURSE *****/
1926 			pSubDirRecord->Deserialize(rArchive);
1927 			mSubDirectories[strItem] = pSubDirRecord;
1928 		}
1929 	}
1930 }
1931 
1932 // --------------------------------------------------------------------------
1933 //
1934 // Function
1935 //		Name:    BackupClientDirectoryRecord::Serialize(Archive & rArchive)
1936 //		Purpose: Serializes this object instance into a stream of bytes, using an Archive abstraction.
1937 //
1938 //		Created: 2005/04/11
1939 //
1940 // --------------------------------------------------------------------------
1941 void BackupClientDirectoryRecord::Serialize(Archive & rArchive) const
1942 {
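	// Keep the write order here in step with Deserialize() above.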
1943 	//
1944 	// Basic identification and sync state
1945 	//
1946 	rArchive.Write(mObjectID);
1947 	rArchive.Write(mSubDirName);
1948 	rArchive.Write(mInitialSyncDone);
1949 	rArchive.Write(mSyncDone);
1950 
1951 	//
1952 	// State checksum: element count followed by the checksum words
1953 	//
1954 	int64_t iCount = 0;
1955 
1956 	// when reading back the archive, we will
1957 	// need to know how many items there are.
1958 	iCount = sizeof(mStateChecksum) / sizeof(mStateChecksum[0]);
1959 	rArchive.Write(iCount);
1960 
1961 	for (int v = 0; v < iCount; v++)
1962 	{
1963 		rArchive.Write(mStateChecksum[v]);
1964 	}
1965 
1966 	//
1967 	// Pending entries: count, then (name, time) pairs
1968 	//
1969 	if (!mpPendingEntries)
1970 	{
1971 		iCount = 0;
1972 		rArchive.Write(iCount);
1973 	}
1974 	else
1975 	{
1976 		iCount = mpPendingEntries->size();
1977 		rArchive.Write(iCount);
1978 
1979 		for (std::map<std::string, box_time_t>::const_iterator
1980 			i =  mpPendingEntries->begin();
1981 			i != mpPendingEntries->end(); i++)
1982 		{
1983 			rArchive.Write(i->first);
1984 			rArchive.Write(i->second);
1985 		}
1986 	}
1987 	//
1988 	// Sub-directory records: count, then name plus recursively serialized record
1989 	//
1990 	iCount = mSubDirectories.size();
1991 	rArchive.Write(iCount);
1992 
1993 	for (std::map<std::string, BackupClientDirectoryRecord*>::const_iterator
1994 		i =  mSubDirectories.begin();
1995 		i != mSubDirectories.end(); i++)
1996 	{
1997 		const BackupClientDirectoryRecord* pSubItem = i->second;
1998 		ASSERT(pSubItem);
1999 
2000 		rArchive.Write(i->first);
2001 		pSubItem->Serialize(rArchive);
2002 	}
2003 }
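
// Illustrative usage sketch (comment only, not compiled in): saving and
// restoring a record tree through the Archive abstraction used above. The
// stream class, open flags, timeout and pRootRecord pointer below are
// assumptions for the example, not definitions from this file.
//
//     FileStream out("bbackupd.state", O_WRONLY | O_CREAT);
//     Archive storeArchive(out, IOStream::TimeOutInfinite);
//     pRootRecord->Serialize(storeArchive);
//
//     FileStream in("bbackupd.state", O_RDONLY);
//     Archive loadArchive(in, IOStream::TimeOutInfinite);
//     pRootRecord->Deserialize(loadArchive);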
2004