1 /*
2 * hashtree.cpp
3 * serp++
4 *
5 * Created by Victor Grishchenko on 3/6/09.
6 * Copyright 2009-2016 TECHNISCHE UNIVERSITEIT DELFT. All rights reserved.
7 *
8 */
9 #include "compat.h"
10 #include "hashtree.h"
11 #include "bin_utils.h"
12 //#include <openssl/sha.h>
13 #include "sha1.h"
14 #include <cassert>
15 #include <cstring>
16 #include <cstdlib>
17 #include <fcntl.h>
18 #include "swift.h"
19
20 #include <iostream>
21
22 using namespace swift;
23
// All-zero digest used throughout as the "no hash / unset" sentinel.
const Sha1Hash Sha1Hash::ZERO = Sha1Hash();
25
SHA1(const void * data,size_t length,unsigned char * hash)26 void SHA1(const void *data, size_t length, unsigned char *hash)
27 {
28 blk_SHA_CTX ctx;
29 blk_SHA1_Init(&ctx);
30 blk_SHA1_Update(&ctx, data, length);
31 blk_SHA1_Final(hash, &ctx);
32 }
33
// Hash of an internal tree node: SHA1 over the concatenation of the
// left child's digest followed by the right child's digest.
Sha1Hash::Sha1Hash(const Sha1Hash& left, const Sha1Hash& right)
{
    blk_SHA_CTX context;
    blk_SHA1_Init(&context);
    blk_SHA1_Update(&context, left.bits, SIZE);
    blk_SHA1_Update(&context, right.bits, SIZE);
    blk_SHA1_Final(bits, &context);
}
42
Sha1Hash(const char * data,size_t length)43 Sha1Hash::Sha1Hash(const char* data, size_t length)
44 {
45 if (length==-1)
46 length = strlen(data);
47 SHA1((unsigned char*)data,length,bits);
48 }
49
Sha1Hash(const uint8_t * data,size_t length)50 Sha1Hash::Sha1Hash(const uint8_t* data, size_t length)
51 {
52 SHA1(data,length,bits);
53 }
54
Sha1Hash(bool hex,const char * hash)55 Sha1Hash::Sha1Hash(bool hex, const char* hash)
56 {
57 if (hex) {
58 int val;
59 for (int i=0; i<SIZE; i++) {
60 if (sscanf(hash+i*2, "%2x", &val)!=1) {
61 memset(bits,0,20);
62 return;
63 }
64 bits[i] = val;
65 }
66 assert(this->hex()==std::string(hash));
67 } else
68 memcpy(bits,hash,SIZE);
69 }
70
hex() const71 std::string Sha1Hash::hex() const
72 {
73 char hex[HASHSZ*2+1];
74 for (int i=0; i<HASHSZ; i++)
75 sprintf(hex+i*2, "%02x", (int)(unsigned char)bits[i]);
76 return std::string(hex,HASHSZ*2);
77 }
78
79
// Copy assignment: duplicate the raw digest bytes (self-assignment safe).
Sha1Hash & Sha1Hash::operator= (const Sha1Hash & source)
{
    if (&source != this)
        memcpy(bits, source.bits, SIZE);
    return *this;
}
87
88
89
90 /** H a s h t r e e */
91
92
// Attach/open a hash tree for the content managed by *storage.
// Decides between three recovery strategies:
//   1. full recompute from content (Submit),
//   2. fast restore from .mhash + .mbinmap checkpoint (deserialize),
//   3. partial recompute by re-reading content (RecoverProgress).
// On any unrecoverable error the tree is marked broken via SetBroken().
MmapHashTree::MmapHashTree(Storage *storage, const Sha1Hash& root_hash, uint32_t chunk_size, std::string hash_filename,
                           bool force_check_diskvshash,std::string binmap_filename) :
    HashTree(), root_hash_(root_hash), hashes_(NULL),
    peak_count_(0), hash_fd_(-1), hash_filename_(hash_filename), size_(0), sizec_(0), complete_(0), completec_(0),
    chunk_size_(chunk_size), storage_(storage)
{
    // MULTIFILE
    storage_->SetHashTree(this);
    // If multi-file spec we know the exact size even before getting peaks+last chunk
    int64_t sizefromspec = storage_->GetSizeFromSpec();
    if (sizefromspec != -1) {
        set_size(sizefromspec);
        // Resize all files
        (void)storage_->ResizeReserved(sizefromspec);
    }

    // Arno: if user doesn't want to check hashes but no .mhash, check hashes anyway
    bool actually_force_check_diskvshash = force_check_diskvshash;
    bool mhash_exists=true;
    int64_t mhash_size = file_size_by_path_utf8(hash_filename);
    if (mhash_size < 0)
        mhash_exists = false;
    // Arno, 2012-07-26: Quick fix against partial downloads without .mhash.
    // Previously they would be Submit()ed and the root_hash_ would change.
    // Now if the root_hash_ is set, we don't recompute the tree. More permanent
    // solution is to hashcheck the content, and if it doesn't match the root
    // hash, revert to a clean state.
    //
    if (root_hash_==Sha1Hash::ZERO && !mhash_exists)
        actually_force_check_diskvshash = true;

    // Arno: if the remainder of the hashtree state is on disk we can
    // hashcheck very quickly
    bool binmap_exists=true;
    int res = file_exists_utf8(binmap_filename);
    if (res <= 0)
        binmap_exists = false;
    if (root_hash_==Sha1Hash::ZERO && !binmap_exists)
        actually_force_check_diskvshash = true;

    //fprintf(stderr,"hashtree: hashchecking %s file %s destdir %s want %s do %s mhash-on-disk %s binmap-on-disk %s\n", root_hash.hex().c_str(), storage_->GetOSPathName().c_str(), storage_->GetDestDir().c_str(), (force_check_diskvshash ? "yes" : "no"), (actually_force_check_diskvshash? "yes" : "no"), (mhash_exists? "yes" : "no"), (binmap_exists? "yes" : "no") );
    // Arno, 2012-07-27: Sanity check: meta files without content cannot be
    // reconciled, so refuse to continue.
    if ((mhash_exists || binmap_exists) && storage_->GetReservedSize() == -1) {
        print_error("meta files present but not content");
        SetBroken();
        return;
    }

    // Arno, 2012-09-19: Hash file created only when msgs incoming
    if (mhash_exists) {
        hash_fd_ = OpenHashFile();
        if (hash_fd_ < 0)
            return;  // OpenHashFile already called SetBroken()
    }

    // Arno: if user wants to or no .mhash, and if root hash unknown (new file) and no checkpoint, (re)calc root hash
    if (storage_->GetReservedSize() > storage_->GetMinimalReservedSize() && actually_force_check_diskvshash) {
        // fresh submit, hash it
        dprintf("%s hashtree full compute\n",tintstr());
        //assert(storage_->GetReservedSize());
        Submit();
    } else if (mhash_exists && binmap_exists && mhash_size > 0) {
        // Arno: recreate hash tree without rereading content
        dprintf("%s hashtree read from checkpoint\n",tintstr());
        FILE *fp = fopen_utf8(binmap_filename.c_str(),"rb");
        if (!fp) {
            print_error("hashtree: cannot open .mbinmap file");
            SetBroken();
            return;
        }
        if (deserialize(fp) < 0) {
            // Try to rebuild hashtree data
            Submit();
        }
        fclose(fp);
    } else {
        // Arno: no data on disk, or mhash on disk, but no binmap. In latter
        // case recreate binmap by reading content again. Historic optimization
        // of Submit.
        dprintf("%s hashtree empty or partial recompute\n",tintstr());
        RecoverProgress();
    }
}
176
177
MmapHashTree(bool dummy,std::string binmap_filename)178 MmapHashTree::MmapHashTree(bool dummy, std::string binmap_filename) :
179 HashTree(), root_hash_(Sha1Hash::ZERO), hashes_(NULL), peak_count_(0), hash_fd_(0),
180 hash_filename_(""), filename_(""), size_(0), sizec_(0), complete_(0), completec_(0),
181 chunk_size_(0)
182 {
183 FILE *fp = fopen_utf8(binmap_filename.c_str(),"rb");
184 if (!fp) {
185 SetBroken();
186 return;
187 }
188 if (partial_deserialize(fp) < 0) {
189 }
190 fclose(fp);
191 }
192
OpenHashFile()193 int MmapHashTree::OpenHashFile()
194 {
195 hash_fd_ = open_utf8(hash_filename_.c_str(),OPENFLAGS,S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH);
196 if (hash_fd_<0) {
197 hash_fd_ = -1;
198 print_error("cannot create/open hash file");
199 SetBroken();
200 }
201 return hash_fd_;
202 }
203
204
205 // Reads complete file and constructs hash tree
Submit()206 void MmapHashTree::Submit()
207 {
208 size_ = storage_->GetReservedSize();
209 sizec_ = (size_ + chunk_size_-1) / chunk_size_;
210
211 //fprintf(stderr,"hashtree: submit: cs %i\n", chunk_size_);
212
213 peak_count_ = gen_peaks(sizec_,peaks_);
214 int hashes_size = Sha1Hash::SIZE*sizec_*2;
215 dprintf("%s hashtree submit resizing hash file to %d\n",tintstr(), hashes_size);
216 if (hashes_size == 0) {
217 SetBroken();
218 return;
219 }
220
221 // Arno, 2012-09-19: Hash file created only when msgs incoming
222 if (hash_fd_ == -1) {
223 hash_fd_ = OpenHashFile();
224 if (hash_fd_ < 0)
225 return;
226 }
227
228 file_resize(hash_fd_,hashes_size);
229 hashes_ = (Sha1Hash*) memory_map(hash_fd_,hashes_size);
230 if (!hashes_) {
231 size_ = sizec_ = complete_ = completec_ = 0;
232 print_error("mmap failed");
233 SetBroken();
234 return;
235 }
236 size_t last_piece_size = (sizec_ - 1) % (chunk_size_) + 1;
237 char *chunk = new char[chunk_size_];
238 for (uint64_t i=0; i<sizec_; i++) {
239
240 ssize_t rd = storage_->Read(chunk,chunk_size_,i*chunk_size_);
241 if (rd<(chunk_size_) && i!=sizec_-1) {
242 free(hashes_);
243 hashes_=NULL;
244 SetBroken();
245 return;
246 }
247 bin_t pos(0,i);
248 hashes_[pos.toUInt()] = Sha1Hash(chunk,rd);
249 ack_out_.set(pos);
250 while (pos.is_right()) {
251 pos = pos.parent();
252 hashes_[pos.toUInt()] = Sha1Hash(hashes_[pos.left().toUInt()],hashes_[pos.right().toUInt()]);
253 }
254 complete_+=rd;
255 completec_++;
256 }
257 delete chunk;
258 for (int p=0; p<peak_count_; p++) {
259 peak_hashes_[p] = hashes_[peaks_[p].toUInt()];
260 }
261
262 Sha1Hash calcroothash = DeriveRoot();
263 if (root_hash_ != Sha1Hash::ZERO && calcroothash != root_hash_) {
264 print_error("hash tree calculation error");
265 SetBroken();
266 return;
267 }
268 root_hash_ = calcroothash;
269 }
270
271
272 /** Basically, simulated receiving every single chunk, except
273 for some optimizations.
274 Precondition: root hash known */
// Rebuild download progress (ack_out_, complete_/completec_) by
// re-reading the content on disk and verifying each chunk against the
// hashes stored in the .mhash file.
void MmapHashTree::RecoverProgress()
{
    // Restore and verify peak hashes from the hash file first; this also
    // mmaps hashes_ (via OfferPeakHash).
    if (!RecoverPeakHashes())
        return; // Not fatal

    // at this point, we may use mmapd hashes already
    // so, lets verify hashes and the data we've got
    char *zero_chunk = new char[chunk_size_];
    memset(zero_chunk, 0, chunk_size_);
    Sha1Hash zero_hash(zero_chunk,chunk_size_);

    // Arno: loop over all pieces, read each from file
    // Note that we may have the complete hashtree, but not have all pieces.
    // So hash file gives too little information to determine whether file is
    // complete on disk, hence the .mbinmap file.
    //
    char *buf = new char[chunk_size_];
    for (int p=0; p<size_in_chunks(); p++) {
        bin_t pos(0,p);
        // No stored hash for this chunk => nothing to verify against.
        if (hashes_[pos.toUInt()]==Sha1Hash::ZERO)
            continue;
        ssize_t rd = storage_->Read(buf,chunk_size_,p*chunk_size_);
        // Short read before the last chunk: rest of content is missing.
        if (rd!=(chunk_size_) && p!=size_in_chunks()-1)
            break;
        // All-zero chunk whose expected hash isn't the zero-chunk hash:
        // treat as "piece not downloaded yet" rather than corruption.
        if (rd==(chunk_size_) && !memcmp(buf, zero_chunk, rd) &&
                hashes_[pos.toUInt()]!=zero_hash) // FIXME // Arno == don't have piece yet?
            continue;
        // Chunk fails verification against the tree: skip it.
        if (!OfferHash(pos, Sha1Hash(buf,rd)))
            continue;
        ack_out_.set(pos);
        completec_++;
        complete_+=rd;
        if (rd!=(chunk_size_) && p==size_in_chunks()-1) // set the exact file size
            size_ = ((sizec_-1)*chunk_size_) + rd;
    }
    delete[] buf;
    delete[] zero_chunk;
}
316
317 /** Precondition: root hash known */
RecoverPeakHashes()318 bool MmapHashTree::RecoverPeakHashes()
319 {
320 int64_t ret = storage_->GetReservedSize();
321 if (ret < 0)
322 return false;
323
324 uint64_t size = ret;
325 uint64_t sizek = (size + chunk_size_-1) / chunk_size_;
326
327 // Arno: Calc location of peak hashes, read them from hash file and check if
328 // they match to root hash. If so, load hashes into memory.
329 bin_t peaks[64];
330 int peak_count = gen_peaks(sizek,peaks);
331 for (int i=0; i<peak_count; i++) {
332 Sha1Hash peak_hash;
333 file_seek(hash_fd_,peaks[i].toUInt()*sizeof(Sha1Hash));
334 if (read(hash_fd_,&peak_hash,sizeof(Sha1Hash))!=sizeof(Sha1Hash))
335 return false;
336 OfferPeakHash(peaks[i], peak_hash);
337 }
338 if (!this->size())
339 return false; // if no valid peak hashes found
340
341 return true;
342 }
343
// Write the checkpoint (.mbinmap) state: format version, root hash,
// chunk size, completion counters and the ack_out_ binmap. The
// fprintf_retiffail macro returns a negative value on write failure.
int MmapHashTree::serialize(FILE *fp)
{
    fprintf_retiffail(fp,"version %i\n", 1);
    fprintf_retiffail(fp,"root hash %s\n", root_hash_.hex().c_str());
    fprintf_retiffail(fp,"chunk size %" PRIu32 "\n", chunk_size_);
    fprintf_retiffail(fp,"complete %" PRIu64 "\n", complete_);
    fprintf_retiffail(fp,"completec %" PRIu64 "\n", completec_);
    return ack_out_.serialize(fp);
}
353
354
355 /** Arno: recreate hash tree from .mbinmap file without rereading content.
356 * Precondition: root hash known
357 */
int MmapHashTree::deserialize(FILE *fp)
{
    // Full restore: checkpoint header plus peak-hash verification
    // against the content/hash file on disk.
    return internal_deserialize(fp,true);
}
362
int MmapHashTree::partial_deserialize(FILE *fp)
{
    // Header-only restore (root hash, counters); no content attached.
    return internal_deserialize(fp,false);
}
367
368
internal_deserialize(FILE * fp,bool contentavail)369 int MmapHashTree::internal_deserialize(FILE *fp,bool contentavail)
370 {
371
372 char hexhashstr[256];
373 uint64_t c,cc;
374 uint32_t cs;
375 int version;
376
377 fscanf_retiffail(fp,"version %i\n", &version);
378 fscanf_retiffail(fp,"root hash %s\n", hexhashstr);
379 fscanf_retiffail(fp,"chunk size %" PRIu32 "\n", &cs);
380 fscanf_retiffail(fp,"complete %" PRIu64 "\n", &c);
381 fscanf_retiffail(fp,"completec %" PRIu64 "\n", &cc);
382
383 if (ack_out_.deserialize(fp) < 0)
384 return -1;
385 root_hash_ = Sha1Hash(true, hexhashstr);
386 chunk_size_ = cs;
387 complete_ = c;
388 completec_ = cc;
389
390 // Arno, 2012-01-03: Hack to just get root hash
391 if (!contentavail)
392 return 2;
393
394 if (!RecoverPeakHashes()) {
395 root_hash_ = Sha1Hash::ZERO;
396 ack_out_.clear();
397 return -1;
398 }
399
400 // Are reset by RecoverPeakHashes() for some reason.
401 complete_ = c;
402 completec_ = cc;
403 size_ = storage_->GetReservedSize();
404 sizec_ = (size_ + chunk_size_-1) / chunk_size_;
405
406 return 0;
407 }
408
409
// Accept one peak-hash candidate (peaks arrive in tree-descending
// order). Once the accumulated peaks reproduce root_hash_, the content
// size is known: resize the content file, create and mmap the hash file,
// and store the peak hashes in it. Returns true when the peaks check out.
bool MmapHashTree::OfferPeakHash(bin_t pos, const Sha1Hash& hash)
{
    dprintf("%s hashtree offer peak %s\n",tintstr(),pos.str().c_str());

    // Arno: This code expects peaks to be sent in tree descending order,
    // as is now required in PPSP-04.
    //assert(!size_);
    if (peak_count_) {
        // Each new peak must sit on a lower layer than its predecessor
        // and start exactly where the predecessor's coverage ends;
        // otherwise restart the candidate list from scratch.
        bin_t last_peak = peaks_[peak_count_-1];
        if (pos.layer()>=last_peak.layer() ||
                pos.base_offset()!=last_peak.base_offset()+last_peak.base_length())
            peak_count_ = 0;
    }
    peaks_[peak_count_] = pos;
    peak_hashes_[peak_count_] = hash;
    peak_count_++;
    // check whether peak hash candidates add up to the root hash
    Sha1Hash mustbe_root = DeriveRoot();
    if (mustbe_root!=root_hash_)
        return false;
    // Peaks verified: sum their coverage to get the chunk count.
    for (int i=0; i<peak_count_; i++)
        sizec_ += peaks_[i].base_length();

    // bingo, we now know the file size (rounded up to a chunk_size() unit)

    if (!size_) // MULTIFILE: not known from spec
        size_ = sizec_ * chunk_size_;
    completec_ = complete_ = 0;
    sizec_ = (size_ + chunk_size_-1) / chunk_size_;

    // ARNOTODO: win32: this is pretty slow for ~200 MB already. Perhaps do
    // on-demand sizing for Win32?
    uint64_t cur_size = storage_->GetReservedSize();
    if (cur_size<=(sizec_-1)*chunk_size_ || cur_size>sizec_*chunk_size_) {
        dprintf("%s hashtree offerpeak resizing file\n",tintstr());
        if (storage_->ResizeReserved(size_)) {
            print_error("cannot set file size\n");
            size_=0; // remain in the 0-state
            return false;
        }
    }

    // Arno, 2012-09-19: Hash file created only when msgs incoming
    if (hash_fd_ == -1) {
        hash_fd_ = OpenHashFile();
        if (hash_fd_ < 0)
            return false;
    }

    // mmap the hash file into memory
    uint64_t expected_size = sizeof(Sha1Hash)*sizec_*2;
    // Arno, 2011-10-18: on Windows we could optimize this away,
    //CreateFileMapping, see compat.cpp will resize the file for us with
    // the right params.
    //
    if (file_size(hash_fd_) != expected_size) {
        dprintf("%s hashtree offerpeak resizing hash file\n",tintstr());
        file_resize(hash_fd_, expected_size);
    }

    hashes_ = (Sha1Hash*) memory_map(hash_fd_,expected_size);
    if (!hashes_) {
        size_ = sizec_ = complete_ = completec_ = 0;
        print_error("mmap failed");
        return false;
    }

    // Seed the tree with the now-trusted peak hashes.
    for (int i=0; i<peak_count_; i++)
        hashes_[peaks_[i].toUInt()] = peak_hashes_[i];

    dprintf("%s hashtree memory mapped\n",tintstr());

    return true;
}
484
485
// Combine the current peak hashes, starting from the last (lowest)
// peak and walking up, into the root of the smallest tree covering the
// content. Returns Sha1Hash::ZERO when the peak list is inconsistent.
Sha1Hash MmapHashTree::DeriveRoot()
{

    dprintf("%s hashtree deriving root\n",tintstr());

    int c = peak_count_-1;
    bin_t p = peaks_[c];
    Sha1Hash hash = peak_hashes_[c];
    c--;
    // Arno, 2011-10-14: Root hash = top of smallest tree covering content IMHO.
    //while (!p.is_all()) {
    while (c >= 0) {
        if (p.is_left()) {
            // No right sibling: pad with the zero hash and move up.
            p = p.parent();
            hash = Sha1Hash(hash,Sha1Hash::ZERO);
        } else {
            // The next remaining peak must be exactly our left sibling.
            if (c<0 || peaks_[c]!=p.sibling())
                return Sha1Hash::ZERO;
            hash = Sha1Hash(peak_hashes_[c],hash);
            p = p.parent();
            c--;
        }
    }

    //fprintf(stderr,"hashtree: derive: root hash is %s\n", hash.hex().c_str() );

    //fprintf(stderr,"root bin is %" PRIi64 " covers %" PRIi64 "\n", p.toUInt(), p.base_length() );
    return hash;
}
515
516
517 /** For live streaming: appends the data, adjusts the tree.
518 @ return the number of fresh (tail) peak hashes */
int MmapHashTree::AppendData(char* data, int length)
{
    // Stub: not implemented for MmapHashTree; always reports zero fresh
    // (tail) peak hashes.
    return 0;
}
523
524
peak_for(bin_t pos) const525 bin_t MmapHashTree::peak_for(bin_t pos) const
526 {
527 int pi=0;
528 while (pi<peak_count_ && !peaks_[pi].contains(pos))
529 pi++;
530 return pi==peak_count_ ? bin_t(bin_t::NONE) : peaks_[pi];
531 }
532
// Verify/store a hash for bin pos. For a base bin, walk up the tree
// combining with stored sibling hashes until reaching a bin whose hash
// is already trusted (peak, acked, or previously verified); return
// whether the recomputed hash matches that trusted one. On success,
// mark the uncle and direct-parent paths as verified (LESSHASH).
bool MmapHashTree::OfferHash(bin_t pos, const Sha1Hash& hash)
{
    if (!size_) // only peak hashes are accepted at this point
        return OfferPeakHash(pos,hash);
    if (hashes_ == NULL) {
        dprintf("%s hashtree never loaded correctly from disk\n",tintstr());
        return false;
    }

    bin_t peak = peak_for(pos);
    if (peak.is_none())
        return false;
    if (peak==pos)
        return hash == hashes_[pos.toUInt()];
    if (!ack_out_.is_empty(pos.parent()))
        return hash==hashes_[pos.toUInt()]; // have this hash already, even accptd data
    // LESSHASH
    // Arno: if we already verified this hash against the root, don't replace
    if (!is_hash_verified_.is_empty(bin_t(0,pos.toUInt())))
        return hash == hashes_[pos.toUInt()];

    hashes_[pos.toUInt()] = hash;
    if (!pos.is_base())
        return false; // who cares?
    bin_t p = pos;
    Sha1Hash uphash = hash;
    // Arno: Note well: bin_t(0,p.toUInt()) is to abuse binmap as bitmap.
    while (p!=peak && ack_out_.is_empty(p) && is_hash_verified_.is_empty(bin_t(0,p.toUInt()))) {
        hashes_[p.toUInt()] = uphash;
        p = p.parent();
        // Arno: Prevent poisoning the tree with bad values:
        // Left hand hashes should never be zero, and right
        // hand hash is only zero for the last packet, i.e.,
        // layer 0. Higher layers will never have 0 hashes
        // as SHA1(zero+zero) != zero (but b80de5...)
        //
        if (hashes_[p.left().toUInt()] == Sha1Hash::ZERO || hashes_[p.right().toUInt()] == Sha1Hash::ZERO)
            break;
        uphash = Sha1Hash(hashes_[p.left().toUInt()],hashes_[p.right().toUInt()]);
    }// walk to the nearest proven hash

    // The climb stopped at a trusted bin; the offered hash is good only
    // if our recomputed hash agrees with the stored one there.
    bool success = (uphash==hashes_[p.toUInt()]);
    // LESSHASH
    if (success) {
        // Arno: The hash checks out. Mark all hashes on the uncle path as
        // being verified, so we don't have to go higher than them on a next
        // check.
        p = pos;
        // Arno: Note well: bin_t(0,p.toUInt()) is to abuse binmap as bitmap.
        is_hash_verified_.set(bin_t(0,p.toUInt()));
        while (p.layer() != peak.layer()) {
            p = p.parent().sibling();
            is_hash_verified_.set(bin_t(0,p.toUInt()));
        }
        // Also mark hashes on direct path to root as verified. Doesn't decrease
        // #checks, but does increase the number of verified hashes faster.
        p = pos;
        while (p != peak) {
            p = p.parent();
            is_hash_verified_.set(bin_t(0,p.toUInt()));
        }
    }

    return success;
}
598
599
// Accept a data chunk for base bin pos: verify its hash against the
// tree (OfferHash), then write it to storage and update the completion
// counters. A short final chunk fixes the exact content size.
bool MmapHashTree::OfferData(bin_t pos, const char* data, size_t length)
{
    if (!size())
        return false;   // tree not initialized yet (no verified peaks)
    if (!pos.is_base())
        return false;   // only layer-0 bins carry data
    if (length<chunk_size_ && pos!=bin_t(0,sizec_-1))
        return false;   // short chunk only allowed as the very last one
    if (ack_out_.is_filled(pos))
        return true; // to set data_in_
    bin_t peak = peak_for(pos);
    if (peak.is_none())
        return false;

    Sha1Hash data_hash(data,length);
    if (!OfferHash(pos, data_hash)) {
        //printf("invalid hash for %s: %s\n",pos.str(bin_name_buf),data_hash.hex().c_str()); // paranoid
        //fprintf(stderr,"INVALID HASH FOR %" PRIi64 " layer %d\n", pos.toUInt(), pos.layer() );
        // Ric: TODO it's not necessarily a bug.. it happens if a pkt was lost!
        dprintf("%s hashtree check failed (bug TODO) %s\n",tintstr(),pos.str().c_str());
        return false;
    }

    //printf("g %" PRIi64 " %s\n",(uint64_t)pos,hash.hex().c_str());
    ack_out_.set(pos);
    // Arno,2011-10-03: appease g++
    if (storage_->Write(data,length,pos.base_offset()*chunk_size_) < 0)
        print_error("pwrite failed");
    complete_ += length;
    completec_++;
    if (pos.base_offset()==sizec_-1) {
        // Last chunk received: the exact content size is now known.
        size_ = ((sizec_-1)*chunk_size_) + length;
        if (storage_->GetReservedSize()!=size_)
            storage_->ResizeReserved(size_);
    }
    return true;
}
637
638
seq_complete(int64_t offset)639 uint64_t MmapHashTree::seq_complete(int64_t offset)
640 {
641
642 uint64_t seqc = 0;
643 if (offset == 0) {
644 uint64_t seqc = ack_out_.find_empty().base_offset();
645 if (seqc==sizec_)
646 return size_;
647 else
648 return seqc*chunk_size_;
649 } else {
650 // SEEK: Calc sequentially complete bytes from an offset
651 bin_t binoff = bin_t(0,(offset - (offset % chunk_size_)) / chunk_size_);
652 bin_t nextempty = ack_out_.find_empty(binoff);
653 if (nextempty == bin_t::NONE || nextempty.base_offset() * chunk_size_ > size_)
654 return size_-offset; // All filled from offset
655
656 bin_t::uint_t diffc = nextempty.layer_offset() - binoff.layer_offset();
657 uint64_t diffb = diffc * chunk_size_;
658 if (diffb > 0)
659 diffb -= (offset % chunk_size_);
660
661 return diffb;
662 }
663 }
664
665
~MmapHashTree()666 MmapHashTree::~MmapHashTree()
667 {
668 if (hashes_)
669 memory_unmap(hash_fd_, hashes_, sizec_*2*sizeof(Sha1Hash));
670 if (hash_fd_ >= 0) {
671 close(hash_fd_);
672 }
673 }
674
675