package Tie::File;

require 5.005;

use strict;
use warnings;

use Carp ':DEFAULT', 'confess';
use POSIX 'SEEK_SET';
use Fcntl 'O_CREAT', 'O_RDWR', 'LOCK_EX', 'LOCK_SH', 'O_WRONLY', 'O_RDONLY';
sub O_ACCMODE () { O_RDONLY | O_RDWR | O_WRONLY }


our $VERSION = "1.06";
my $DEFAULT_MEMORY_SIZE = 1<<21;      # 2 megabytes
my $DEFAULT_AUTODEFER_THRESHHOLD = 3; # 3 records
my $DEFAULT_AUTODEFER_FILELEN_THRESHHOLD = 65536; # 16 disk blocksful

my %good_opt = map {$_ => 1, "-$_" => 1}
                 qw(memory dw_size mode recsep discipline
                    autodefer autochomp autodefer_threshhold concurrent);

our $DIAGNOSTIC = 0;
our @OFF; # used as a temporary alias in some subroutines.
our @H;   # used as a temporary alias in _annotate_ad_history

sub TIEARRAY {
  if (@_ % 2 != 0) {
    croak "usage: tie \@array, $_[0], filename, [option => value]...";
  }
  my ($pack, $file, %opts) = @_;

  # transform '-foo' keys into 'foo' keys
  for my $key (keys %opts) {
    unless ($good_opt{$key}) {
      croak("$pack: Unrecognized option '$key'\n");
    }
    my $okey = $key;
    if ($key =~ s/^-+//) {
      $opts{$key} = delete $opts{$okey};
    }
  }

  if ($opts{concurrent}) {
    croak("$pack: concurrent access not supported yet\n");
  }

  unless (defined $opts{memory}) {
    # default is the larger of the default cache size and the
    # deferred-write buffer size (if specified)
    $opts{memory} = $DEFAULT_MEMORY_SIZE;
    $opts{memory} = $opts{dw_size}
      if defined $opts{dw_size} && $opts{dw_size} > $DEFAULT_MEMORY_SIZE;
    # Dora Winifred Read
  }
  $opts{dw_size} = $opts{memory} unless defined $opts{dw_size};
  if ($opts{dw_size} > $opts{memory}) {
    croak("$pack: dw_size may not be larger than total memory allocation\n");
  }
  # are we in deferred-write mode?
  $opts{defer} = 0 unless defined $opts{defer};
  $opts{deferred} = {};         # no records are presently deferred
  $opts{deferred_s} = 0;        # count of total bytes in ->{deferred}
  $opts{deferred_max} = -1;     # empty

  # What's a good way to arrange that this class can be overridden?
  $opts{cache} = Tie::File::Cache->new($opts{memory});

  # autodeferment is enabled by default
  $opts{autodefer} = 1 unless defined $opts{autodefer};
  $opts{autodeferring} = 0;     # but is not initially active
  $opts{ad_history} = [];
  $opts{autodefer_threshhold} = $DEFAULT_AUTODEFER_THRESHHOLD
    unless defined $opts{autodefer_threshhold};
  $opts{autodefer_filelen_threshhold} = $DEFAULT_AUTODEFER_FILELEN_THRESHHOLD
    unless defined $opts{autodefer_filelen_threshhold};

  $opts{offsets} = [0];
  $opts{filename} = $file;
  unless (defined $opts{recsep}) {
    $opts{recsep} = _default_recsep();
  }
  $opts{recseplen} = length($opts{recsep});
  if ($opts{recseplen} == 0) {
    croak "Empty record separator not supported by $pack";
  }

  $opts{autochomp} = 1 unless defined $opts{autochomp};

  $opts{mode} = O_CREAT|O_RDWR unless defined $opts{mode};
  $opts{rdonly} = (($opts{mode} & O_ACCMODE) == O_RDONLY);
  $opts{sawlastrec} = undef;

  my $fh;

  if (UNIVERSAL::isa($file, 'GLOB')) {
    # We use 1 here on the theory that some systems
    # may not indicate failure if we use 0.
    # MSWin32 does not indicate failure with 0, but I don't know if
    # it will indicate failure with 1 or not.
    unless (seek $file, 1, SEEK_SET) {
      croak "$pack: your filehandle does not appear to be seekable";
    }
    seek $file, 0, SEEK_SET;    # put it back
    $fh = $file;                # setting binmode is the user's problem
  } elsif (ref $file) {
    croak "usage: tie \@array, $pack, filename, [option => value]...";
  } else {
    # $fh = \do { local *FH };  # XXX this is buggy
    if ($] < 5.006) {
      # perl 5.005 and earlier don't autovivify filehandles
      require Symbol;
      $fh = Symbol::gensym();
    }
    sysopen $fh, $file, $opts{mode}, 0666 or return;
    binmode $fh;
    ++$opts{ourfh};
  }
  { my $ofh = select $fh; $| = 1; select $ofh } # autoflush on write
  if (defined $opts{discipline} && $] >= 5.006) {
    # This avoids a compile-time warning under 5.005
    eval 'binmode($fh, $opts{discipline})';
    croak $@ if $@ =~ /unknown discipline/i;
    die if $@;
  }
  $opts{fh} = $fh;

  bless \%opts => $pack;
}

sub FETCH {
  my ($self, $n) = @_;
  my $rec;

  # check the defer buffer
  $rec = $self->{deferred}{$n} if exists $self->{deferred}{$n};
  $rec = $self->_fetch($n) unless defined $rec;

  # inlined _chomp1
  substr($rec, - $self->{recseplen}) = ""
    if defined $rec && $self->{autochomp};
  $rec;
}

# Chomp many records in-place; return nothing useful
sub _chomp {
  my $self = shift;
  return unless $self->{autochomp};
  if ($self->{autochomp}) {
    for (@_) {
      next unless defined;
      substr($_, - $self->{recseplen}) = "";
    }
  }
}

# Chomp one record in-place; return modified record
sub _chomp1 {
  my ($self, $rec) = @_;
  return $rec unless $self->{autochomp};
  return unless defined $rec;
  substr($rec, - $self->{recseplen}) = "";
  $rec;
}

sub _fetch {
  my ($self, $n) = @_;

  # check the record cache
  { my $cached = $self->{cache}->lookup($n);
    return $cached if defined $cached;
  }

  if ($#{$self->{offsets}} < $n) {
    return if $self->{eof};     # request for record beyond end of file
    my $o = $self->_fill_offsets_to($n);
    # If it's still undefined, there is no such record, so return 'undef'
    return unless defined $o;
  }

  my $fh = $self->{FH};
  $self->_seek($n);             # we can do this now that offsets is populated
  my $rec = $self->_read_record;

# If we happen to have just read the first record, check to see if
# the length of the record matches what 'tell' says.  If not, Tie::File
# won't work, and should drop dead.
#
#  if ($n == 0 && defined($rec) && tell($self->{fh}) != length($rec)) {
#    if (defined $self->{discipline}) {
#      croak "I/O discipline $self->{discipline} not supported";
#    } else {
#      croak "File encoding not supported";
#    }
#  }

  $self->{cache}->insert($n, $rec) if defined $rec && not $self->{flushing};
  $rec;
}

sub STORE {
  my ($self, $n, $rec) = @_;
  die "STORE called from _check_integrity!" if $DIAGNOSTIC;

  $self->_fixrecs($rec);

  if ($self->{autodefer}) {
    $self->_annotate_ad_history($n);
  }

  return $self->_store_deferred($n, $rec) if $self->_is_deferring;


  # We need this to decide whether the new record will fit
  # It incidentally populates the offsets table
  # Note we have to do this before we alter the cache
  # 20020324 Wait, but this DOES alter the cache.  TODO BUG?
  my $oldrec = $self->_fetch($n);

  if (not defined $oldrec) {
    # We're storing a record beyond the end of the file
    $self->_extend_file_to($n+1);
    $oldrec = $self->{recsep};
  }
#  return if $oldrec eq $rec;    # don't bother
  my $len_diff = length($rec) - length($oldrec);

  # length($oldrec) here is not consistent with text mode  TODO XXX BUG
  $self->_mtwrite($rec, $self->{offsets}[$n], length($oldrec));
  $self->_oadjust([$n, 1, $rec]);
  $self->{cache}->update($n, $rec);
}

sub _store_deferred {
  my ($self, $n, $rec) = @_;
  $self->{cache}->remove($n);
  my $old_deferred = $self->{deferred}{$n};

  if (defined $self->{deferred_max} && $n > $self->{deferred_max}) {
    $self->{deferred_max} = $n;
  }
  $self->{deferred}{$n} = $rec;

  my $len_diff = length($rec);
  $len_diff -= length($old_deferred) if defined $old_deferred;
  $self->{deferred_s} += $len_diff;
  $self->{cache}->adj_limit(-$len_diff);
  if ($self->{deferred_s} > $self->{dw_size}) {
    $self->_flush;
  } elsif ($self->_cache_too_full) {
    $self->_cache_flush;
  }
}

# Remove a single record from the deferred-write buffer without writing it
# The record need not be present
sub _delete_deferred {
  my ($self, $n) = @_;
  my $rec = delete $self->{deferred}{$n};
  return unless defined $rec;

  if (defined $self->{deferred_max}
      && $n == $self->{deferred_max}) {
    undef $self->{deferred_max};
  }

  $self->{deferred_s} -= length $rec;
  $self->{cache}->adj_limit(length $rec);
}

sub FETCHSIZE {
  my $self = shift;
  my $n = $self->{eof} ? $#{$self->{offsets}} : $self->_fill_offsets;

  my $top_deferred = $self->_defer_max;
  $n = $top_deferred+1 if defined $top_deferred && $n < $top_deferred+1;
  $n;
}

sub STORESIZE {
  my ($self, $len) = @_;

  if ($self->{autodefer}) {
    $self->_annotate_ad_history('STORESIZE');
  }

  my $olen = $self->FETCHSIZE;
  return if $len == $olen;      # Woo-hoo!

  # file gets longer
  if ($len > $olen) {
    if ($self->_is_deferring) {
      for ($olen .. $len-1) {
        $self->_store_deferred($_, $self->{recsep});
      }
    } else {
      $self->_extend_file_to($len);
    }
    return;
  }

  # file gets shorter
  if ($self->_is_deferring) {
    # TODO maybe replace this with map-plus-assignment?
    for (grep $_ >= $len, keys %{$self->{deferred}}) {
      $self->_delete_deferred($_);
    }
    $self->{deferred_max} = $len-1;
  }

  $self->_seek($len);
  $self->_chop_file;
  $#{$self->{offsets}} = $len;
#  $self->{offsets}[0] = 0;      # in case we just chopped this

  $self->{cache}->remove(grep $_ >= $len, $self->{cache}->ckeys);
}

### OPTIMIZE ME
### It should not be necessary to do FETCHSIZE
### Just seek to the end of the file.
sub PUSH {
  my $self = shift;
  $self->SPLICE($self->FETCHSIZE, scalar(@_), @_);

  # No need to return:
  #  $self->FETCHSIZE;  # because av.c takes care of this for me
}

sub POP {
  my $self = shift;
  my $size = $self->FETCHSIZE;
  return if $size == 0;
#  print STDERR "# POPPITY POP POP POP\n";
  scalar $self->SPLICE($size-1, 1);
}

sub SHIFT {
  my $self = shift;
  scalar $self->SPLICE(0, 1);
}

sub UNSHIFT {
  my $self = shift;
  $self->SPLICE(0, 0, @_);
  # $self->FETCHSIZE; # av.c takes care of this for me
}

sub CLEAR {
  my $self = shift;

  if ($self->{autodefer}) {
    $self->_annotate_ad_history('CLEAR');
  }

  $self->_seekb(0);
  $self->_chop_file;
  $self->{cache}->set_limit($self->{memory});
  $self->{cache}->empty;
  @{$self->{offsets}} = (0);
  %{$self->{deferred}}= ();
  $self->{deferred_s} = 0;
  $self->{deferred_max} = -1;
}

sub EXTEND {
  my ($self, $n) = @_;

  # No need to pre-extend anything in this case
  return if $self->_is_deferring;

  $self->_fill_offsets_to($n);
  $self->_extend_file_to($n);
}

sub DELETE {
  my ($self, $n) = @_;

  if ($self->{autodefer}) {
    $self->_annotate_ad_history('DELETE');
  }

  my $lastrec = $self->FETCHSIZE-1;
  my $rec = $self->FETCH($n);
  $self->_delete_deferred($n) if $self->_is_deferring;
  if ($n == $lastrec) {
    $self->_seek($n);
    $self->_chop_file;
    $#{$self->{offsets}}--;
    $self->{cache}->remove($n);
    # perhaps in this case I should also remove trailing null records?
    # 20020316
    # Note that delete @a[-3..-1] deletes the records in the wrong order,
    # so we only chop the very last one out of the file.  We could repair this
    # by tracking deleted records inside the object.
  } elsif ($n < $lastrec) {
    $self->STORE($n, "");
  }
  $rec;
}

sub EXISTS {
  my ($self, $n) = @_;
  return 1 if exists $self->{deferred}{$n};
  $n < $self->FETCHSIZE;
}

sub SPLICE {
  my $self = shift;

  if ($self->{autodefer}) {
    $self->_annotate_ad_history('SPLICE');
  }

  $self->_flush if $self->_is_deferring; # move this up?
  if (wantarray) {
    $self->_chomp(my @a = $self->_splice(@_));
    @a;
  } else {
    $self->_chomp1(scalar $self->_splice(@_));
  }
}

sub DESTROY {
  my $self = shift;
  $self->flush if $self->_is_deferring;
  $self->{cache}->delink if defined $self->{cache}; # break circular link
  if ($self->{fh} and $self->{ourfh}) {
    delete $self->{ourfh};
    close delete $self->{fh};
  }
}

sub _splice {
  my ($self, $pos, $nrecs, @data) = @_;
  my @result;

  $pos = 0 unless defined $pos;

  # Deal with negative and other out-of-range positions
  # Also set default for $nrecs
  {
    my $oldsize = $self->FETCHSIZE;
    $nrecs = $oldsize unless defined $nrecs;
    my $oldpos = $pos;

    if ($pos < 0) {
      $pos += $oldsize;
      if ($pos < 0) {
        croak "Modification of non-creatable array value attempted, " .
              "subscript $oldpos";
      }
    }

    if ($pos > $oldsize) {
      return unless @data;
      $pos = $oldsize;          # This is what perl does for normal arrays
    }

    # The manual is very unclear here
    if ($nrecs < 0) {
      $nrecs = $oldsize - $pos + $nrecs;
      $nrecs = 0 if $nrecs < 0;
    }

    # nrecs is too big---it really means "until the end"
    # 20030507
    if ($nrecs + $pos > $oldsize) {
      $nrecs = $oldsize - $pos;
    }
  }

  $self->_fixrecs(@data);
  my $data = join '', @data;
  my $datalen = length $data;
  my $oldlen = 0;

  # compute length of data being removed
  for ($pos .. $pos+$nrecs-1) {
    last unless defined $self->_fill_offsets_to($_);
    my $rec = $self->_fetch($_);
    last unless defined $rec;
    push @result, $rec;

    # Why don't we just use length($rec) here?
    # Because that record might have come from the cache.  _splice
    # might have been called to flush out the deferred-write records,
    # and in this case length($rec) is the length of the record to be
    # *written*, not the length of the actual record in the file.  But
    # the offsets are still true. 20020322
    $oldlen += $self->{offsets}[$_+1] - $self->{offsets}[$_]
      if defined $self->{offsets}[$_+1];
  }
  $self->_fill_offsets_to($pos+$nrecs);

  # Modify the file
  $self->_mtwrite($data, $self->{offsets}[$pos], $oldlen);
  # Adjust the offsets table
  $self->_oadjust([$pos, $nrecs, @data]);

  { # Take this read cache stuff out into a separate function
    # You made a half-attempt to put it into _oadjust.
    # Finish something like that up eventually.
    # STORE also needs to do something similarish

    # update the read cache, part 1
    # modified records
    for ($pos .. $pos+$nrecs-1) {
      my $new = $data[$_-$pos];
      if (defined $new) {
        $self->{cache}->update($_, $new);
      } else {
        $self->{cache}->remove($_);
      }
    }

    # update the read cache, part 2
    # moved records - records past the site of the change
    # need to be renumbered
    # Maybe merge this with the previous block?
    {
      my @oldkeys = grep $_ >= $pos + $nrecs, $self->{cache}->ckeys;
      my @newkeys = map $_-$nrecs+@data, @oldkeys;
      $self->{cache}->rekey(\@oldkeys, \@newkeys);
    }

    # Now there might be too much data in the cache, if we spliced out
    # some short records and spliced in some long ones.  If so, flush
    # the cache.
    $self->_cache_flush;
  }

  # Yes, the return value of 'splice' *is* actually this complicated
  wantarray ? @result : @result ? $result[-1] : undef;
}


# write data into the file
# $data is the data to be written.
# it should be written at position $pos, and should overwrite
# exactly $len of the following bytes.
# Note that if length($data) > $len, the subsequent bytes will have to
# be moved up, and if length($data) < $len, they will have to
# be moved down
sub _twrite {
  my ($self, $data, $pos, $len) = @_;

  unless (defined $pos) {
    die "\$pos was undefined in _twrite";
  }

  my $len_diff = length($data) - $len;

  if ($len_diff == 0) {         # Woo-hoo!
    my $fh = $self->{fh};
    $self->_seekb($pos);
    $self->_write_record($data);
    return;                     # well, that was easy.
  }

  # the two records are of different lengths
  # our strategy here: rewrite the tail of the file,
  # reading ahead one buffer at a time
  # $bufsize is required to be at least as large as the data we're overwriting
  my $bufsize = _bufsize($len_diff);
  my ($writepos, $readpos) = ($pos, $pos+$len);
  my $next_block;
  my $more_data;

  # Seems like there ought to be a way to avoid the repeated code
  # and the special case here.  The read(1) is also a little weird.
  # Think about this.
  do {
    $self->_seekb($readpos);
    my $br = read $self->{fh}, $next_block, $bufsize;
    $more_data = read $self->{fh}, my($dummy), 1;
    $self->_seekb($writepos);
    $self->_write_record($data);
    $readpos += $br;
    $writepos += length $data;
    $data = $next_block;
  } while $more_data;
  $self->_seekb($writepos);
  $self->_write_record($next_block);

  # There might be leftover data at the end of the file
  $self->_chop_file if $len_diff < 0;
}

# _iwrite(D, S, E)
# Insert text D at position S.
# Let C = E-S-|D|.  If C < 0; die.
# Data in [S,S+C) is copied to [S+D,S+D+C) = [S+D,E).
# Data in [S+C = E-D, E) is returned.   Data in [E, oo) is untouched.
#
# In a later version, don't read the entire intervening area into
# memory at once; do the copying block by block.
sub _iwrite {
  my $self = shift;
  my ($D, $s, $e) = @_;
  my $d = length $D;
  my $c = $e-$s-$d;
  local *FH = $self->{fh};
  confess "Not enough space to insert $d bytes between $s and $e"
    if $c < 0;
  confess "[$s,$e) is an invalid insertion range" if $e < $s;

  $self->_seekb($s);
  read FH, my $buf, $e-$s;

  $D .= substr($buf, 0, $c, "");

  $self->_seekb($s);
  $self->_write_record($D);

  return $buf;
}

# Like _twrite, but the data-pos-len triple may be repeated; you may
# write several chunks.  All the writing will be done in
# one pass.   Chunks SHALL be in ascending order and SHALL NOT overlap.
sub _mtwrite {
  my $self = shift;
  my $unwritten = "";
  my $delta = 0;

  @_ % 3 == 0
    or die "Arguments to _mtwrite did not come in groups of three";

  while (@_) {
    my ($data, $pos, $len) = splice @_, 0, 3;
    my $end = $pos + $len;      # The OLD end of the segment to be replaced
    $data = $unwritten . $data;
    $delta -= length($unwritten);
    $unwritten = "";
    $pos += $delta;             # This is where the data goes now
    my $dlen = length $data;
    $self->_seekb($pos);
    if ($len >= $dlen) {        # the data will fit
      $self->_write_record($data);
      $delta += ($dlen - $len); # everything following moves down by this much
      $data = "";               # All the data in the buffer has been written
    } else {                    # won't fit
      my $writable = substr($data, 0, $len - $delta, "");
      $self->_write_record($writable);
      $delta += ($dlen - $len); # everything following moves down by this much
    }

    # At this point we've written some but maybe not all of the data.
    # There might be a gap to close up, or $data might still contain a
    # bunch of unwritten data that didn't fit.
    my $ndlen = length $data;
    if ($delta == 0) {
      $self->_write_record($data);
    } elsif ($delta < 0) {
      # upcopy (close up gap)
      if (@_) {
        $self->_upcopy($end, $end + $delta, $_[1] - $end);
      } else {
        $self->_upcopy($end, $end + $delta);
      }
    } else {
      # downcopy (insert data that didn't fit; replace this data in memory
      # with _later_ data that doesn't fit)
      if (@_) {
        $unwritten = $self->_downcopy($data, $end, $_[1] - $end);
      } else {
        # Make the file longer to accommodate the last segment that doesn't
        $unwritten = $self->_downcopy($data, $end);
      }
    }
  }
}

# Copy block of data of length $len from position $spos to position $dpos
# $dpos must be <= $spos
#
# If $len is undefined, go all the way to the end of the file
# and then truncate it ($spos - $dpos bytes will be removed)
sub _upcopy {
  my $blocksize = 8192;
  my ($self, $spos, $dpos, $len) = @_;
  if ($dpos > $spos) {
    die "source ($spos) was upstream of destination ($dpos) in _upcopy";
  } elsif ($dpos == $spos) {
    return;
  }

  while (! defined ($len) || $len > 0) {
    my $readsize = ! defined($len) ? $blocksize
               : $len > $blocksize ? $blocksize
               : $len;

    my $fh = $self->{fh};
    $self->_seekb($spos);
    my $bytes_read = read $fh, my($data), $readsize;
    $self->_seekb($dpos);
    if ($data eq "") {
      $self->_chop_file;
      last;
    }
    $self->_write_record($data);
    $spos += $bytes_read;
    $dpos += $bytes_read;
    $len -= $bytes_read if defined $len;
  }
}

# Write $data into a block of length $len at position $pos,
# moving everything in the block forwards to make room.
# Instead of writing the last length($data) bytes from the block
# (because there isn't room for them any longer) return them.
#
# Undefined $len means 'until the end of the file'
sub _downcopy {
  my $blocksize = 8192;
  my ($self, $data, $pos, $len) = @_;
  my $fh = $self->{fh};

  while (! defined $len || $len > 0) {
    my $readsize = ! defined($len) ? $blocksize
      : $len > $blocksize ? $blocksize : $len;
    $self->_seekb($pos);
    read $fh, my($old), $readsize;
    my $last_read_was_short = length($old) < $readsize;
    $data .= $old;
    my $writable;
    if ($last_read_was_short) {
      # If last read was short, then $data now contains the entire rest
      # of the file, so there's no need to write only one block of it
      $writable = $data;
      $data = "";
    } else {
      $writable = substr($data, 0, $readsize, "");
    }
    last if $writable eq "";
    $self->_seekb($pos);
    $self->_write_record($writable);
    last if $last_read_was_short && $data eq "";
    $len -= $readsize if defined $len;
    $pos += $readsize;
  }
  return $data;
}

# Adjust the object data structures following an '_mtwrite'
# Arguments are
#  [$pos, $nrecs, @records] items
# indicating that $nrecs records were removed starting at record number $pos
# and replaced with the records in @records...
# Arguments guarantee that $pos is strictly increasing.
# No return value
sub _oadjust {
  my $self = shift;
  my $delta = 0;
  my $delta_recs = 0;
  my $prev_end = -1;

  for (@_) {
    my ($pos, $nrecs, @data) = @$_;
    $pos += $delta_recs;

    # Adjust the offsets of the records after the previous batch up
    # to the first new one of this batch
    for my $i ($prev_end+2 .. $pos - 1) {
      $self->{offsets}[$i] += $delta;
    }

    $prev_end = $pos + @data - 1; # last record moved on this pass

    # Remove the offsets for the removed records;
    # replace with the offsets for the inserted records
    my @newoff = ($self->{offsets}[$pos] + $delta);
    for my $i (0 .. $#data) {
      my $newlen = length $data[$i];
      push @newoff, $newoff[$i] + $newlen;
      $delta += $newlen;
    }

    for my $i ($pos .. $pos+$nrecs-1) {
      last if $i+1 > $#{$self->{offsets}};
      my $oldlen = $self->{offsets}[$i+1] - $self->{offsets}[$i];
      $delta -= $oldlen;
    }

    # replace old offsets with new
    splice @{$self->{offsets}}, $pos, $nrecs+1, @newoff;
    # What if we just spliced out the end of the offsets table?
    # shouldn't we clear $self->{eof}?  Test for this XXX BUG TODO

    $delta_recs += @data - $nrecs; # net change in total number of records
  }

  # The trailing records at the very end of the file
  if ($delta) {
    for my $i ($prev_end+2 .. $#{$self->{offsets}}) {
      $self->{offsets}[$i] += $delta;
    }
  }

  # If we scrubbed out all known offsets, regenerate the trivial table
  # that knows that the file does indeed start at 0.
  $self->{offsets}[0] = 0 unless @{$self->{offsets}};
  # If the file got longer, the offsets table is no longer complete
  # $self->{eof} = 0 if $delta_recs > 0;

  # Now there might be too much data in the cache, if we spliced out
  # some short records and spliced in some long ones.  If so, flush
  # the cache.
  $self->_cache_flush;
}

# If a record does not already end with the appropriate terminator
# string, append one.
sub _fixrecs {
  my $self = shift;
  for (@_) {
    $_ = "" unless defined $_;
    $_ .= $self->{recsep}
      unless substr($_, - $self->{recseplen}) eq $self->{recsep};
  }
}


################################################################
#
# Basic read, write, and seek
#

# seek to the beginning of record #$n
# Assumes that the offsets table is already correctly populated
#
# Note that $n=-1 has a special meaning here: It means the start of
# the last known record; this may or may not be the very last record
# in the file, depending on whether the offsets table is fully populated.
#
sub _seek {
  my ($self, $n) = @_;
  my $o = $self->{offsets}[$n];
  defined($o)
    or confess("logic error: undefined offset for record $n");
  seek $self->{fh}, $o, SEEK_SET
    or confess "Couldn't seek filehandle: $!";  # "Should never happen."
}

# seek to byte $b in the file
sub _seekb {
  my ($self, $b) = @_;
  seek $self->{fh}, $b, SEEK_SET
    or die "Couldn't seek filehandle: $!";  # "Should never happen."
}

# populate the offsets table up to the beginning of record $n
# return the offset of record $n
sub _fill_offsets_to {
  my ($self, $n) = @_;

  return $self->{offsets}[$n] if $self->{eof};

  my $fh = $self->{fh};
  local *OFF = $self->{offsets};
  my $rec;

  until ($#OFF >= $n) {
    $self->_seek(-1);           # tricky -- see comment at _seek
    $rec = $self->_read_record;
    if (defined $rec) {
      push @OFF, int(tell $fh); # Tels says that int() saves memory here
    } else {
      $self->{eof} = 1;
      return;                   # It turns out there is no such record
    }
  }

  # we have now read all the records up to record n-1,
  # so we can return the offset of record n
  $OFF[$n];
}

sub _fill_offsets {
  my ($self) = @_;

  my $fh = $self->{fh};
  local *OFF = $self->{offsets};

  $self->_seek(-1);             # tricky -- see comment at _seek

  # Tels says that inlining read_record() would make this loop
  # five times faster. 20030508
  while ( defined $self->_read_record()) {
    # int() saves us memory here
    push @OFF, int(tell $fh);
  }

  $self->{eof} = 1;
  $#OFF;
}

# assumes that $rec is already suitably terminated
sub _write_record {
  my ($self, $rec) = @_;
  my $fh = $self->{fh};
  local $\ = "";
  print $fh $rec
    or die "Couldn't write record: $!";  # "Should never happen."
#  $self->{_written} += length($rec);
}

sub _read_record {
  my $self = shift;
  my $rec;
  { local $/ = $self->{recsep};
    my $fh = $self->{fh};
    $rec = <$fh>;
  }
  return unless defined $rec;
  if (substr($rec, -$self->{recseplen}) ne $self->{recsep}) {
    # improperly terminated final record --- quietly fix it.
#    my $ac = substr($rec, -$self->{recseplen});
#    $ac =~ s/\n/\\n/g;
    $self->{sawlastrec} = 1;
    unless ($self->{rdonly}) {
      local $\ = "";
      my $fh = $self->{fh};
      print $fh $self->{recsep};
    }
    $rec .= $self->{recsep};
  }
#  $self->{_read} += length($rec) if defined $rec;
  $rec;
}

sub _rw_stats {
  my $self = shift;
  @{$self}{'_read', '_written'};
}

################################################################
#
# Read cache management

sub _cache_flush {
  my ($self) = @_;
  $self->{cache}->reduce_size_to($self->{memory} - $self->{deferred_s});
}

sub _cache_too_full {
  my $self = shift;
  $self->{cache}->bytes + $self->{deferred_s} >= $self->{memory};
}

################################################################
#
# File custodial services
#


# We have read to the end of the file and have the offsets table
# entirely populated.  Now we need to write a new record beyond
# the end of the file.  We prepare for this by writing
# empty records into the file up to the position we want
#
# assumes that the offsets table already contains the offset of record $n,
# if it exists, and extends to the end of the file if not.
sub _extend_file_to {
  my ($self, $n) = @_;
  $self->_seek(-1);             # position after the end of the last record
  my $pos = $self->{offsets}[-1];

  # the offsets table has one entry more than the total number of records
  my $extras = $n - $#{$self->{offsets}};

  # Todo : just use $self->{recsep} x $extras here?
  while ($extras-- > 0) {
    $self->_write_record($self->{recsep});
    push @{$self->{offsets}}, int(tell $self->{fh});
  }
}

# Truncate the file at the current position
sub _chop_file {
  my $self = shift;
  truncate $self->{fh}, tell($self->{fh});
}


# compute the size of a buffer suitable for moving
# all the data in a file forward $n bytes
# ($n may be negative)
# The result should be at least $n.
sub _bufsize {
  my $n = shift;
  return 8192 if $n <= 0;
  my $b = $n & ~8191;
  $b += 8192 if $n & 8191;
  $b;
}

################################################################
#
# Miscellaneous public methods
#

# Lock the file
sub flock {
  my ($self, $op) = @_;
  unless (@_ <= 3) {
    my $pack = ref $self;
    croak "Usage: $pack\->flock([OPERATION])";
  }
  my $fh = $self->{fh};
  $op = LOCK_EX unless defined $op;
  my $locked = flock $fh, $op;

  if ($locked && ($op & (LOCK_EX | LOCK_SH))) {
    # If you're locking the file, then presumably it's because
    # there might have been a write access by another process.
    # In that case, the read cache contents and the offsets table
    # might be invalid, so discard them.  20030508
    $self->{offsets} = [0];
    $self->{cache}->empty;
  }

  $locked;
}

# Get/set autochomp option
sub autochomp {
  my $self = shift;
  if (@_) {
    my $old = $self->{autochomp};
    $self->{autochomp} = shift;
    $old;
  } else {
    $self->{autochomp};
  }
}

# Get offset table entries; returns offset of nth record
sub offset {
  my ($self, $n) = @_;

  if ($#{$self->{offsets}} < $n) {
    return if $self->{eof};     # request for record beyond the end of file
    my $o = $self->_fill_offsets_to($n);
    # If it's still undefined, there is no such record, so return 'undef'
    return unless defined $o;
  }

  $self->{offsets}[$n];
}

sub discard_offsets {
  my $self = shift;
  $self->{offsets} = [0];
}

################################################################
#
# Matters related to deferred writing
#

# Defer writes
sub defer {
  my $self = shift;
  $self->_stop_autodeferring;
  @{$self->{ad_history}} = ();
  $self->{defer} = 1;
}

# Flush deferred writes
#
# This could be better optimized to write the file in one pass, instead
# of one pass per block of records.  But that will require modifications
# to _twrite, so I should have a good _twrite test suite first.
sub flush {
  my $self = shift;

  $self->_flush;
  $self->{defer} = 0;
}

sub _old_flush {
  my $self = shift;
  my @writable = sort {$a<=>$b} (keys %{$self->{deferred}});

  while (@writable) {
    # gather all consecutive records from the front of @writable
    my $first_rec = shift @writable;
    my $last_rec = $first_rec+1;
    ++$last_rec, shift @writable while @writable && $last_rec == $writable[0];
    --$last_rec;
    $self->_fill_offsets_to($last_rec);
    $self->_extend_file_to($last_rec);
    $self->_splice($first_rec, $last_rec-$first_rec+1,
                   @{$self->{deferred}}{$first_rec .. $last_rec});
  }

  $self->_discard;              # clear out deferred-write-cache
}

sub _flush {
  my $self = shift;
  my @writable = sort {$a<=>$b} (keys %{$self->{deferred}});
  my @args;
  my @adjust;

  while (@writable) {
    # gather all consecutive records from the front of @writable
    my $first_rec = shift @writable;
    my $last_rec = $first_rec+1;
    ++$last_rec, shift @writable while @writable && $last_rec == $writable[0];
    --$last_rec;
    my $end = $self->_fill_offsets_to($last_rec+1);
    if (not defined $end) {
      $self->_extend_file_to($last_rec);
      $end = $self->{offsets}[$last_rec];
    }
    my ($start) = $self->{offsets}[$first_rec];
    push @args,
      join("", @{$self->{deferred}}{$first_rec .. $last_rec}), # data
      $start,                                                  # position
      $end-$start;                                             # length
    push @adjust, [$first_rec,               # starting at this position...
                   $last_rec-$first_rec+1,   # this many records...
                   # are replaced with these...
                   @{$self->{deferred}}{$first_rec .. $last_rec},
                  ];
  }

  $self->_mtwrite(@args);       # write multiple record groups
  $self->_discard;              # clear out deferred-write-cache
  $self->_oadjust(@adjust);
}

# Discard deferred writes and disable future deferred writes
sub discard {
  my $self = shift;
  $self->_discard;
  $self->{defer} = 0;
}

# Discard deferred writes, but retain old deferred writing mode
sub _discard {
  my $self = shift;
  %{$self->{deferred}} = ();
  $self->{deferred_s} = 0;
  $self->{deferred_max} = -1;
  $self->{cache}->set_limit($self->{memory});
}

# Deferred writing is enabled, either explicitly ($self->{defer})
# or automatically ($self->{autodeferring})
sub _is_deferring {
  my $self = shift;
  $self->{defer} || $self->{autodeferring};
}

# The largest record number of any deferred record
sub _defer_max {
  my $self = shift;
  return $self->{deferred_max} if defined $self->{deferred_max};
  my $max = -1;
  for my $key (keys %{$self->{deferred}}) {
    $max = $key if $key > $max;
  }
  $self->{deferred_max} = $max;
  $max;
}

################################################################
#
# Matters related to autodeferment
#

# Get/set autodefer option
sub autodefer {
  my $self = shift;
  if (@_) {
    my $old = $self->{autodefer};
    $self->{autodefer} = shift;
    if ($old) {
      $self->_stop_autodeferring;
      @{$self->{ad_history}} = ();
    }
    $old;
  } else {
    $self->{autodefer};
  }
}

# The user is trying to store record #$n.  Record that in the history,
# and then enable (or disable) autodeferment if that seems useful.
# Note that it's OK for $n to be a non-number, as long as the function
# is prepared to deal with that.  Nobody else looks at the ad_history.
#
# Now, what does the ad_history mean, and what is this function doing?
# Essentially, the idea is to enable autodeferring when we see that the
# user has made three consecutive STORE calls to three consecutive records.
# ("Three" is actually ->{autodefer_threshhold}.)
# A STORE call for record #$n inserts $n into the autodefer history,
# and if the history contains three consecutive records, we enable
# autodeferment.  An ad_history of [X, Y] means that the most recent
# STOREs were for records X, X+1, ..., Y, in that order.
#
# Inserting a nonconsecutive number erases the history and starts over.
#
# Performing a special operation like SPLICE erases the history.
#
# There's one special case: CLEAR means that CLEAR was just called.
# In this case, we prime the history with [-2, -1] so that if the next
# write is for record 0, autodeferring goes on immediately.  This is for
# the common special case of "@a = (...)".
#
sub _annotate_ad_history {
  my ($self, $n) = @_;
  return unless $self->{autodefer}; # feature is disabled
  return if $self->{defer};         # already in explicit defer mode
  return unless $self->{offsets}[-1] >= $self->{autodefer_filelen_threshhold};

  local *H = $self->{ad_history};
  if ($n eq 'CLEAR') {
    @H = (-2, -1);              # prime the history with fake records
    $self->_stop_autodeferring;
  } elsif ($n =~ /^\d+$/) {
    if (@H == 0) {
      @H = ($n, $n);
    } else {                    # @H == 2
      if ($H[1] == $n-1) {      # another consecutive record
        $H[1]++;
        if ($H[1] - $H[0] + 1 >= $self->{autodefer_threshhold}) {
          $self->{autodeferring} = 1;
        }
      } else {                  # nonconsecutive- erase and start over
        @H = ($n, $n);
        $self->_stop_autodeferring;
      }
    }
  } else {                      # SPLICE or STORESIZE or some such
    @H = ();
    $self->_stop_autodeferring;
  }
}

# If autodeferring was enabled, cut it out and discard the history
sub _stop_autodeferring {
  my $self = shift;
  if ($self->{autodeferring}) {
    $self->_flush;
  }
  $self->{autodeferring} = 0;
}

################################################################


# This is NOT a method.  It is here for two reasons:
#  1. To factor a fairly complicated block out of the constructor
#  2. To provide access for the test suite, which needs to be sure
#     files are being written properly.
sub _default_recsep {
  my $recsep = $/;
  if ($^O eq 'MSWin32') {       # Dos too?
    # Windows users expect files to be terminated with \r\n
    # But $/ is set to \n instead
    # Note that this also transforms \n\n into \r\n\r\n.
    # That is a feature.
    $recsep =~ s/\n/\r\n/g;
  }
  $recsep;
}

# Utility function for _check_integrity
sub _ci_warn {
  my $msg = shift;
  $msg =~ s/\n/\\n/g;
  $msg =~ s/\r/\\r/g;
  print "# $msg\n";
}

# Given a file, make sure the cache is consistent with the
# file contents and the internal data structures are consistent with
# each other.  Returns true if everything checks out, false if not
#
# The $file argument is no longer used.  It is retained for compatibility
# with the existing test suite.
sub _check_integrity {
  my ($self, $file, $warn) = @_;
  my $rsl = $self->{recseplen};
  my $rs  = $self->{recsep};
  my $good = 1;
  local *_;                     # local $_ does not work here
  local $DIAGNOSTIC = 1;

  if (not defined $rs) {
    _ci_warn("recsep is undef!");
    $good = 0;
  } elsif ($rs eq "") {
    _ci_warn("recsep is empty!");
    $good = 0;
  } elsif ($rsl != length $rs) {
    my $ln = length $rs;
    _ci_warn("recsep <$rs> has length $ln, should be $rsl");
    $good = 0;
  }

  if (not defined $self->{offsets}[0]) {
    _ci_warn("offset 0 is missing!");
    $good = 0;

  } elsif ($self->{offsets}[0] != 0) {
    _ci_warn("rec 0: offset <$self->{offsets}[0]> s/b 0!");
    $good = 0;
  }

  my $cached = 0;
  {
    local *F = $self->{fh};
    seek F, 0, SEEK_SET;
    local $. = 0;
    local $/ = $rs;

    while (<F>) {
      my $n = $. - 1;
      my $cached = $self->{cache}->_produce($n);
      my $offset = $self->{offsets}[$.];
      my $ao = tell F;
      if (defined $offset && $offset != $ao) {
        _ci_warn("rec $n: offset <$offset> actual <$ao>");
        $good = 0;
      }
      if (defined $cached && $_ ne $cached && ! $self->{deferred}{$n}) {
        $good = 0;
        _ci_warn("rec $n: cached <$cached> actual <$_>");
      }
      if (defined $cached && substr($cached, -$rsl) ne $rs) {
        $good = 0;
        _ci_warn("rec $n in the cache is missing the record separator");
      }
      if (! defined $offset && $self->{eof}) {
        $good = 0;
        _ci_warn("The offset table was marked complete, but it is missing " .
                 "element $.");
      }
    }
    if (@{$self->{offsets}} > $.+1) {
      $good = 0;
      my $n = @{$self->{offsets}};
      _ci_warn("The offset table has $n items, but the file has only $.");
    }

    my $deferring = $self->_is_deferring;
    for my $n ($self->{cache}->ckeys) {
      my $r = $self->{cache}->_produce($n);
      $cached += length($r);
      next if $n+1 <= $.;       # checked this already
      _ci_warn("spurious caching of record $n");
      $good = 0;
    }
    my $b = $self->{cache}->bytes;
    if ($cached != $b) {
      _ci_warn("cache size is $b, should be $cached");
      $good = 0;
    }
  }

  # That cache has its own set of tests
  $good = 0 unless $self->{cache}->_check_integrity;

  # Now let's check the deferbuffer
  # Unless deferred writing is enabled, it should be empty
  if (! $self->_is_deferring && %{$self->{deferred}}) {
    _ci_warn("deferred writing disabled, but deferbuffer nonempty");
    $good = 0;
  }

  # Any record in the deferbuffer should *not* be present in the readcache
  my $deferred_s = 0;
  while (my ($n, $r) = each %{$self->{deferred}}) {
    $deferred_s += length($r);
    if (defined $self->{cache}->_produce($n)) {
      _ci_warn("record $n is in the deferbuffer *and* the readcache");
      $good = 0;
    }
    if (substr($r, -$rsl) ne $rs) {
      _ci_warn("rec $n in the deferbuffer is missing the record separator");
      $good = 0;
    }
  }

  # Total size of deferbuffer should match internal total
  if ($deferred_s != $self->{deferred_s}) {
    _ci_warn("buffer size is $self->{deferred_s}, should be $deferred_s");
    $good = 0;
  }

  # Total size of deferbuffer should not exceed the specified limit
  if ($deferred_s > $self->{dw_size}) {
    _ci_warn("buffer size is $self->{deferred_s} which exceeds the limit " .
             "of $self->{dw_size}");
    $good = 0;
  }

  # Total size of cached data should not exceed the specified limit
  if ($deferred_s + $cached > $self->{memory}) {
    my $total = $deferred_s + $cached;
    _ci_warn("total stored data size is $total which exceeds the limit " .
             "of $self->{memory}");
    $good = 0;
  }

  # Stuff related to autodeferment
  if (!$self->{autodefer} && @{$self->{ad_history}}) {
    _ci_warn("autodefer is disabled, but ad_history is nonempty");
    $good = 0;
  }
  if ($self->{autodeferring} && $self->{defer}) {
    _ci_warn("both autodeferring and explicit deferring are active");
    $good = 0;
  }
  if (@{$self->{ad_history}} == 0) {
    # That's OK, no additional tests required
  } elsif (@{$self->{ad_history}} == 2) {
    my @non_number = grep !/^-?\d+$/, @{$self->{ad_history}};
    if (@non_number) {
      my $msg;
      { local $" = ')(';
        $msg = "ad_history contains non-numbers (@{$self->{ad_history}})";
      }
      _ci_warn($msg);
      $good = 0;
    } elsif ($self->{ad_history}[1] < $self->{ad_history}[0]) {
      _ci_warn("ad_history has nonsensical values @{$self->{ad_history}}");
      $good = 0;
    }
  } else {
    _ci_warn("ad_history has bad length <@{$self->{ad_history}}>");
    $good = 0;
  }

  $good;
}

################################################################
#
# Tie::File::Cache
#
# Read cache

package Tie::File::Cache;
$Tie::File::Cache::VERSION = $Tie::File::VERSION;
use Carp ':DEFAULT', 'confess';

sub HEAP () { 0 }
sub HASH () { 1 }
sub MAX  () { 2 }
sub BYTES() { 3 }
#sub STAT () { 4 } # Array with request statistics for each record
#sub MISS () { 5 } # Total number of cache misses
#sub REQ  () { 6 } # Total number of cache requests
use strict 'vars';

sub new {
  my ($pack, $max) = @_;
  local *_;
  croak "missing argument to ->new" unless defined $max;
  my $self = [];
  bless $self => $pack;
  @$self = (Tie::File::Heap->new($self), {}, $max, 0);
  $self;
}

sub adj_limit {
  my ($self, $n) = @_;
  $self->[MAX] += $n;
}

sub set_limit {
  my ($self, $n) = @_;
  $self->[MAX] = $n;
}

# For internal use only
# Will be called by the heap structure to notify us that a certain
# piece of data has moved from one heap element to another.
# $k is the hash key of the item
# $n is the new index into the heap at which it is stored
# If $n is undefined, the item has been removed from the heap.
sub _heap_move {
  my ($self, $k, $n) = @_;
  if (defined $n) {
    $self->[HASH]{$k} = $n;
  } else {
    delete $self->[HASH]{$k};
  }
}

sub insert {
  my ($self, $key, $val) = @_;
  local *_;
  croak "missing argument to ->insert" unless defined $key;
  unless (defined $self->[MAX]) {
    confess "undefined max";
  }
  confess "undefined val" unless defined $val;
  return if length($val) > $self->[MAX];

#  if ($self->[STAT]) {
#    $self->[STAT][$key] = 1;
#    return;
#  }

  my $oldnode = $self->[HASH]{$key};
  if (defined $oldnode) {
    my $oldval = $self->[HEAP]->set_val($oldnode, $val);
    $self->[BYTES] -= length($oldval);
  } else {
    $self->[HEAP]->insert($key, $val);
  }
  $self->[BYTES] += length($val);
  $self->flush if $self->[BYTES] > $self->[MAX];
}

sub expire {
  my $self = shift;
  my $old_data = $self->[HEAP]->popheap;
  return unless defined $old_data;
  $self->[BYTES] -= length $old_data;
  $old_data;
}

sub remove {
  my ($self, @keys) = @_;
  my @result;

#  if ($self->[STAT]) {
#    for my $key (@keys) {
#      $self->[STAT][$key] = 0;
#    }
#    return;
#  }

  for my $key (@keys) {
    next unless exists $self->[HASH]{$key};
    my $old_data = $self->[HEAP]->remove($self->[HASH]{$key});
    $self->[BYTES] -= length $old_data;
    push @result, $old_data;
  }
  @result;
}

sub lookup {
  my ($self, $key) = @_;
  local *_;
  croak "missing argument to ->lookup" unless defined $key;

#  if ($self->[STAT]) {
#    $self->[MISS]++ if $self->[STAT][$key]++ == 0;
#    $self->[REQ]++;
#    my $hit_rate = 1 - $self->[MISS] / $self->[REQ];
#    # Do some testing to determine this threshhold
#    $#$self = STAT - 1 if $hit_rate > 0.20;
#  }

  if (exists $self->[HASH]{$key}) {
    $self->[HEAP]->lookup($self->[HASH]{$key});
  } else {
    return;
  }
}

# For internal use only
sub _produce {
  my ($self, $key) = @_;
  my $loc = $self->[HASH]{$key};
  return unless defined $loc;
  $self->[HEAP][$loc][2];
}

# For internal use only
sub _promote {
  my ($self, $key) = @_;
  $self->[HEAP]->promote($self->[HASH]{$key});
}

sub empty {
  my ($self) = @_;
  %{$self->[HASH]} = ();
  $self->[BYTES] = 0;
  $self->[HEAP]->empty;
#  @{$self->[STAT]} = ();
#  $self->[MISS] = 0;
#  $self->[REQ] = 0;
}

sub is_empty {
  my ($self) = @_;
  keys %{$self->[HASH]} == 0;
}

sub update {
  my ($self, $key, $val) = @_;
  local *_;
  croak "missing argument to ->update" unless defined $key;
  if (length($val) > $self->[MAX]) {
    my ($oldval) = $self->remove($key);
    $self->[BYTES] -= length($oldval) if defined $oldval;
  } elsif (exists $self->[HASH]{$key}) {
    my $oldval = $self->[HEAP]->set_val($self->[HASH]{$key}, $val);
    $self->[BYTES] += length($val);
    $self->[BYTES] -= length($oldval) if defined $oldval;
  } else {
    $self->[HEAP]->insert($key, $val);
    $self->[BYTES] += length($val);
  }
  $self->flush;
}

sub rekey {
  my ($self, $okeys, $nkeys) = @_;
  local *_;
  my %map;
  @map{@$okeys} = @$nkeys;
  croak "missing argument to ->rekey" unless defined $nkeys;
  croak "length mismatch in ->rekey arguments" unless @$nkeys == @$okeys;
  my %adjusted;                 # map new keys to heap indices
  # You should be able to cut this to one loop TODO XXX
  for (0 .. $#$okeys) {
    $adjusted{$nkeys->[$_]} = delete $self->[HASH]{$okeys->[$_]};
  }
  while (my ($nk, $ix) = each %adjusted) {
    # @{$self->[HASH]}{keys %adjusted} = values %adjusted;
    $self->[HEAP]->rekey($ix, $nk);
    $self->[HASH]{$nk} = $ix;
  }
}

sub ckeys {
  my $self = shift;
  my @a = keys %{$self->[HASH]};
  @a;
}

# Return total amount of cached data
sub bytes {
  my $self = shift;
  $self->[BYTES];
}

# Expire oldest item from cache until cache size is smaller than $max
sub reduce_size_to {
  my ($self, $max) = @_;
  until ($self->[BYTES] <= $max) {
    # Note that Tie::File::Cache::expire has been inlined here
    my $old_data = $self->[HEAP]->popheap;
    return unless defined $old_data;
    $self->[BYTES] -= length $old_data;
  }
}

# Why not just $self->reduce_size_to($self->[MAX])?
# Try this when things stabilize   TODO XXX
# If the cache is too full, expire the oldest records
sub flush {
  my $self = shift;
  $self->reduce_size_to($self->[MAX]) if $self->[BYTES] > $self->[MAX];
}

# For internal use only
sub _produce_lru {
  my $self = shift;
  $self->[HEAP]->expire_order;
}

BEGIN { *_ci_warn = \&Tie::File::_ci_warn }

sub _check_integrity {          # For CACHE
  my $self = shift;
  my $good = 1;

  # Test HEAP
  $self->[HEAP]->_check_integrity or $good = 0;

  # Test HASH
  my $bytes = 0;
  for my $k (keys %{$self->[HASH]}) {
    if ($k ne '0' && $k !~ /^[1-9][0-9]*$/) {
      $good = 0;
      _ci_warn "Cache hash key <$k> is non-numeric";
    }

    my $h = $self->[HASH]{$k};
    if (! defined $h) {
      $good = 0;
      _ci_warn "Heap index number for key $k is undefined";
    } elsif ($h == 0) {
      $good = 0;
      _ci_warn "Heap index number for key $k is zero";
    } else {
      my $j = $self->[HEAP][$h];
      if (! defined $j) {
        $good = 0;
        _ci_warn "Heap contents key $k (=> $h) are undefined";
      } else {
        $bytes += length($j->[2]);
        if ($k ne $j->[1]) {
          $good = 0;
          _ci_warn "Heap contents key $k (=> $h) is $j->[1], should be $k";
        }
      }
    }
  }

  # Test BYTES
  if ($bytes != $self->[BYTES]) {
    $good = 0;
    _ci_warn "Total data in cache is $bytes, expected $self->[BYTES]";
  }

  # Test MAX
  if ($bytes > $self->[MAX]) {
    $good = 0;
    _ci_warn "Total data in cache is $bytes, exceeds maximum $self->[MAX]";
  }

  return $good;
}

sub delink {
  my $self = shift;
  $self->[HEAP] = undef;        # Bye bye heap
}

################################################################
#
# Tie::File::Heap
#
# Heap data structure for use by cache LRU routines

package Tie::File::Heap;
use Carp ':DEFAULT', 'confess';
$Tie::File::Heap::VERSION = $Tie::File::Cache::VERSION;
sub SEQ () { 0 };
sub KEY () { 1 };
sub DAT () { 2 };

sub new {
  my ($pack, $cache) = @_;
  die "$pack: Parent cache object $cache does not support _heap_move method"
    unless eval { $cache->can('_heap_move') };
  my $self = [[0,$cache,0]];
  bless $self => $pack;
}

# Allocate a new sequence number, larger than all previously allocated numbers
sub _nseq {
  my $self = shift;
  $self->[0][0]++;
}

sub _cache {
  my $self = shift;
  $self->[0][1];
}

sub _nelts {
  my $self = shift;
  $self->[0][2];
}

sub _nelts_inc {
  my $self = shift;
  ++$self->[0][2];
}

sub _nelts_dec {
  my $self = shift;
  --$self->[0][2];
}

sub is_empty {
  my $self = shift;
  $self->_nelts == 0;
}

sub empty {
  my $self = shift;
  $#$self = 0;
  $self->[0][2] = 0;
  $self->[0][0] = 0;            # might as well reset the sequence numbers
}

# notify the parent cache object that we moved something
sub _heap_move {
  my $self = shift;
  $self->_cache->_heap_move(@_);
}

# Insert a piece of data into the heap with the indicated sequence number.
# The item with the smallest sequence number is always at the top.
# If no sequence number is specified, allocate a new one and insert the
# item at the bottom.
sub insert {
  my ($self, $key, $data, $seq) = @_;
  $seq = $self->_nseq unless defined $seq;
  $self->_insert_new([$seq, $key, $data]);
}

# Insert a new, fresh item at the bottom of the heap
sub _insert_new {
  my ($self, $item) = @_;
  my $i = @$self;
  $i = int($i/2) until defined $self->[$i/2];
  $self->[$i] = $item;
  $self->[0][1]->_heap_move($self->[$i][KEY], $i);
  $self->_nelts_inc;
}

# Insert [$data, $seq] pair at or below item $i in the heap.
# If $i is omitted, default to 1 (the top element.)
sub _insert {
  my ($self, $item, $i) = @_;
#  $self->_check_loc($i) if defined $i;
  $i = 1 unless defined $i;
  until (! defined $self->[$i]) {
    if ($self->[$i][SEQ] > $item->[SEQ]) { # inserted item is older
      ($self->[$i], $item) = ($item, $self->[$i]);
      $self->[0][1]->_heap_move($self->[$i][KEY], $i);
    }
    # If either is undefined, go that way.  Otherwise, choose at random
    my $dir;
    $dir = 0 if !defined $self->[2*$i];
    $dir = 1 if !defined $self->[2*$i+1];
    $dir = int(rand(2)) unless defined $dir;
    $i = 2*$i + $dir;
  }
  $self->[$i] = $item;
  $self->[0][1]->_heap_move($self->[$i][KEY], $i);
  $self->_nelts_inc;
}

# Remove the item at node $i from the heap, moving child items upwards.
# The item with the smallest sequence number is always at the top.
# Moving items upwards maintains this condition.
# Return the removed item.  Return undef if there was no item at node $i.
sub remove {
  my ($self, $i) = @_;
  $i = 1 unless defined $i;
  my $top = $self->[$i];
  return unless defined $top;
  while (1) {
    my $ii;
    my ($L, $R) = (2*$i, 2*$i+1);

    # If either is undefined, go the other way.
    # Otherwise, go towards the smallest.
    last unless defined $self->[$L] || defined $self->[$R];
    $ii = $R if not defined $self->[$L];
    $ii = $L if not defined $self->[$R];
    unless (defined $ii) {
      $ii = $self->[$L][SEQ] < $self->[$R][SEQ] ? $L : $R;
    }

    $self->[$i] = $self->[$ii]; # Promote child to fill vacated spot
    $self->[0][1]->_heap_move($self->[$i][KEY], $i);
    $i = $ii;                   # Fill new vacated spot
  }
  $self->[0][1]->_heap_move($top->[KEY], undef);
  undef $self->[$i];
  $self->_nelts_dec;
  return $top->[DAT];
}

sub popheap {
  my $self = shift;
  $self->remove(1);
}

# set the sequence number of the indicated item to a higher number
# than any other item in the heap, and bubble the item down to the
# bottom.
sub promote {
  my ($self, $n) = @_;
#  $self->_check_loc($n);
  $self->[$n][SEQ] = $self->_nseq;
  my $i = $n;
  while (1) {
    my ($L, $R) = (2*$i, 2*$i+1);
    my $dir;
    last unless defined $self->[$L] || defined $self->[$R];
    $dir = $R unless defined $self->[$L];
    $dir = $L unless defined $self->[$R];
    unless (defined $dir) {
      $dir = $self->[$L][SEQ] < $self->[$R][SEQ] ? $L : $R;
    }
    @{$self}[$i, $dir] = @{$self}[$dir, $i];
    for ($i, $dir) {
      $self->[0][1]->_heap_move($self->[$_][KEY], $_) if defined $self->[$_];
    }
    $i = $dir;
  }
}

# Return item $n from the heap, promoting its LRU status
sub lookup {
  my ($self, $n) = @_;
#  $self->_check_loc($n);
  my $val = $self->[$n];
  $self->promote($n);
  $val->[DAT];
}


# Assign a new value for node $n, promoting it to the bottom of the heap
sub set_val {
  my ($self, $n, $val) = @_;
#  $self->_check_loc($n);
  my $oval = $self->[$n][DAT];
  $self->[$n][DAT] = $val;
  $self->promote($n);
  return $oval;
}

# The hash key has changed for an item;
# alter the heap's record of the hash key
sub rekey {
  my ($self, $n, $new_key) = @_;
#  $self->_check_loc($n);
  $self->[$n][KEY] = $new_key;
}

sub _check_loc {
  my ($self, $n) = @_;
  unless (1 || defined $self->[$n]) {
    confess "_check_loc($n) failed";
  }
}

BEGIN { *_ci_warn = \&Tie::File::_ci_warn }

sub _check_integrity {
  my $self = shift;
  my $good = 1;
  my %seq;

  unless (eval {$self->[0][1]->isa("Tie::File::Cache")}) {
    _ci_warn "Element 0 of heap corrupt";
    $good = 0;
  }
  $good = 0 unless $self->_satisfies_heap_condition(1);
  for my $i (2 .. $#{$self}) {
    my $p = int($i/2);          # index of parent node
    if (defined $self->[$i] && ! defined $self->[$p]) {
      _ci_warn "Element $i of heap defined, but parent $p isn't";
      $good = 0;
    }

    if (defined $self->[$i]) {
      if ($seq{$self->[$i][SEQ]}) {
        my $seq = $self->[$i][SEQ];
        _ci_warn "Nodes $i and $seq{$seq} both have SEQ=$seq";
        $good = 0;
      } else {
        $seq{$self->[$i][SEQ]} = $i;
      }
    }
  }

  return $good;
}

sub _satisfies_heap_condition {
  my $self = shift;
  my $n = shift || 1;
  my $good = 1;
  for (0, 1) {
    my $c = $n*2 + $_;
    next unless defined $self->[$c];
    # Compare sequence numbers; a parent must predate its children.
    if ($self->[$n][SEQ] >= $self->[$c][SEQ]) {
      _ci_warn "Node $n of heap does not predate node $c";
      $good = 0;
    }
    $good = 0 unless $self->_satisfies_heap_condition($c);
  }
  return $good;
}

# Return a list of all the keys, sorted by expiration order
sub expire_order {
  my $self = shift;
  my @nodes = sort {$a->[SEQ] <=> $b->[SEQ]} $self->_nodes;
  map { $_->[KEY] } @nodes;
}

sub _nodes {
  my $self = shift;
  my $i = shift || 1;
  return unless defined $self->[$i];
  ($self->[$i], $self->_nodes($i*2), $self->_nodes($i*2+1));
}

1;

__END__

=head1 NAME

Tie::File - Access the lines of a disk file via a Perl array

=head1 SYNOPSIS

    use Tie::File;

    tie @array, 'Tie::File', filename or die ...;

    $array[0] = 'blah';      # first line of the file is now 'blah'
                             # (line numbering starts at 0)
    print $array[42];        # display line 43 of the file

    $n_recs = @array;        # how many records are in the file?
    $#array -= 2;            # chop two records off the end


    for (@array) {
        s/PERL/Perl/g;       # Replace PERL with Perl everywhere in the file
    }

    # These are just like regular push, pop, unshift, shift, and splice
    # Except that they modify the file in the way you would expect

    push @array, new recs...;
    my $r1 = pop @array;
    unshift @array, new recs...;
    my $r2 = shift @array;
    @old_recs = splice @array, 3, 7, new recs...;

    untie @array;            # all finished


=head1 DESCRIPTION

C<Tie::File> represents a regular text file as a Perl array.  Each
element in the array corresponds to a record in the file.  The first
line of the file is element 0 of the array; the second line is element
1, and so on.

The file is I<not> loaded into memory, so this will work even for
gigantic files.

Changes to the array are reflected in the file immediately.

Lazy people and beginners may now stop reading the manual.

=head2 C<recsep>

What is a 'record'?  By default, the meaning is the same as for the
C<E<lt>...E<gt>> operator: It's a string terminated by C<$/>, which is
probably C<"\n">.  (Minor exception: on DOS and Win32 systems, a
'record' is a string terminated by C<"\r\n">.)  You may change the
definition of "record" by supplying the C<recsep> option in the C<tie>
call:

    tie @array, 'Tie::File', $file, recsep => 'es';

This says that records are delimited by the string C<es>.  If the file
contained the following data:

    Curse these pesky flies!\n

then the C<@array> would appear to have four elements:

    "Curse th"
    "e p"
    "ky fli"
    "!\n"

An undefined value is not permitted as a record separator.
Perl's special "paragraph mode" semantics (E<agrave> la C<$/ = "">) are
not emulated.

Records read from the tied array do not have the record separator
string on the end; this is to allow

    $array[17] .= "extra";

to work as expected.

(See L<"autochomp">, below.)  Records stored into the array will have
the record separator string appended before they are written to the
file, if they don't have one already.  For example, if the record
separator string is C<"\n">, then the following two lines do exactly
the same thing:

    $array[17] = "Cherry pie";
    $array[17] = "Cherry pie\n";

The result is that the contents of line 17 of the file will be
replaced with "Cherry pie"; a newline character will separate line 17
from line 18.  This means that this code will do nothing:

    chomp $array[17];

because the C<chomp>ed value will have the separator reattached when
it is written back to the file.  There is no way to create a file
whose trailing record separator string is missing.

Inserting records that I<contain> the record separator string is not
supported by this module.  It will probably produce a reasonable
result, but what this result will be may change in a future version.
Use C<splice> to insert records or to replace one record with several.

=head2 C<autochomp>

Normally, array elements have the record separator removed, so that if
the file contains the text

    Gold
    Frankincense
    Myrrh

the tied array will appear to contain C<("Gold", "Frankincense",
"Myrrh")>.  If you set C<autochomp> to a false value, the record
separator will not be removed.  If the file above was tied with

    tie @gifts, "Tie::File", $gifts, autochomp => 0;

then the array C<@gifts> would appear to contain C<("Gold\n",
"Frankincense\n", "Myrrh\n")>, or (on Win32 systems) C<("Gold\r\n",
"Frankincense\r\n", "Myrrh\r\n")>.

=head2 C<mode>

Normally, the specified file will be opened for read and write access,
and will be created if it does not exist.  (That is, the flags
C<O_RDWR | O_CREAT> are supplied in the C<open> call.)  If you want to
change this, you may supply alternative flags in the C<mode> option.
See L<Fcntl> for a listing of available flags.
For example:

    # open the file if it exists, but fail if it does not exist
    use Fcntl 'O_RDWR';
    tie @array, 'Tie::File', $file, mode => O_RDWR;

    # create the file if it does not exist
    use Fcntl 'O_RDWR', 'O_CREAT';
    tie @array, 'Tie::File', $file, mode => O_RDWR | O_CREAT;

    # open an existing file in read-only mode
    use Fcntl 'O_RDONLY';
    tie @array, 'Tie::File', $file, mode => O_RDONLY;

Opening the data file in write-only or append mode is not supported.

=head2 C<memory>

This is an upper limit on the amount of memory that C<Tie::File> will
consume at any time while managing the file.  This is used for two
things: managing the I<read cache> and managing the I<deferred write
buffer>.

Records read in from the file are cached, to avoid having to re-read
them repeatedly.  If you read the same record twice, the first time it
will be stored in memory, and the second time it will be fetched from
the I<read cache>.  The amount of data in the read cache will not
exceed the value you specified for C<memory>.  If C<Tie::File> wants
to cache a new record, but the read cache is full, it will make room
by expiring the least-recently visited records from the read cache.

The default memory limit is 2MiB.  You can adjust the maximum read
cache size by supplying the C<memory> option.  The argument is the
desired cache size, in bytes.

    # I have a lot of memory, so use a large cache to speed up access
    tie @array, 'Tie::File', $file, memory => 20_000_000;

Setting the memory limit to 0 will inhibit caching; records will be
fetched from disk every time you examine them.
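
For example, a script that scans the file only once gets no benefit
from the cache, and might reasonably turn it off entirely:

    # no read cache; every fetch goes to the disk
    tie @array, 'Tie::File', $file, memory => 0;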

The C<memory> value is not an absolute or exact limit on the memory
used.  C<Tie::File> objects contain some structures besides the read
cache and the deferred write buffer, whose sizes are not charged
against C<memory>.

The cache itself consumes about 310 bytes per cached record, so if
your file has many short records, you may want to decrease the cache
memory limit, or else the cache overhead may exceed the size of the
cached data.

=head2 C<dw_size>

(This is an advanced feature.  Skip this section on first reading.)

If you use deferred writing (see L<"Deferred Writing">, below) then
data you write into the array will not be written directly to the
file; instead, it will be saved in the I<deferred write buffer> to be
written out later.  Data in the deferred write buffer is also charged
against the memory limit you set with the C<memory> option.

You may set the C<dw_size> option to limit the amount of data that can
be saved in the deferred write buffer.  This limit may not exceed the
total memory limit.  For example, if you set C<dw_size> to 1000 and
C<memory> to 2500, then no more than 1000 bytes of deferred writes
will be saved up.  The space available for the read cache will vary,
but it will always be at least 1500 bytes (if the deferred write
buffer is full) and it could grow as large as 2500 bytes (if the
deferred write buffer is empty).

If you don't specify a C<dw_size>, it defaults to the entire memory
limit.
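
For example, the 1000/2500 split described above could be requested
like this (a sketch; adjust the numbers to taste):

    # at most 1000 bytes of deferred writes, 2500 bytes of memory total
    tie @array, 'Tie::File', $file, memory => 2500, dw_size => 1000;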

=head2 Option Format

C<-mode> is a synonym for C<mode>.  C<-recsep> is a synonym for
C<recsep>.  C<-memory> is a synonym for C<memory>.  You get the
idea.

=head1 Public Methods

The C<tie> call returns an object, say C<$o>.  You may call

    $rec = $o->FETCH($n);
    $o->STORE($n, $rec);

to fetch or store the record at line C<$n>, respectively; similarly
the other tied array methods.  (See L<perltie> for details.)  You may
also call the following methods on this object:

=head2 C<flock>

    $o->flock(MODE)

will lock the tied file.  C<MODE> has the same meaning as the second
argument to the Perl built-in C<flock> function; for example
C<LOCK_SH> or C<LOCK_EX | LOCK_NB>.  (These constants are provided by
the C<use Fcntl ':flock'> declaration.)

C<MODE> is optional; the default is C<LOCK_EX>.

C<Tie::File> maintains an internal table of the byte offset of each
record it has seen in the file.

When you use C<flock> to lock the file, C<Tie::File> assumes that the
read cache is no longer trustworthy, because another process might
have modified the file since the last time it was read.  Therefore, a
successful call to C<flock> discards the contents of the read cache
and the internal record offset table.

C<Tie::File> promises that the following sequence of operations will
be safe:

    my $o = tie @array, "Tie::File", $filename;
    $o->flock;

In particular, C<Tie::File> will I<not> read or write the file during
the C<tie> call.  (Exception: using C<mode =E<gt> O_TRUNC> will, of
course, erase the file during the C<tie> call.  If you want to do this
safely, then open the file without C<O_TRUNC>, lock the file, and use
C<@array = ()>.)

The best way to unlock a file is to discard the object and untie the
array.  It is probably unsafe to unlock the file without also untying
it, because if you do, changes may remain unwritten inside the object.
That is why there is no shortcut for unlocking.  If you really want to
unlock the file prematurely, you know what to do; if you don't know
what to do, then don't do it.

All the usual warnings about file locking apply here.  In particular,
note that file locking in Perl is B<advisory>, which means that
holding a lock will not prevent anyone else from reading, writing, or
erasing the file; it only prevents them from getting another lock at
the same time.  Locks are analogous to green traffic lights: if you
have a green light, that does not prevent the idiot coming the other
way from plowing into you sideways; it merely guarantees to you that
the idiot does not also have a green light at the same time.

=head2 C<autochomp>

    my $old_value = $o->autochomp(0);    # disable autochomp option
    my $old_value = $o->autochomp(1);    # enable autochomp option

    my $ac = $o->autochomp();            # recover current value

See L<"autochomp">, above.

=head2 C<defer>, C<flush>, C<discard>, and C<autodefer>

See L<"Deferred Writing">, below.

=head2 C<offset>

    $off = $o->offset($n);

This method returns the byte offset of the start of the C<$n>th record
in the file.  If there is no such record, it returns an undefined
value.

=head1 Tying to an already-opened filehandle

If C<$fh> is a filehandle, such as is returned by C<IO::File> or one
of the other C<IO> modules, you may use:

    tie @array, 'Tie::File', $fh, ...;

Similarly, if you opened that handle C<FH> with regular C<open> or
C<sysopen>, you may use:

    tie @array, 'Tie::File', \*FH, ...;

Handles that were opened write-only won't work.  Handles that were
opened read-only will work as long as you don't try to modify the
array.  Handles must be attached to seekable sources of data---that
means no pipes or sockets.  If C<Tie::File> can detect that you
supplied a non-seekable handle, the C<tie> call will throw an
exception.  (On Unix systems, it can detect this.)

Note that C<Tie::File> will close only the filehandles that it opened
internally.  If you passed in a filehandle as above, you "own" that
filehandle, and are responsible for closing it after you have untied
the C<@array>.

C<Tie::File> calls C<binmode> on filehandles that it opens internally,
but not on filehandles passed in by the user.  For consistency,
especially if using the tied files cross-platform, you may wish to
call C<binmode> on the filehandle prior to tying the file.
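
For example (a minimal sketch; this assumes you want to open and
manage the handle yourself, and that the file already exists):

    open my $fh, '+<', $file or die "can't open $file: $!";
    binmode $fh;
    tie @array, 'Tie::File', $fh or die "can't tie $file";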

=head1 Deferred Writing

(This is an advanced feature.  Skip this section on first reading.)

Normally, modifying a C<Tie::File> array writes to the underlying file
immediately.  Every assignment like C<$a[3] = ...> rewrites as much of
the file as is necessary; typically, everything from line 3 through
the end will need to be rewritten.  This is the simplest and most
transparent behavior.  Performance even for large files is reasonably
good.

However, under some circumstances, this behavior may be excessively
slow.  For example, suppose you have a million-record file, and you
want to do:

    for (@FILE) {
        $_ = "> $_";
    }

The first time through the loop, you will rewrite the entire file,
from line 0 through the end.  The second time through the loop, you
will rewrite the entire file from line 1 through the end.  The third
time through the loop, you will rewrite the entire file from line 2 to
the end.  And so on.

If the performance in such cases is unacceptable, you may defer the
actual writing, and then have it done all at once.  The following loop
will perform much better for large files:

    (tied @a)->defer;
    for (@a) {
        $_ = "> $_";
    }
    (tied @a)->flush;

If C<Tie::File>'s memory limit is large enough, all the writing will
be done in memory.  Then, when you call C<-E<gt>flush>, the entire
file will be rewritten in a single pass.

(Actually, the preceding discussion is something of a fib.  You don't
need to enable deferred writing to get good performance for this
common case, because C<Tie::File> will do it for you automatically
unless you specifically tell it not to.  See L</Autodeferring>,
below.)

Calling C<-E<gt>flush> returns the array to immediate-write mode.  If
you wish to discard the deferred writes, you may call C<-E<gt>discard>
instead of C<-E<gt>flush>.  Note that in some cases, some of the data
will have been written already, and it will be too late for
C<-E<gt>discard> to discard all the changes.  Support for
C<-E<gt>discard> may be withdrawn in a future version of C<Tie::File>.

Deferred writes are cached in memory up to the limit specified by the
C<dw_size> option (see above).  If the deferred-write buffer is full
and you try to write still more deferred data, the buffer will be
flushed.  All buffered data will be written immediately, the buffer
will be emptied, and the now-empty space will be used for future
deferred writes.

If the deferred-write buffer isn't yet full, but the total size of the
buffer and the read cache would exceed the C<memory> limit, the oldest
records will be expired from the read cache until the total size is
under the limit.

C<push>, C<pop>, C<shift>, C<unshift>, and C<splice> cannot be
deferred.  When you perform one of these operations, any deferred data
is written to the file and the operation is performed immediately.
This may change in a future version.

If you resize the array with deferred writing enabled, the file will
be resized immediately, but deferred records will not be written.
This has a surprising consequence: C<@a = (...)> erases the file
immediately, but the writing of the actual data is deferred.  This
might be a bug.  If it is a bug, it will be fixed in a future version.

=head2 Autodeferring

C<Tie::File> tries to guess when deferred writing might be helpful,
and to turn it on and off automatically.

    for (@a) {
        $_ = "> $_";
    }

In this example, only the first two assignments will be done
immediately; after this, all the changes to the file will be deferred
up to the user-specified memory limit.

You should usually be able to ignore this and just use the module
without thinking about deferring.  However, special applications may
require fine control over which writes are deferred, or may require
that all writes be immediate.  To disable the autodeferment feature,
use

    (tied @o)->autodefer(0);

or

    tie @array, 'Tie::File', $file, autodefer => 0;

Similarly, C<-E<gt>autodefer(1)> re-enables autodeferment, and
C<-E<gt>autodefer()> recovers the current value of the autodefer
setting.

=head1 CONCURRENT ACCESS TO FILES

Caching and deferred writing are inappropriate if you want the same
file to be accessed simultaneously from more than one process.  Other
optimizations performed internally by this module are also
incompatible with concurrent access.  A future version of this module
will support a C<concurrent =E<gt> 1> option that enables safe
concurrent access.

Previous versions of this documentation suggested using C<memory
=E<gt> 0> for safe concurrent access.  This was mistaken.  Tie::File
will not support safe concurrent access before version 0.96.

=head1 CAVEATS

(That's Latin for 'warnings'.)

=over 4

=item *

Reasonable effort was made to make this module efficient.  Nevertheless,
changing the size of a record in the middle of a large file will
always be fairly slow, because everything after the new record must be
moved.

=item *

The behavior of tied arrays is not precisely the same as for regular
arrays.  For example:

    # This DOES print "How unusual!"
    undef $a[10];  print "How unusual!\n" if defined $a[10];

C<undef>-ing a C<Tie::File> array element just blanks out the
corresponding record in the file.  When you read it back again, you'll
get the empty string, so the supposedly-C<undef>'ed value will be
defined.  Similarly, if you have C<autochomp> disabled, then

    # This DOES print "How unusual!" if 'autochomp' is disabled
    undef $a[10];
    print "How unusual!\n" if $a[10];

because when C<autochomp> is disabled, C<$a[10]> will read back as
C<"\n"> (or whatever the record separator string is).

There are other minor differences, particularly regarding C<exists>
and C<delete>, but in general, the correspondence is extremely close.

=item *

I have supposed that since this module is concerned with file I/O,
almost all normal use of it will be heavily I/O bound.  This means
that the time to maintain complicated data structures inside the
module will be dominated by the time to actually perform the I/O.
When there was an opportunity to spend CPU time to avoid doing I/O, I
usually tried to take it.

=item *

You might be tempted to think that deferred writing is like
transactions, with C<flush> as C<commit> and C<discard> as
C<rollback>, but it isn't, so don't.

=item *

There is a large memory overhead for each record offset and for each
cache entry: about 310 bytes per cached data record, and about 21 bytes
per offset table entry.

The per-record overhead will limit the maximum number of records you
can access per file.  Note that I<accessing> the length of the array
via C<$x = scalar @tied_file> accesses B<all> records and stores their
offsets.  The same is true for C<foreach (@tied_file)>, even if you
exit the loop early.  (A rough worked example follows this list.)

=back
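
As a rough illustration of that last point (the 310-byte and 21-byte
figures above are approximate), suppose you traverse a
ten-million-record file once:

    10_000_000 records x 21 bytes  =  about 210 MB for the offset table

and every record that also ends up in the read cache costs roughly
another 310 bytes on top of the record data itself.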

=head1 SUBCLASSING

This version promises absolutely nothing about the internals, which
may change without notice.  A future version of the module will have a
well-defined and stable subclassing API.

=head1 WHAT ABOUT C<DB_File>?

People sometimes point out that L<DB_File> will do something similar,
and ask why the C<Tie::File> module is necessary.

There are a number of reasons that you might prefer C<Tie::File>.
A list is available at C<L<http://perl.plover.com/TieFile/why-not-DB_File>>.

=head1 AUTHOR

Mark Jason Dominus

To contact the author, send email to: C<mjd-perl-tiefile+@plover.com>

To receive an announcement whenever a new version of this module is
released, send a blank email message to
C<mjd-perl-tiefile-subscribe@plover.com>.

The most recent version of this module, including documentation and
any news of importance, will be available at

    http://perl.plover.com/TieFile/

=head1 LICENSE

C<Tie::File> version 0.96 is copyright (C) 2003 Mark Jason Dominus.

This library is free software; you may redistribute it and/or modify
it under the same terms as Perl itself.

These terms are your choice of any of (1) the Perl Artistic Licence,
or (2) version 2 of the GNU General Public License as published by the
Free Software Foundation, or (3) any later version of the GNU General
Public License.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this library program; it should be in the file C<COPYING>.
If not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

For licensing inquiries, contact the author at:

    Mark Jason Dominus
    255 S. Warnock St.
    Philadelphia, PA 19107

=head1 WARRANTY

C<Tie::File> version 0.98 comes with ABSOLUTELY NO WARRANTY.
For details, see the license.

=head1 THANKS

Gigantic thanks to Jarkko Hietaniemi, for agreeing to put this in the
core when I hadn't written it yet, and for generally being helpful,
supportive, and competent.  (Usually the rule is "choose any one.")
Also big thanks to Abhijit Menon-Sen for all of the same things.

Special thanks to Craig Berry and Peter Prymmer (for VMS portability
help), Randy Kobes (for Win32 portability help), Clinton Pierce and
Autrijus Tang (for heroic eleventh-hour Win32 testing above and beyond
the call of duty), Michael G Schwern (for testing advice), and the
rest of the CPAN testers (for testing generally).

Special thanks to Tels for suggesting several speed and memory
optimizations.

Additional thanks to:
Edward Avis /
Mattia Barbon /
Tom Christiansen /
Gerrit Haase /
Gurusamy Sarathy /
Jarkko Hietaniemi (again) /
Nikola Knezevic /
John Kominetz /
Nick Ing-Simmons /
Tassilo von Parseval /
H. Dieter Pearcey /
Slaven Rezic /
Eric Roode /
Peter Scott /
Peter Somu /
Autrijus Tang (again) /
Tels (again) /
Juerd Waalboer /
Todd Rinaldo

=head1 TODO

More tests.  (Stuff I didn't think of yet.)

Paragraph mode?

Fixed-length mode.  Leave-blanks mode.

Maybe an autolocking mode?

For many common uses of the module, the read cache is a liability.
For example, a program that inserts a single record, or that scans the
file once, will have a cache hit rate of zero.  This suggests a major
optimization: the cache should be initially disabled.  Here's a hybrid
approach: initially, the cache is disabled, but the cache code
maintains statistics about how high the hit rate would be *if* it were
enabled.  When it sees the hit rate get high enough, it enables
itself.  The STAT comments in this code are the beginning of an
implementation of this.

Record locking with fcntl()?  Then the module might support an undo
log and get real transactions.  What a tour de force that would be.

Keeping track of the highest cached record.  This would allow
reads-in-a-row to skip the cache lookup faster (if reading from 1..N
with an empty cache at the start, the last cached value will always be
N-1).

More tests.

=cut