/*++

Copyright (c) 1990-2000 Microsoft Corporation

Module Name:

    AllocSup.c

Abstract:

    This module implements the Allocation support routines for Fat.


--*/

#include "fatprocs.h"

//
//  The Bug check file id for this module
//

#define BugCheckFileId                   (FAT_BUG_CHECK_ALLOCSUP)

//
//  Local debug trace level
//

#define Dbg                              (DEBUG_TRACE_ALLOCSUP)

#define FatMin(a, b)    ((a) < (b) ? (a) : (b))

//
//  Define prefetch page count for the FAT
//

#define FAT_PREFETCH_PAGE_COUNT         0x100

//
//  Local support routine prototypes
//

VOID
FatLookupFatEntry (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb,
    IN ULONG FatIndex,
    IN OUT PULONG FatEntry,
    IN OUT PFAT_ENUMERATION_CONTEXT Context
    );

VOID
FatSetFatRun (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb,
    IN ULONG StartingFatIndex,
    IN ULONG ClusterCount,
    IN BOOLEAN ChainTogether
    );

UCHAR
FatLogOf (
    IN ULONG Value
    );

//
//  Note that the KdPrint below will ONLY fire when the assert does.  Leave it
//  alone.
//

#if DBG
#define ASSERT_CURRENT_WINDOW_GOOD(VCB) {                                        \
    ULONG FreeClusterBitMapClear;                                                \
    NT_ASSERT( (VCB)->FreeClusterBitMap.Buffer != NULL );                        \
    FreeClusterBitMapClear = RtlNumberOfClearBits(&(VCB)->FreeClusterBitMap);    \
    if ((VCB)->CurrentWindow->ClustersFree != FreeClusterBitMapClear) {          \
        KdPrint(("FAT: ClustersFree %x h != FreeClusterBitMapClear %x h\n",      \
                 (VCB)->CurrentWindow->ClustersFree,                             \
                 FreeClusterBitMapClear));                                       \
    }                                                                            \
    NT_ASSERT( (VCB)->CurrentWindow->ClustersFree == FreeClusterBitMapClear );   \
}
#else
#define ASSERT_CURRENT_WINDOW_GOOD(VCB)
#endif

//
//  The following macros provide a convenient way of hiding the details
//  of bitmap allocation schemes.
//

//
//  VOID
//  FatLockFreeClusterBitMap (
//      IN PVCB Vcb
//      );
//

#define FatLockFreeClusterBitMap(VCB) {                         \
    NT_ASSERT(KeAreApcsDisabled());                             \
    ExAcquireFastMutexUnsafe( &(VCB)->FreeClusterBitMapMutex ); \
    ASSERT_CURRENT_WINDOW_GOOD(VCB)                             \
}

//
//  VOID
//  FatUnlockFreeClusterBitMap (
//      IN PVCB Vcb
//      );
//

#define FatUnlockFreeClusterBitMap(VCB) {                       \
    ASSERT_CURRENT_WINDOW_GOOD(VCB)                             \
    NT_ASSERT(KeAreApcsDisabled());                             \
    ExReleaseFastMutexUnsafe( &(VCB)->FreeClusterBitMapMutex ); \
}

//
//  BOOLEAN
//  FatIsClusterFree (
//      IN PIRP_CONTEXT IrpContext,
//      IN PVCB Vcb,
//      IN ULONG FatIndex
//      );
//

#define FatIsClusterFree(IRPCONTEXT,VCB,FAT_INDEX)              \
    (RtlCheckBit(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2) == 0)

//
//  VOID
//  FatFreeClusters (
//      IN PIRP_CONTEXT IrpContext,
//      IN PVCB Vcb,
//      IN ULONG FatIndex,
//      IN ULONG ClusterCount
//      );
//

#define FatFreeClusters(IRPCONTEXT,VCB,FAT_INDEX,CLUSTER_COUNT) {                \
    if ((CLUSTER_COUNT) == 1) {                                                  \
        FatSetFatEntry((IRPCONTEXT),(VCB),(FAT_INDEX),FAT_CLUSTER_AVAILABLE);    \
    } else {                                                                     \
        FatSetFatRun((IRPCONTEXT),(VCB),(FAT_INDEX),(CLUSTER_COUNT),FALSE);      \
    }                                                                            \
}

//
//  VOID
//  FatAllocateClusters (
//      IN PIRP_CONTEXT IrpContext,
//      IN PVCB Vcb,
//      IN ULONG FatIndex,
//      IN ULONG ClusterCount
//      );
//

#define FatAllocateClusters(IRPCONTEXT,VCB,FAT_INDEX,CLUSTER_COUNT) {            \
    if ((CLUSTER_COUNT) == 1) {                                                  \
        FatSetFatEntry((IRPCONTEXT),(VCB),(FAT_INDEX),FAT_CLUSTER_LAST);         \
    } else {                                                                     \
        FatSetFatRun((IRPCONTEXT),(VCB),(FAT_INDEX),(CLUSTER_COUNT),TRUE);       \
    }                                                                            \
}

//
//  VOID
//  FatUnreserveClusters (
//      IN PIRP_CONTEXT IrpContext,
//      IN PVCB Vcb,
//      IN ULONG FatIndex,
//      IN ULONG ClusterCount
//      );
//

#define FatUnreserveClusters(IRPCONTEXT,VCB,FAT_INDEX,CLUSTER_COUNT) {                       \
    NT_ASSERT( (FAT_INDEX) + (CLUSTER_COUNT) - 2 <= (VCB)->FreeClusterBitMap.SizeOfBitMap ); \
    NT_ASSERT( (FAT_INDEX) >= 2);                                                            \
    RtlClearBits(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2,(CLUSTER_COUNT));                   \
    if ((FAT_INDEX) < (VCB)->ClusterHint) {                                                  \
        (VCB)->ClusterHint = (FAT_INDEX);                                                    \
    }                                                                                        \
}

//
//  VOID
//  FatReserveClusters (
//      IN PIRP_CONTEXT IrpContext,
//      IN PVCB Vcb,
//      IN ULONG FatIndex,
//      IN ULONG ClusterCount
//      );
//
//  Handle wrapping the hint back to the front.
//

#define FatReserveClusters(IRPCONTEXT,VCB,FAT_INDEX,CLUSTER_COUNT) {                             \
    ULONG _AfterRun = (FAT_INDEX) + (CLUSTER_COUNT);                                             \
    NT_ASSERT( (FAT_INDEX) + (CLUSTER_COUNT) - 2 <= (VCB)->FreeClusterBitMap.SizeOfBitMap );     \
    NT_ASSERT( (FAT_INDEX) >= 2);                                                                \
    RtlSetBits(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2,(CLUSTER_COUNT));                         \
                                                                                                 \
    if (_AfterRun - 2 >= (VCB)->FreeClusterBitMap.SizeOfBitMap) {                                \
        _AfterRun = 2;                                                                           \
    }                                                                                            \
    if (RtlCheckBit(&(VCB)->FreeClusterBitMap, _AfterRun - 2)) {                                 \
        (VCB)->ClusterHint = RtlFindClearBits( &(VCB)->FreeClusterBitMap, 1, _AfterRun - 2) + 2; \
        if (1 == (VCB)->ClusterHint) {                                                           \
            (VCB)->ClusterHint = 2;                                                              \
        }                                                                                        \
    }                                                                                            \
    else {                                                                                       \
        (VCB)->ClusterHint = _AfterRun;                                                          \
    }                                                                                            \
}
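
//
//  For illustration (a hedged sketch, not from the original source): the
//  free cluster bitmap is biased by two because FAT entries 0 and 1 are
//  reserved, so bit N of the bitmap describes cluster N + 2:
//
//      Cluster 2 (first data cluster)  ->  bit 0
//      Cluster 9                       ->  bit 7
//
//  FatReserveClusters above also advances Vcb->ClusterHint just past the
//  run it reserved, wrapping back to cluster 2 if the run ended at the
//  last bit of the bitmap, so the next scan starts at a spot that is
//  plausibly free.
//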

//
//  ULONG
//  FatFindFreeClusterRun (
//      IN PIRP_CONTEXT IrpContext,
//      IN PVCB Vcb,
//      IN ULONG ClusterCount,
//      IN ULONG AlternateClusterHint
//      );
//
//  Do a special check if only one cluster is desired.
//

#define FatFindFreeClusterRun(IRPCONTEXT,VCB,CLUSTER_COUNT,CLUSTER_HINT) ( \
    (CLUSTER_COUNT == 1) &&                                                \
    FatIsClusterFree((IRPCONTEXT), (VCB), (CLUSTER_HINT)) ?                \
    (CLUSTER_HINT) :                                                       \
    RtlFindClearBits( &(VCB)->FreeClusterBitMap,                           \
                      (CLUSTER_COUNT),                                     \
                      (CLUSTER_HINT) - 2) + 2                              \
)

//
//  FAT32: Define the maximum size of the FreeClusterBitMap to be the
//  maximum size of a FAT16 FAT.  If there are more clusters on the
//  volume than can be represented by this many bytes of bitmap, the
//  FAT will be split into "buckets", each of which does fit.
//
//  Note this count is in clusters/bits of bitmap.
//

#define MAX_CLUSTER_BITMAP_SIZE         (1 << 16)

//
//  Calculate the window a given cluster number is in.
//

#define FatWindowOfCluster(C)           (((C) - 2) / MAX_CLUSTER_BITMAP_SIZE)
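
//
//  For illustration, with MAX_CLUSTER_BITMAP_SIZE == 0x10000 a FAT32
//  volume with 0x28000 data clusters is covered by three windows
//  (hypothetical numbers):
//
//      Window 0:  clusters 0x00002 .. 0x10001
//      Window 1:  clusters 0x10002 .. 0x20001
//      Window 2:  clusters 0x20002 .. 0x28001
//
//  so FatWindowOfCluster( 0x10001 ) == 0 and
//  FatWindowOfCluster( 0x10002 ) == 1, the "- 2" again accounting for
//  the two reserved FAT entries.
//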

#ifdef ALLOC_PRAGMA
#pragma alloc_text(PAGE, FatAddFileAllocation)
#pragma alloc_text(PAGE, FatAllocateDiskSpace)
#pragma alloc_text(PAGE, FatDeallocateDiskSpace)
#pragma alloc_text(PAGE, FatExamineFatEntries)
#pragma alloc_text(PAGE, FatInterpretClusterType)
#pragma alloc_text(PAGE, FatLogOf)
#pragma alloc_text(PAGE, FatLookupFatEntry)
#pragma alloc_text(PAGE, FatLookupFileAllocation)
#pragma alloc_text(PAGE, FatLookupFileAllocationSize)
#pragma alloc_text(PAGE, FatMergeAllocation)
#pragma alloc_text(PAGE, FatSetFatEntry)
#pragma alloc_text(PAGE, FatSetFatRun)
#pragma alloc_text(PAGE, FatSetupAllocationSupport)
#pragma alloc_text(PAGE, FatSplitAllocation)
#pragma alloc_text(PAGE, FatTearDownAllocationSupport)
#pragma alloc_text(PAGE, FatTruncateFileAllocation)
#endif

#ifdef __REACTOS__
static
#endif
INLINE
ULONG
FatSelectBestWindow(
    IN PVCB Vcb
    )
/*++

Routine Description:

    Choose a window to allocate clusters from.  Order of preference is:

    1.  First window with >50% free clusters
    2.  First empty window
    3.  Window with greatest number of free clusters.

Arguments:

    Vcb - Supplies the Vcb for the volume

Return Value:

    'Best window' number (index into Vcb->Windows[])

--*/
{
    ULONG i, Fave = 0;
    ULONG MaxFree = 0;
    ULONG FirstEmpty = (ULONG)-1;
    ULONG ClustersPerWindow = MAX_CLUSTER_BITMAP_SIZE;

    NT_ASSERT( 1 != Vcb->NumberOfWindows);

    for (i = 0; i < Vcb->NumberOfWindows; i++) {

        if (Vcb->Windows[i].ClustersFree == ClustersPerWindow) {

            if (-1 == FirstEmpty) {

                //
                //  Keep note of the first empty window on the disc
                //

                FirstEmpty = i;
            }
        }
        else if (Vcb->Windows[i].ClustersFree > MaxFree) {

            //
            //  This window has the most free clusters, so far
            //

            MaxFree = Vcb->Windows[i].ClustersFree;
            Fave = i;

            //
            //  If this window has >50% free clusters, then we will take it,
            //  so don't bother considering more windows.
            //

            if (MaxFree >= (ClustersPerWindow >> 1)) {

                break;
            }
        }
    }

    //
    //  If there were no windows with 50% or more freespace, then select the
    //  first empty window on the disc, if any - otherwise we'll just go with
    //  the one with the most free clusters.
    //

    if ((MaxFree < (ClustersPerWindow >> 1)) && (-1 != FirstEmpty)) {

        Fave = FirstEmpty;
    }

    return Fave;
}


VOID
FatSetupAllocationSupport (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb
    )

/*++

Routine Description:

    This routine fills in the Allocation Support structure in the Vcb.
    Most entries are computed using fat.h macros supplied with data from
    the Bios Parameter Block.  The free cluster count, however, requires
    going to the Fat and actually counting free sectors.  At the same time
    the free cluster bit map is initialized.

Arguments:

    Vcb - Supplies the Vcb to fill in.

--*/

{
    ULONG BitIndex;
    ULONG ClustersDescribableByFat;

    PAGED_CODE();

    DebugTrace(+1, Dbg, "FatSetupAllocationSupport\n", 0);
    DebugTrace( 0, Dbg, "  Vcb = %p\n", Vcb);

    //
    //  Compute a number of fields for Vcb.AllocationSupport
    //

    Vcb->AllocationSupport.RootDirectoryLbo = FatRootDirectoryLbo( &Vcb->Bpb );
    Vcb->AllocationSupport.RootDirectorySize = FatRootDirectorySize( &Vcb->Bpb );

    Vcb->AllocationSupport.FileAreaLbo = FatFileAreaLbo( &Vcb->Bpb );

    Vcb->AllocationSupport.NumberOfClusters = FatNumberOfClusters( &Vcb->Bpb );

    Vcb->AllocationSupport.FatIndexBitSize = FatIndexBitSize( &Vcb->Bpb );

    Vcb->AllocationSupport.LogOfBytesPerSector = FatLogOf(Vcb->Bpb.BytesPerSector);
    Vcb->AllocationSupport.LogOfBytesPerCluster = FatLogOf(FatBytesPerCluster( &Vcb->Bpb ));
    Vcb->AllocationSupport.NumberOfFreeClusters = 0;


    //
    //  Deal with a bug in DOS 5 format: if the Fat is not big enough to
    //  describe all the clusters on the disk, reduce this number.  We expect
    //  that fat32 volumes will not have this problem.
    //
    //  Turns out this was not a good assumption.  We have to do this always now.
    //

    ClustersDescribableByFat = ( ((FatIsFat32(Vcb)? Vcb->Bpb.LargeSectorsPerFat :
                                                    Vcb->Bpb.SectorsPerFat) *
                                  Vcb->Bpb.BytesPerSector * 8)
                                 / FatIndexBitSize(&Vcb->Bpb) ) - 2;

    if (Vcb->AllocationSupport.NumberOfClusters > ClustersDescribableByFat) {

        Vcb->AllocationSupport.NumberOfClusters = ClustersDescribableByFat;
    }

    //
    //  Extend the virtual volume file to include the Fat
    //

    {
        CC_FILE_SIZES FileSizes;

        FileSizes.AllocationSize.QuadPart =
        FileSizes.FileSize.QuadPart = (FatReservedBytes( &Vcb->Bpb ) +
                                       FatBytesPerFat( &Vcb->Bpb ));
        FileSizes.ValidDataLength = FatMaxLarge;

        if ( Vcb->VirtualVolumeFile->PrivateCacheMap == NULL ) {

            FatInitializeCacheMap( Vcb->VirtualVolumeFile,
                                   &FileSizes,
                                   TRUE,
                                   &FatData.CacheManagerNoOpCallbacks,
                                   Vcb );

        } else {

            CcSetFileSizes( Vcb->VirtualVolumeFile, &FileSizes );
        }
    }

    _SEH2_TRY {

        if (FatIsFat32(Vcb) &&
            Vcb->AllocationSupport.NumberOfClusters > MAX_CLUSTER_BITMAP_SIZE) {

            Vcb->NumberOfWindows = (Vcb->AllocationSupport.NumberOfClusters +
                                    MAX_CLUSTER_BITMAP_SIZE - 1) /
                                   MAX_CLUSTER_BITMAP_SIZE;

        } else {

            Vcb->NumberOfWindows = 1;
        }

        Vcb->Windows = FsRtlAllocatePoolWithTag( PagedPool,
                                                 Vcb->NumberOfWindows * sizeof(FAT_WINDOW),
                                                 TAG_FAT_WINDOW );

        RtlInitializeBitMap( &Vcb->FreeClusterBitMap,
                             NULL,
                             0 );

        //
        //  Choose a FAT window to begin operation in.
        //

        if (Vcb->NumberOfWindows > 1) {

            //
            //  Read the fat and count up free clusters.  We bias by the two reserved
            //  entries in the FAT.
            //

            FatExamineFatEntries( IrpContext, Vcb,
                                  2,
                                  Vcb->AllocationSupport.NumberOfClusters + 2 - 1,
                                  TRUE,
                                  NULL,
                                  NULL);


            //
            //  Pick a window to begin allocating from
            //

            Vcb->CurrentWindow = &Vcb->Windows[ FatSelectBestWindow( Vcb)];

        } else {

            Vcb->CurrentWindow = &Vcb->Windows[0];

            //
            //  Carefully bias ourselves by the two reserved entries in the FAT.
            //

            Vcb->CurrentWindow->FirstCluster = 2;
            Vcb->CurrentWindow->LastCluster = Vcb->AllocationSupport.NumberOfClusters + 2 - 1;
        }

        //
        //  Now transition to the FAT window we have chosen.
        //

        FatExamineFatEntries( IrpContext, Vcb,
                              0,
                              0,
                              FALSE,
                              Vcb->CurrentWindow,
                              NULL);

        //
        //  Now set the ClusterHint to the first free bit in our favorite
        //  window (except the ClusterHint is off by two).
        //

        Vcb->ClusterHint =
            (BitIndex = RtlFindClearBits( &Vcb->FreeClusterBitMap, 1, 0 )) != -1 ?
                BitIndex + 2 : 2;

    } _SEH2_FINALLY {

        DebugUnwind( FatSetupAllocationSupport );

        //
        //  If we hit an exception, back out.
        //

        if (_SEH2_AbnormalTermination()) {

            FatTearDownAllocationSupport( IrpContext, Vcb );
        }
    } _SEH2_END;

    return;
}
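
//
//  For illustration, the ClustersDescribableByFat computation above turns
//  the FAT's size in bits into a cluster count.  With hypothetical FAT16
//  numbers, SectorsPerFat == 0x100 and BytesPerSector == 0x200:
//
//      (0x100 * 0x200 * 8) / 16 - 2  ==  0x10000 - 2  ==  0xFFFE
//
//  i.e. the number of 16 bit index entries the FAT holds, less the two
//  reserved entries.  A BPB claiming more clusters than this is clipped
//  to what the FAT can actually describe.
//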


VOID
FatTearDownAllocationSupport (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb
    )

/*++

Routine Description:

    This routine prepares the volume for closing.  Specifically, we must
    release the free fat bit map buffer, and uninitialize the dirty fat
    Mcb.

Arguments:

    Vcb - Supplies the Vcb to fill in.

Return Value:

    VOID

--*/

{
    DebugTrace(+1, Dbg, "FatTearDownAllocationSupport\n", 0);
    DebugTrace( 0, Dbg, "  Vcb = %p\n", Vcb);

    PAGED_CODE();

    //
    //  If there are FAT buckets, free them.
    //

    if ( Vcb->Windows != NULL ) {

        ExFreePool( Vcb->Windows );
        Vcb->Windows = NULL;
    }

    //
    //  Free the memory associated with the free cluster bitmap.
    //

    if ( Vcb->FreeClusterBitMap.Buffer != NULL ) {

        ExFreePool( Vcb->FreeClusterBitMap.Buffer );

        //
        //  NULL this field as a flag.
        //

        Vcb->FreeClusterBitMap.Buffer = NULL;
    }

    //
    //  And remove all the runs in the dirty fat Mcb
    //

    FatRemoveMcbEntry( Vcb, &Vcb->DirtyFatMcb, 0, 0xFFFFFFFF );

    DebugTrace(-1, Dbg, "FatTearDownAllocationSupport -> (VOID)\n", 0);

    UNREFERENCED_PARAMETER( IrpContext );

    return;
}


_Requires_lock_held_(_Global_critical_region_)
VOID
FatLookupFileAllocation (
    IN PIRP_CONTEXT IrpContext,
    IN PFCB FcbOrDcb,
    IN VBO Vbo,
    OUT PLBO Lbo,
    OUT PULONG ByteCount,
    OUT PBOOLEAN Allocated,
    OUT PBOOLEAN EndOnMax,
    OUT PULONG Index
    )

/*++

Routine Description:

    This routine looks up the existing mapping of VBO to LBO for a
    file/directory.  The information it queries is either stored in the
    mcb field of the fcb/dcb or it is stored in the fat table and
    needs to be retrieved and decoded, and updated in the mcb.

Arguments:

    FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being queried

    Vbo - Supplies the VBO whose LBO we want returned

    Lbo - Receives the LBO corresponding to the input Vbo if one exists

    ByteCount - Receives the number of bytes within the run that
        correspond to the input Vbo and output Lbo.

    Allocated - Receives TRUE if the Vbo does have a corresponding Lbo
        and FALSE otherwise.

    EndOnMax - Receives TRUE if the run ends in the maximal FAT cluster,
        which results in a fractional bytecount.

    Index - Receives the Index of the run

--*/

{
    VBO CurrentVbo;
    LBO CurrentLbo;
    LBO PriorLbo;

    VBO FirstVboOfCurrentRun = 0;
    LBO FirstLboOfCurrentRun;

    BOOLEAN LastCluster;
    ULONG Runs;

    PVCB Vcb;
    FAT_ENTRY FatEntry;
    ULONG BytesPerCluster;
    ULARGE_INTEGER BytesOnVolume;

    FAT_ENUMERATION_CONTEXT Context;

    PAGED_CODE();

    Vcb = FcbOrDcb->Vcb;


    DebugTrace(+1, Dbg, "FatLookupFileAllocation\n", 0);
    DebugTrace( 0, Dbg, "  FcbOrDcb   = %p\n", FcbOrDcb);
    DebugTrace( 0, Dbg, "  Vbo        = %8lx\n", Vbo);
    DebugTrace( 0, Dbg, "  pLbo       = %8lx\n", Lbo);
    DebugTrace( 0, Dbg, "  pByteCount = %8lx\n", ByteCount);
    DebugTrace( 0, Dbg, "  pAllocated = %8lx\n", Allocated);

    Context.Bcb = NULL;

    *EndOnMax = FALSE;

    //
    //  Check the trivial case that the mapping is already in our
    //  Mcb.
    //

    if ( FatLookupMcbEntry(Vcb, &FcbOrDcb->Mcb, Vbo, Lbo, ByteCount, Index) ) {

        *Allocated = TRUE;

        NT_ASSERT( *ByteCount != 0 );

        //
        //  Detect the overflow case, trim and claim the condition.
        //

        if (Vbo + *ByteCount == 0) {

            *EndOnMax = TRUE;
        }

        DebugTrace( 0, Dbg, "Found run in Mcb.\n", 0);
        DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
        return;
    }

    //
    //  Initialize the Vcb, the cluster size, LastCluster, and
    //  FirstLboOfCurrentRun (to be used as an indication of the first
    //  iteration through the following while loop).
    //

    BytesPerCluster = 1 << Vcb->AllocationSupport.LogOfBytesPerCluster;

    BytesOnVolume.QuadPart = UInt32x32To64( Vcb->AllocationSupport.NumberOfClusters, BytesPerCluster );

    LastCluster = FALSE;
    FirstLboOfCurrentRun = 0;

    //
    //  Discard the case that the request extends beyond the end of
    //  allocation.  Note that if the allocation size is not known
    //  AllocationSize is set to 0xffffffff.
    //

    if ( Vbo >= FcbOrDcb->Header.AllocationSize.LowPart ) {

        *Allocated = FALSE;

        DebugTrace( 0, Dbg, "Vbo beyond end of file.\n", 0);
        DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
        return;
    }

    //
    //  The Vbo is beyond the last Mcb entry.  So we adjust Current Vbo/Lbo
    //  and FatEntry to describe the beginning of the last entry in the Mcb.
    //  This is used as initialization for the following loop.
    //
    //  If the Mcb was empty, we start at the beginning of the file with
    //  CurrentVbo set to 0 to indicate a new run.
    //

    if (FatLookupLastMcbEntry( Vcb, &FcbOrDcb->Mcb, &CurrentVbo, &CurrentLbo, &Runs )) {

        DebugTrace( 0, Dbg, "Current Mcb size = %8lx.\n", CurrentVbo + 1);

        CurrentVbo -= (BytesPerCluster - 1);
        CurrentLbo -= (BytesPerCluster - 1);

        //
        //  Convert an index to a count.
        //

        Runs += 1;

    } else {

        DebugTrace( 0, Dbg, "Mcb empty.\n", 0);

        //
        //  Check for an FcbOrDcb that has no allocation
        //

        if (FcbOrDcb->FirstClusterOfFile == 0) {

            *Allocated = FALSE;

            DebugTrace( 0, Dbg, "File has no allocation.\n", 0);
            DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
            return;

        } else {

            CurrentVbo = 0;
            CurrentLbo = FatGetLboFromIndex( Vcb, FcbOrDcb->FirstClusterOfFile );
            FirstVboOfCurrentRun = CurrentVbo;
            FirstLboOfCurrentRun = CurrentLbo;

            Runs = 0;

            DebugTrace( 0, Dbg, "First Lbo of file = %8lx\n", CurrentLbo);
        }
    }

    //
    //  Now we know that we are looking up a valid Vbo, but it is
    //  not in the Mcb, which is a monotonically increasing list of
    //  Vbo's.  Thus we have to go to the Fat, and update
    //  the Mcb as we go.  We use a try-finally to unpin the page
    //  of fat hanging around.  Also we mark *Allocated = FALSE, so that
    //  the caller won't try to use the data if we hit an exception.
    //

    *Allocated = FALSE;

    _SEH2_TRY {

        FatEntry = (FAT_ENTRY)FatGetIndexFromLbo( Vcb, CurrentLbo );

        //
        //  ASSERT that CurrentVbo and CurrentLbo are now cluster aligned.
        //  The assumption here, is that only whole clusters of Vbos and Lbos
        //  are mapped in the Mcb.
        //

        NT_ASSERT( ((CurrentLbo - Vcb->AllocationSupport.FileAreaLbo)
                    % BytesPerCluster == 0) &&
                   (CurrentVbo % BytesPerCluster == 0) );

        //
        //  Starting from the first Vbo after the last Mcb entry, scan through
        //  the Fat looking for our Vbo.  We continue through the Fat until we
        //  hit a noncontiguity beyond the desired Vbo, or the last cluster.
        //

        while ( !LastCluster ) {

            //
            //  Get the next fat entry, and update our Current variables.
            //

            FatLookupFatEntry( IrpContext, Vcb, FatEntry, (PULONG)&FatEntry, &Context );

            PriorLbo = CurrentLbo;
            CurrentLbo = FatGetLboFromIndex( Vcb, FatEntry );
            CurrentVbo += BytesPerCluster;

            switch ( FatInterpretClusterType( Vcb, FatEntry )) {

            //
            //  Check for a break in the Fat allocation chain.
            //

            case FatClusterAvailable:
            case FatClusterReserved:
            case FatClusterBad:

                DebugTrace( 0, Dbg, "Break in allocation chain, entry = %d\n", FatEntry);
                DebugTrace(-1, Dbg, "FatLookupFileAllocation -> Fat Corrupt.  Raise Status.\n", 0);

                FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
                FatRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR );
                break;

            //
            //  If this is the last cluster, we must update the Mcb and
            //  exit the loop.
            //

            case FatClusterLast:

                //
                //  Assert we know where the current run started.  If the
                //  Mcb was empty when we were called, then FirstLboOfCurrentRun
                //  was set to the start of the file.  If the Mcb contained an
                //  entry, then FirstLboOfCurrentRun was set on the first
                //  iteration through the loop.  Thus if FirstLboOfCurrentRun
                //  is 0, then there was an Mcb entry and we are on our first
                //  iteration, meaning that the last cluster in the Mcb was
                //  really the last allocated cluster, but we checked Vbo
                //  against AllocationSize, and found it OK, thus AllocationSize
                //  must be too large.
                //
                //  Note that, when we finally arrive here, CurrentVbo is actually
                //  the first Vbo beyond the file allocation and CurrentLbo is
                //  meaningless.
                //

                DebugTrace( 0, Dbg, "Read last cluster of file.\n", 0);

                //
                //  Detect the case of the maximal file.  Note that this really isn't
                //  a proper Vbo - those are zero-based, and this is a one-based number.
                //  The maximal file, of 2^32 - 1 bytes, has a maximum byte offset of
                //  2^32 - 2.
                //
                //  Just so we don't get confused here.
                //

                if (CurrentVbo == 0) {

                    *EndOnMax = TRUE;
                    CurrentVbo -= 1;
                }

                LastCluster = TRUE;

                if (FirstLboOfCurrentRun != 0 ) {

                    DebugTrace( 0, Dbg, "Adding a run to the Mcb.\n", 0);
                    DebugTrace( 0, Dbg, "  Vbo    = %08lx.\n", FirstVboOfCurrentRun);
                    DebugTrace( 0, Dbg, "  Lbo    = %08lx.\n", FirstLboOfCurrentRun);
                    DebugTrace( 0, Dbg, "  Length = %08lx.\n", CurrentVbo - FirstVboOfCurrentRun);

                    (VOID)FatAddMcbEntry( Vcb,
                                          &FcbOrDcb->Mcb,
                                          FirstVboOfCurrentRun,
                                          FirstLboOfCurrentRun,
                                          CurrentVbo - FirstVboOfCurrentRun );

                    Runs += 1;
                }

                //
                //  Being at the end of allocation, make sure we have found
                //  the Vbo.  If we haven't, seeing as we checked VBO
                //  against AllocationSize, the real disk allocation is less
                //  than that of AllocationSize.  This comes about when the
                //  real allocation is not yet known, and AllocationSize
                //  contains MAXULONG.
                //
                //  KLUDGE! - If we were called by FatLookupFileAllocationSize
                //  Vbo is set to MAXULONG - 1, and AllocationSize to the lookup
                //  hint.  Thus we merrily go along looking for a match that isn't
                //  there, but in the meantime building an Mcb.  If this is
                //  the case, fill in AllocationSize and return.
                //

                if ( Vbo == MAXULONG - 1 ) {

                    *Allocated = FALSE;

                    FcbOrDcb->Header.AllocationSize.QuadPart = CurrentVbo;

                    DebugTrace( 0, Dbg, "New file allocation size = %08lx.\n", CurrentVbo);
                    try_return ( NOTHING );
                }

                //
                //  We will lie ever so slightly if we really terminated on the
                //  maximal byte of a file.  It is really allocated.
                //

                if (Vbo >= CurrentVbo && !*EndOnMax) {

                    *Allocated = FALSE;
                    try_return ( NOTHING );
                }

                break;
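
            //
            //  For illustration, walking a (hypothetical) chain of clusters
            //  5 -> 6 -> 7 -> 9 -> LAST with 0x1000 byte clusters produces
            //  two Mcb runs:
            //
            //      Vbo 0x0000, Lbo of cluster 5, length 0x3000  (clusters 5,6,7)
            //      Vbo 0x3000, Lbo of cluster 9, length 0x1000  (cluster 9)
            //
            //  The discontiguity between clusters 7 and 9 is what closes out
            //  the first run in the FatClusterNext case below.
            //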
            //
            //  This is a continuation in the chain.  If the run has a
            //  discontiguity at this point, update the Mcb, and if we are beyond
            //  the desired Vbo, this is the end of the run, so set LastCluster
            //  and exit the loop.
            //

            case FatClusterNext:

                //
                //  This is the loop check.  The Vbo must not be bigger than the size of
                //  the volume, and the Vbo must not have a) wrapped and b) not been at the
                //  very last cluster in the chain, for the case of the maximal file.
                //

                if ( CurrentVbo == 0 ||
                     (BytesOnVolume.HighPart == 0 && CurrentVbo > BytesOnVolume.LowPart)) {

                    FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
                    FatRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR );
                }

                if ( PriorLbo + BytesPerCluster != CurrentLbo ) {

                    //
                    //  Note that on the first time through the loop
                    //  (FirstLboOfCurrentRun == 0), we don't add the
                    //  run to the Mcb since it corresponds to the last
                    //  run already stored in the Mcb.
                    //

                    if ( FirstLboOfCurrentRun != 0 ) {

                        DebugTrace( 0, Dbg, "Adding a run to the Mcb.\n", 0);
                        DebugTrace( 0, Dbg, "  Vbo    = %08lx.\n", FirstVboOfCurrentRun);
                        DebugTrace( 0, Dbg, "  Lbo    = %08lx.\n", FirstLboOfCurrentRun);
                        DebugTrace( 0, Dbg, "  Length = %08lx.\n", CurrentVbo - FirstVboOfCurrentRun);

                        FatAddMcbEntry( Vcb,
                                        &FcbOrDcb->Mcb,
                                        FirstVboOfCurrentRun,
                                        FirstLboOfCurrentRun,
                                        CurrentVbo - FirstVboOfCurrentRun );

                        Runs += 1;
                    }

                    //
                    //  Since we are at a run boundary, with CurrentLbo and
                    //  CurrentVbo being the first cluster of the next run,
                    //  we see if the run we just added encompasses the desired
                    //  Vbo, and if so exit.  Otherwise we set up two new
                    //  First*boOfCurrentRun, and continue.
                    //

                    if (CurrentVbo > Vbo) {

                        LastCluster = TRUE;

                    } else {

                        FirstVboOfCurrentRun = CurrentVbo;
                        FirstLboOfCurrentRun = CurrentLbo;
                    }
                }
                break;

            default:

                DebugTrace(0, Dbg, "Illegal Cluster Type.\n", FatEntry);

#ifdef _MSC_VER
#pragma prefast( suppress: 28159, "we bugcheck here because our internal data structures are seriously corrupted if this happens" )
#endif
                FatBugCheck( 0, 0, 0 );

                break;

            } // switch()
        } // while()

        //
        //  Load up the return parameters.
        //
        //  On exit from the loop, Vbo still contains the desired Vbo, and
        //  CurrentVbo is the first byte after the run that contained the
        //  desired Vbo.
        //

        *Allocated = TRUE;

        *Lbo = FirstLboOfCurrentRun + (Vbo - FirstVboOfCurrentRun);

        *ByteCount = CurrentVbo - Vbo;

        if (ARGUMENT_PRESENT(Index)) {

            //
            //  Note that Runs only needs to be accurate with respect to where we
            //  ended.  Since partial-lookup cases will occur without exclusive
            //  synchronization, the Mcb itself may be much bigger by now.
            //

            *Index = Runs - 1;
        }

    try_exit: NOTHING;

    } _SEH2_FINALLY {

        DebugUnwind( FatLookupFileAllocation );

        //
        //  We are done reading the Fat, so unpin the last page of fat
        //  that is hanging around
        //

        FatUnpinBcb( IrpContext, Context.Bcb );

        DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
    } _SEH2_END;

    return;
}
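
//
//  For illustration, a minimal (hypothetical) lookup through the routine
//  above:
//
//      LBO     Lbo;
//      ULONG   ByteCount;
//      BOOLEAN Allocated, EndOnMax;
//
//      FatLookupFileAllocation( IrpContext,
//                               Fcb,
//                               0x3000,          //  Vbo to map
//                               &Lbo,
//                               &ByteCount,
//                               &Allocated,
//                               &EndOnMax,
//                               NULL );
//
//  On return, Allocated == TRUE means Lbo is the disk offset backing Vbo
//  0x3000 and ByteCount is the remaining length of that contiguous run.
//  A Vbo of MAXULONG - 1 is the internal kludge by which
//  FatLookupFileAllocationSize forces a walk of the whole chain.
//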


_Requires_lock_held_(_Global_critical_region_)
VOID
FatAddFileAllocation (
    IN PIRP_CONTEXT IrpContext,
    IN PFCB FcbOrDcb,
    IN PFILE_OBJECT FileObject OPTIONAL,
    IN ULONG DesiredAllocationSize
    )

/*++

Routine Description:

    This routine adds additional allocation to the specified file/directory.
    Additional allocation is added by appending clusters to the file/directory.

    If the file already has a sufficient allocation then this procedure
    is effectively a noop.

Arguments:

    FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being modified.
        This parameter must not specify the root dcb.

    FileObject - If supplied inform the cache manager of the change.

    DesiredAllocationSize - Supplies the minimum size, in bytes, that we want
        allocated to the file/directory.

--*/

{
    PVCB Vcb;
    LARGE_MCB NewMcb = {0};
    PLARGE_MCB McbToCleanup = NULL;
    PDIRENT Dirent = NULL;
    ULONG NewAllocation = 0;
    PBCB Bcb = NULL;
    BOOLEAN UnwindWeAllocatedDiskSpace = FALSE;
    BOOLEAN UnwindAllocationSizeSet = FALSE;
    BOOLEAN UnwindCacheManagerInformed = FALSE;
    BOOLEAN UnwindWeInitializedMcb = FALSE;

    PAGED_CODE();

    DebugTrace(+1, Dbg, "FatAddFileAllocation\n", 0);
    DebugTrace( 0, Dbg, "  FcbOrDcb              = %p\n", FcbOrDcb);
    DebugTrace( 0, Dbg, "  DesiredAllocationSize = %8lx\n", DesiredAllocationSize);

    Vcb = FcbOrDcb->Vcb;

    //
    //  If we haven't yet set the correct AllocationSize, do so.
    //

    if (FcbOrDcb->Header.AllocationSize.QuadPart == FCB_LOOKUP_ALLOCATIONSIZE_HINT) {

        FatLookupFileAllocationSize( IrpContext, FcbOrDcb );
    }

    //
    //  Check for the benign case that the desired allocation is already
    //  within the allocation size.
    //

    if (DesiredAllocationSize <= FcbOrDcb->Header.AllocationSize.LowPart) {

        DebugTrace(0, Dbg, "Desired size within current allocation.\n", 0);

        DebugTrace(-1, Dbg, "FatAddFileAllocation -> (VOID)\n", 0);
        return;
    }

    DebugTrace( 0, Dbg, "InitialAllocation = %08lx.\n", FcbOrDcb->Header.AllocationSize.LowPart);

    //
    //  Get a chunk of disk space that will fulfill our needs.  If there
    //  was no initial allocation, start from the hint in the Vcb, otherwise
    //  try to allocate from the cluster after the initial allocation.
    //
    //  If there was no initial allocation to the file, we can just use the
    //  Mcb in the FcbOrDcb, otherwise we have to use a new one, and merge
    //  it to the one in the FcbOrDcb.
    //

    _SEH2_TRY {

        if (FcbOrDcb->Header.AllocationSize.LowPart == 0) {

            LBO FirstLboOfFile;

            NT_ASSERT( FcbOrDcb->FcbCondition == FcbGood );

            FatGetDirentFromFcbOrDcb( IrpContext,
                                      FcbOrDcb,
                                      FALSE,
                                      &Dirent,
                                      &Bcb );

            //
            //  Set this dirty right now since this call can fail.
            //

            FatSetDirtyBcb( IrpContext, Bcb, Vcb, TRUE );

            FatAllocateDiskSpace( IrpContext,
                                  Vcb,
                                  0,
                                  &DesiredAllocationSize,
                                  FALSE,
                                  &FcbOrDcb->Mcb );

            UnwindWeAllocatedDiskSpace = TRUE;
            McbToCleanup = &FcbOrDcb->Mcb;

            //
            //  We have to update the dirent and FcbOrDcb copies of
            //  FirstClusterOfFile since before it was 0
            //

            FatLookupMcbEntry( FcbOrDcb->Vcb,
                               &FcbOrDcb->Mcb,
                               0,
                               &FirstLboOfFile,
                               (PULONG)NULL,
                               NULL );

            DebugTrace( 0, Dbg, "First Lbo of file will be %08lx.\n", FirstLboOfFile );

            FcbOrDcb->FirstClusterOfFile = FatGetIndexFromLbo( Vcb, FirstLboOfFile );

            Dirent->FirstClusterOfFile = (USHORT)FcbOrDcb->FirstClusterOfFile;

            if ( FatIsFat32(Vcb) ) {

                Dirent->FirstClusterOfFileHi = (USHORT)(FcbOrDcb->FirstClusterOfFile >> 16);
            }
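
            //
            //  For illustration, FAT32 cluster numbers are 32 bits wide but
            //  the dirent stores them split across two 16 bit fields.  For a
            //  (hypothetical) FirstClusterOfFile of 0x00123456:
            //
            //      Dirent->FirstClusterOfFile   = (USHORT) 0x00123456        == 0x3456
            //      Dirent->FirstClusterOfFileHi = (USHORT)(0x00123456 >> 16) == 0x0012
            //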

            //
            //  Note the size of the allocation we need to tell the cache manager about.
            //

            NewAllocation = DesiredAllocationSize;

        } else {

            LBO LastAllocatedLbo;
            VBO DontCare;

            //
            //  Get the first cluster following the current allocation.  It is possible
            //  the Mcb is empty (or short, etc.) so we need to be slightly careful
            //  about making sure we don't lie with the hint.
            //

            (void)FatLookupLastMcbEntry( FcbOrDcb->Vcb, &FcbOrDcb->Mcb, &DontCare, &LastAllocatedLbo, NULL );

            //
            //  Try to get some disk space starting from there.
            //

            NewAllocation = DesiredAllocationSize - FcbOrDcb->Header.AllocationSize.LowPart;

            FsRtlInitializeLargeMcb( &NewMcb, PagedPool );
            UnwindWeInitializedMcb = TRUE;
            McbToCleanup = &NewMcb;

            FatAllocateDiskSpace( IrpContext,
                                  Vcb,
                                  (LastAllocatedLbo != ~0 ?
                                   FatGetIndexFromLbo(Vcb,LastAllocatedLbo + 1) :
                                   0),
                                  &NewAllocation,
                                  FALSE,
                                  &NewMcb );

            UnwindWeAllocatedDiskSpace = TRUE;
        }

        //
        //  Now that we increased the allocation of the file, mark it in the
        //  FcbOrDcb.  Carefully prepare to handle an inability to grow the cache
        //  structures.
        //

        FcbOrDcb->Header.AllocationSize.LowPart += NewAllocation;

        //
        //  Handle the maximal file case, where we may have just wrapped.  Note
        //  that this must be the precise boundary case wrap, i.e. by one byte,
        //  so that the new allocation is actually one byte "less" as far as we're
        //  concerned.  This is important for the extension case.
        //

        if (FcbOrDcb->Header.AllocationSize.LowPart == 0) {

            NewAllocation -= 1;
            FcbOrDcb->Header.AllocationSize.LowPart = 0xffffffff;
        }

        UnwindAllocationSizeSet = TRUE;

        //
        //  Inform the cache manager to increase the section size
        //

        if ( ARGUMENT_PRESENT(FileObject) && CcIsFileCached(FileObject) ) {

            CcSetFileSizes( FileObject,
                            (PCC_FILE_SIZES)&FcbOrDcb->Header.AllocationSize );
            UnwindCacheManagerInformed = TRUE;
        }

        //
        //  In the extension case, we have held off actually gluing the new
        //  allocation onto the file.  This simplifies exception cleanup since
        //  if it was already added and the section grow failed, we'd have to
        //  do extra work to unglue it.  This way, we can assume that if we
        //  raise the only thing we need to do is deallocate the disk space.
        //
        //  Merge the allocation now.
        //

        if (FcbOrDcb->Header.AllocationSize.LowPart != NewAllocation) {

            //
            //  Tack the new Mcb onto the end of the FcbOrDcb one.
            //

            FatMergeAllocation( IrpContext,
                                Vcb,
                                &FcbOrDcb->Mcb,
                                &NewMcb );
        }

    } _SEH2_FINALLY {

        DebugUnwind( FatAddFileAllocation );

        //
        //  Give FlushFileBuffer/Cleanup a clue here, regardless of success/fail.
        //

        SetFlag(FcbOrDcb->FcbState, FCB_STATE_FLUSH_FAT);

        //
        //  If we were dogged trying to complete this operation, we need to
        //  back various things out.
        //

        if (_SEH2_AbnormalTermination()) {

            //
            //  Pull off the allocation size we tried to add to this object if
            //  we failed to grow cache structures or Mcb structures.
            //

            if (UnwindAllocationSizeSet) {

                FcbOrDcb->Header.AllocationSize.LowPart -= NewAllocation;
            }

            if (UnwindCacheManagerInformed) {

                CcSetFileSizes( FileObject,
                                (PCC_FILE_SIZES)&FcbOrDcb->Header.AllocationSize );
            }

            //
            //  In the case of initial allocation, we used the Fcb's Mcb and have
            //  to clean that up as well as the FAT chain references.
            //

            if (FcbOrDcb->Header.AllocationSize.LowPart == 0) {

                if (Dirent != NULL) {

                    FcbOrDcb->FirstClusterOfFile = 0;
                    Dirent->FirstClusterOfFile = 0;

                    if ( FatIsFat32(Vcb) ) {

                        Dirent->FirstClusterOfFileHi = 0;
                    }
                }
            }

            //
            //  ... and drop the dirent Bcb if we got it.  Do it now
            //  so we can afford to take the exception if we have to.
            //

            FatUnpinBcb( IrpContext, Bcb );

            _SEH2_TRY {

                //
                //  Note this can re-raise.
                //

                if ( UnwindWeAllocatedDiskSpace ) {

                    FatDeallocateDiskSpace( IrpContext, Vcb, McbToCleanup, FALSE );
                }

            } _SEH2_FINALLY {

                //
                //  We always want to clean up the non-initial allocation temporary Mcb,
                //  otherwise we have the Fcb's Mcb and we just truncate it away.
                //

                if (UnwindWeInitializedMcb == TRUE) {

                    //
                    //  Note that we already know a raise is in progress.  No danger
                    //  of encountering the normal case code below and doing this again.
                    //

                    FsRtlUninitializeLargeMcb( McbToCleanup );

                } else {

                    if (McbToCleanup) {

                        FsRtlTruncateLargeMcb( McbToCleanup, 0 );
                    }
                }
            } _SEH2_END;
        }

        DebugTrace(-1, Dbg, "FatAddFileAllocation -> (VOID)\n", 0);
    } _SEH2_END;

    //
    //  Non-exceptional cleanup we always want to do.  In handling the re-raise possibilities
    //  during exceptions we had to make sure these two steps always happened there beforehand.
    //  So now we handle the usual case.
    //

    FatUnpinBcb( IrpContext, Bcb );

    if (UnwindWeInitializedMcb == TRUE) {

        FsRtlUninitializeLargeMcb( &NewMcb );
    }
}
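
//
//  For illustration, the maximal-file handling above: the largest FAT
//  file is 2^32 - 1 bytes, so extending a file to exactly 4GB wraps
//  AllocationSize.LowPart to 0 (hypothetical numbers):
//
//      0xfffff000 + 0x00001000  ==  0x00000000   (32 bit wrap)
//
//  which the routine stores as 0xffffffff and treats the new allocation
//  as one byte "less", keeping later 32 bit arithmetic consistent.
//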

_Requires_lock_held_(_Global_critical_region_)
VOID
FatTruncateFileAllocation (
    IN PIRP_CONTEXT IrpContext,
    IN PFCB FcbOrDcb,
    IN ULONG DesiredAllocationSize
    )

/*++

Routine Description:

    This routine truncates the allocation of the specified file/directory.

    If the file is already smaller than the indicated size then this procedure
    is effectively a noop.


Arguments:

    FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being modified
        This parameter must not specify the root dcb.

    DesiredAllocationSize - Supplies the maximum size, in bytes, that we want
        allocated to the file/directory.  It is rounded
        up to the nearest cluster.

Return Value:

    VOID

--*/

{
    PVCB Vcb;
    PBCB Bcb = NULL;
    LARGE_MCB RemainingMcb = {0};
    ULONG BytesPerCluster;
    PDIRENT Dirent = NULL;
    BOOLEAN UpdatedDirent = FALSE;

    ULONG UnwindInitialAllocationSize;
    ULONG UnwindInitialFirstClusterOfFile;
    BOOLEAN UnwindWeAllocatedMcb = FALSE;

    PAGED_CODE();

    Vcb = FcbOrDcb->Vcb;

    DebugTrace(+1, Dbg, "FatTruncateFileAllocation\n", 0);
    DebugTrace( 0, Dbg, "  FcbOrDcb              = %p\n", FcbOrDcb);
    DebugTrace( 0, Dbg, "  DesiredAllocationSize = %8lx\n", DesiredAllocationSize);

    //
    //  If the Fcb isn't in good condition, we have no business whacking around on
    //  the disk after "its" clusters.
    //
    //  Inspired by a Prefix complaint.
    //

    NT_ASSERT( FcbOrDcb->FcbCondition == FcbGood );

    //
    //  If we haven't yet set the correct AllocationSize, do so.
    //

    if (FcbOrDcb->Header.AllocationSize.QuadPart == FCB_LOOKUP_ALLOCATIONSIZE_HINT) {

        FatLookupFileAllocationSize( IrpContext, FcbOrDcb );
    }

    //
    //  Round up the Desired Allocation Size to the next cluster size
    //

    BytesPerCluster = 1 << Vcb->AllocationSupport.LogOfBytesPerCluster;

    //
    //  Note if the desired allocation is zero, to distinguish this from
    //  the wrap case below.
    //

    if (DesiredAllocationSize != 0) {

        DesiredAllocationSize = (DesiredAllocationSize + (BytesPerCluster - 1)) &
                                ~(BytesPerCluster - 1);

        //
        //  Check for the benign case that the file is already smaller than
        //  the desired truncation.  Note that if it wraps, then a) it was
        //  specifying an offset in the maximally allocatable cluster and
        //  b) we're not asking to extend the file, either.  So stop.
        //

        if (DesiredAllocationSize == 0 ||
            DesiredAllocationSize >= FcbOrDcb->Header.AllocationSize.LowPart) {

            DebugTrace(0, Dbg, "Desired size within current allocation.\n", 0);

            DebugTrace(-1, Dbg, "FatTruncateFileAllocation -> (VOID)\n", 0);
            return;
        }

    }

    //
    //  This is a no-op if the allocation size is already what we want.
    //

    if (DesiredAllocationSize == FcbOrDcb->Header.AllocationSize.LowPart) {

        DebugTrace(0, Dbg, "Desired size equals current allocation.\n", 0);
        DebugTrace(-1, Dbg, "FatTruncateFileAllocation -> (VOID)\n", 0);
        return;
    }

    UnwindInitialAllocationSize = FcbOrDcb->Header.AllocationSize.LowPart;
    UnwindInitialFirstClusterOfFile = FcbOrDcb->FirstClusterOfFile;

    //
    //  Update the FcbOrDcb allocation size.  If it is now zero, we have the
    //  additional task of modifying the FcbOrDcb and Dirent copies of
    //  FirstClusterOfFile.
    //
    //  Note that we must pin the dirent before actually deallocating the
    //  disk space since, in unwind, it would not be possible to reallocate
    //  deallocated disk space as someone else may have reallocated it and
    //  may cause an exception when you try to get some more disk space.
    //  Thus FatDeallocateDiskSpace must be the final dangerous operation.
    //

    _SEH2_TRY {

        FcbOrDcb->Header.AllocationSize.QuadPart = DesiredAllocationSize;

        //
        //  Special case 0
        //

        if (DesiredAllocationSize == 0) {

            //
            //  We have to update the dirent and FcbOrDcb copies of
            //  FirstClusterOfFile since before it was 0
            //

            NT_ASSERT( FcbOrDcb->FcbCondition == FcbGood );

            FatGetDirentFromFcbOrDcb( IrpContext, FcbOrDcb, FALSE, &Dirent, &Bcb );

            Dirent->FirstClusterOfFile = 0;

            if (FatIsFat32(Vcb)) {

                Dirent->FirstClusterOfFileHi = 0;
            }

            FcbOrDcb->FirstClusterOfFile = 0;

            FatSetDirtyBcb( IrpContext, Bcb, Vcb, TRUE );
            UpdatedDirent = TRUE;

            FatDeallocateDiskSpace( IrpContext, Vcb, &FcbOrDcb->Mcb, ((FcbOrDcb->FcbState & FCB_STATE_ZERO_ON_DEALLOCATION) != 0));

            FatRemoveMcbEntry( FcbOrDcb->Vcb, &FcbOrDcb->Mcb, 0, 0xFFFFFFFF );

        } else {

            //
            //  Split the existing allocation into two parts, one we will keep, and
            //  one we will deallocate.
            //

            FsRtlInitializeLargeMcb( &RemainingMcb, PagedPool );
            UnwindWeAllocatedMcb = TRUE;

            FatSplitAllocation( IrpContext,
                                Vcb,
                                &FcbOrDcb->Mcb,
                                DesiredAllocationSize,
                                &RemainingMcb );

            FatDeallocateDiskSpace( IrpContext, Vcb, &RemainingMcb, ((FcbOrDcb->FcbState & FCB_STATE_ZERO_ON_DEALLOCATION) != 0) );

            FsRtlUninitializeLargeMcb( &RemainingMcb );
        }

    } _SEH2_FINALLY {

        DebugUnwind( FatTruncateFileAllocation );

        //
        //  Is this really the right backout strategy?  It would be nice if we could
        //  pretend the truncate worked if we knew that the file had gotten into
        //  a consistent state.  Leaving dangled clusters is probably quite preferable.
        //

        if ( _SEH2_AbnormalTermination() ) {

            FcbOrDcb->Header.AllocationSize.LowPart = UnwindInitialAllocationSize;

            if ( (DesiredAllocationSize == 0) && (Dirent != NULL)) {

                if (UpdatedDirent) {

                    //
                    //  If the dirent has been updated ok and marked dirty, then we
                    //  failed in FatDeallocateDiskSpace, and don't know what state
                    //  the on disc fat chain is in.  So we throw away the mcb,
                    //  and potentially lose a few clusters until the next
                    //  chkdsk.  The operation has succeeded, but the exception
                    //  will still propagate.  5.1
                    //

                    FatRemoveMcbEntry( Vcb, &FcbOrDcb->Mcb, 0, 0xFFFFFFFF );
                    FcbOrDcb->Header.AllocationSize.QuadPart = 0;
                }
                else if (FcbOrDcb->FirstClusterOfFile == 0) {

                    Dirent->FirstClusterOfFile = (USHORT)UnwindInitialFirstClusterOfFile;

                    if ( FatIsFat32(Vcb) ) {

                        Dirent->FirstClusterOfFileHi =
                            (USHORT)(UnwindInitialFirstClusterOfFile >> 16);
                    }

                    FcbOrDcb->FirstClusterOfFile = UnwindInitialFirstClusterOfFile;
                }
            }

            if ( UnwindWeAllocatedMcb ) {

                FsRtlUninitializeLargeMcb( &RemainingMcb );
            }

            //
            //  Note that in the non zero truncation case, we will also
            //  leak clusters.  However, apart from this, the in memory and on disc
            //  structures will agree.
            //
        }

        FatUnpinBcb( IrpContext, Bcb );

        //
        //  Give FlushFileBuffer/Cleanup a clue here, regardless of success/fail.
        //

        SetFlag(FcbOrDcb->FcbState, FCB_STATE_FLUSH_FAT);

        DebugTrace(-1, Dbg, "FatTruncateFileAllocation -> (VOID)\n", 0);
    } _SEH2_END;
}
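
//
//  For illustration, truncating a file whose Mcb maps 0x5000 bytes down
//  to a cluster aligned DesiredAllocationSize of 0x2000 (hypothetical
//  numbers) splits the allocation as:
//
//      FcbOrDcb->Mcb:   Vbos 0x0000 .. 0x1FFF   (kept)
//      RemainingMcb:    the clusters that backed Vbos 0x2000 .. 0x4FFF
//
//  after which the RemainingMcb clusters are freed in the FAT.  Note the
//  ordering in the routine above: the dirent is pinned and updated
//  before FatDeallocateDiskSpace runs, because deallocation is the one
//  operation that cannot be backed out.
//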


_Requires_lock_held_(_Global_critical_region_)
VOID
FatLookupFileAllocationSize (
    IN PIRP_CONTEXT IrpContext,
    IN PFCB FcbOrDcb
    )

/*++

Routine Description:

    This routine retrieves the current file allocation size for the
    specified file/directory.

Arguments:

    FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being modified

--*/

{
    LBO Lbo;
    ULONG ByteCount;
    BOOLEAN DontCare;

    PAGED_CODE();

    DebugTrace(+1, Dbg, "FatLookupAllocationSize\n", 0);
    DebugTrace( 0, Dbg, "  FcbOrDcb = %p\n", FcbOrDcb);

    //
    //  We call FatLookupFileAllocation with Vbo of 0xffffffff - 1.
    //

    FatLookupFileAllocation( IrpContext,
                             FcbOrDcb,
                             MAXULONG - 1,
                             &Lbo,
                             &ByteCount,
                             &DontCare,
                             &DontCare,
                             NULL );

    //
    //  FileSize was set at Fcb creation time from the contents of the directory entry,
    //  and we are only now looking up the real length of the allocation chain.  If it
    //  cannot be contained, this is trash.  Probably more where that came from.
    //

    if (FcbOrDcb->Header.FileSize.LowPart > FcbOrDcb->Header.AllocationSize.LowPart) {

        FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
        FatRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR );
    }

    DebugTrace(-1, Dbg, "FatLookupFileAllocationSize -> (VOID)\n", 0);
    return;
}


_Requires_lock_held_(_Global_critical_region_)
VOID
FatAllocateDiskSpace (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb,
    IN ULONG AbsoluteClusterHint,
    IN PULONG ByteCount,
    IN BOOLEAN ExactMatchRequired,
    OUT PLARGE_MCB Mcb
    )

/*++

Routine Description:

    This procedure allocates additional disk space and builds an mcb
    representing the newly allocated space.  If the space cannot be
    allocated then this procedure raises an appropriate status.

    Searching starts from the hint index in the Vcb unless an alternative
    non-zero hint is given in AbsoluteClusterHint.  If we are using the
    hint field in the Vcb, it is set to the cluster following our allocation
    when we are done.

    Disk space can only be allocated in cluster units so this procedure
    will round up any byte count to the next cluster boundary.

    Pictorially what is done is the following (where ! denotes the end of
    the fat chain (i.e., FAT_CLUSTER_LAST)):


        Mcb (empty)

    becomes

        Mcb |--a--|--b--|--c--!

                              ^
        ByteCount ------------+

Arguments:

    Vcb - Supplies the VCB being modified

    AbsoluteClusterHint - Supplies an alternate hint index to start the
        search from.  If this is zero we use, and update,
        the Vcb hint field.

    ByteCount - Supplies the number of bytes that we are requesting, and
        receives the number of bytes that we got.

    ExactMatchRequired - Caller should set this to TRUE if only the precise run requested
        is acceptable.

    Mcb - Receives the MCB describing the newly allocated disk space.  The
        caller passes in an initialized Mcb that is filled in by this procedure.

Return Value:

    TRUE - Allocated ok
    FALSE - Failed to allocate exactly as requested (=> ExactMatchRequired was TRUE)

--*/

{
    UCHAR LogOfBytesPerCluster;
    ULONG BytesPerCluster;
    ULONG StartingCluster;
    ULONG ClusterCount;
    ULONG WindowRelativeHint;
#if DBG
    ULONG PreviousClear = 0;
#endif

    PFAT_WINDOW Window;
    BOOLEAN Wait = FALSE;
    BOOLEAN Result = TRUE;

    PAGED_CODE();

    DebugTrace(+1, Dbg, "FatAllocateDiskSpace\n", 0);
    DebugTrace( 0, Dbg, "  Vcb        = %p\n", Vcb);
    DebugTrace( 0, Dbg, "  *ByteCount = %8lx\n", *ByteCount);
    DebugTrace( 0, Dbg, "  Mcb        = %p\n", Mcb);
    DebugTrace( 0, Dbg, "  Hint       = %8lx\n", AbsoluteClusterHint);

    NT_ASSERT((AbsoluteClusterHint <= Vcb->AllocationSupport.NumberOfClusters + 2) && (1 != AbsoluteClusterHint));

    //
    //  Make sure byte count is not zero
    //

    if (*ByteCount == 0) {

        DebugTrace(0, Dbg, "Nothing to allocate.\n", 0);

        DebugTrace(-1, Dbg, "FatAllocateDiskSpace -> (VOID)\n", 0);
        return;
    }

    //
    //  Compute the cluster count based on the byte count, rounding up
    //  to the next cluster if there is any remainder.  Note that the
    //  pathological case ByteCount == 0 has been eliminated above.
    //

    LogOfBytesPerCluster = Vcb->AllocationSupport.LogOfBytesPerCluster;
    BytesPerCluster = 1 << LogOfBytesPerCluster;

    *ByteCount = (*ByteCount + (BytesPerCluster - 1))
                 & ~(BytesPerCluster - 1);

    //
    //  If ByteCount is NOW zero, then we were asked for the maximal
    //  filesize (or at least for bytes in the last allocatable sector).
    //

    if (*ByteCount == 0) {

        *ByteCount = 0xffffffff;
        ClusterCount = 1 << (32 - LogOfBytesPerCluster);

    } else {

        ClusterCount = (*ByteCount >> LogOfBytesPerCluster);
    }

    //
    //  Analysis tools don't figure out that ClusterCount is not zero because
    //  of the ByteCount == 0 checks, so give them a hint.
    //
    _Analysis_assume_(ClusterCount > 0);
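
    //
    //  For illustration, with a (hypothetical) BytesPerCluster of 0x1000
    //  (LogOfBytesPerCluster == 12) a request of 0x2345 bytes rounds up as
    //
    //      (0x2345 + 0xFFF) & ~0xFFF  ==  0x3000   ->  ClusterCount == 3
    //
    //  while a request inside the last cluster of the maximal 2^32 - 1
    //  byte file rounds up to 0; the wrap is caught above and replaced by
    //  ByteCount 0xffffffff with ClusterCount 1 << (32 - 12).
    //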

    //
    //  Make sure there are enough free clusters to start with, and
    //  take them now so that nobody else takes them from us.
    //

    ExAcquireResourceSharedLite(&Vcb->ChangeBitMapResource, TRUE);
    FatLockFreeClusterBitMap( Vcb );

    if (ClusterCount <= Vcb->AllocationSupport.NumberOfFreeClusters) {

        Vcb->AllocationSupport.NumberOfFreeClusters -= ClusterCount;

    } else {

        FatUnlockFreeClusterBitMap( Vcb );
        ExReleaseResourceLite(&Vcb->ChangeBitMapResource);

        DebugTrace(0, Dbg, "Disk Full.  Raise Status.\n", 0);
        FatRaiseStatus( IrpContext, STATUS_DISK_FULL );
    }

    //
    //  Did the caller supply a hint?
    //

    if ((0 != AbsoluteClusterHint) && (AbsoluteClusterHint < (Vcb->AllocationSupport.NumberOfClusters + 2))) {

        if (Vcb->NumberOfWindows > 1) {

            //
            //  If we're being called upon to allocate clusters outside the
            //  current window (which happens only via MoveFile), it's a problem.
            //  We address this by changing the current window to be the one which
            //  contains the alternate cluster hint.  Note that if the user's
            //  request would cross a window boundary, he doesn't really get what
            //  he wanted.
            //

            if (AbsoluteClusterHint < Vcb->CurrentWindow->FirstCluster ||
                AbsoluteClusterHint > Vcb->CurrentWindow->LastCluster) {

                ULONG BucketNum = FatWindowOfCluster( AbsoluteClusterHint );

                NT_ASSERT( BucketNum < Vcb->NumberOfWindows);

                //
                //  Drop our shared lock on the ChangeBitMapResource, and pick it up again
                //  exclusive in preparation for making the window swap.
                //

                FatUnlockFreeClusterBitMap(Vcb);
                ExReleaseResourceLite(&Vcb->ChangeBitMapResource);
                ExAcquireResourceExclusiveLite(&Vcb->ChangeBitMapResource, TRUE);
                FatLockFreeClusterBitMap(Vcb);

                Window = &Vcb->Windows[BucketNum];

                //
                //  Again, test the current window against the one we want - some other
                //  thread could have sneaked in behind our backs and kindly set it to the one
                //  we need, when we dropped and reacquired the ChangeBitMapResource above.
                //

                if (Window != Vcb->CurrentWindow) {

                    _SEH2_TRY {

                        Wait = BooleanFlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
                        SetFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);

                        //
                        //  Change to the new window (update Vcb->CurrentWindow) and scan it
                        //  to build up a freespace bitmap etc.
                        //

                        FatExamineFatEntries( IrpContext, Vcb,
                                              0,
                                              0,
                                              FALSE,
                                              Window,
                                              NULL);

                    } _SEH2_FINALLY {

                        if (!Wait) {

                            ClearFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
                        }

                        if (_SEH2_AbnormalTermination()) {

                            //
                            //  We will have raised as a result of failing to pick up the
                            //  chunk of the FAT for this window move.  Release our resources
                            //  and return the cluster count to the volume.
                            //

                            Vcb->AllocationSupport.NumberOfFreeClusters += ClusterCount;

                            FatUnlockFreeClusterBitMap( Vcb );
                            ExReleaseResourceLite(&Vcb->ChangeBitMapResource);
                        }
                    } _SEH2_END;
                }
            }

            //
            //  Make the hint cluster number relative to the base of the current window...
            //
            //  CurrentWindow->FirstCluster is biased by +2 already, so we will lose the
            //  bias already in AbsoluteClusterHint.  Put it back....
            //

            WindowRelativeHint = AbsoluteClusterHint - Vcb->CurrentWindow->FirstCluster + 2;
        }
        else {

            //
            //  Only one 'window', i.e. fat16/12.  No modification necessary.
            //

            WindowRelativeHint = AbsoluteClusterHint;
        }
    }
    else {

        //
        //  Either no hint supplied, or it was out of range, so grab one from the Vcb
        //
        //  NOTE: ClusterHint in the Vcb is not guaranteed to be set (may be -1)
        //

        WindowRelativeHint = Vcb->ClusterHint;
        AbsoluteClusterHint = 0;

        //
        //  Vcb hint may not have been initialized yet.  Force to valid cluster.
        //

        if (-1 == WindowRelativeHint) {

            WindowRelativeHint = 2;
        }
    }

    NT_ASSERT((WindowRelativeHint >= 2) && (WindowRelativeHint < Vcb->FreeClusterBitMap.SizeOfBitMap + 2));
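
    //
    //  For illustration, if (hypothetically) window 1 of a FAT32 volume
    //  covers clusters 0x10002 .. 0x20001, then CurrentWindow->FirstCluster
    //  is 0x10002 and an absolute hint of 0x10010 becomes
    //
    //      WindowRelativeHint = 0x10010 - 0x10002 + 2  ==  0x10
    //
    //  i.e. the cluster number expressed as if the window began at cluster
    //  2, which is the form the bitmap macros above expect.
    //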
2033 // 2034 2035 StartingCluster = FatFindFreeClusterRun( IrpContext, 2036 Vcb, 2037 ClusterCount, 2038 WindowRelativeHint ); 2039 // 2040 // If the above call was successful, we can just update the fat 2041 // and Mcb and exit. Otherwise we have to look for smaller free 2042 // runs. 2043 // 2044 // This test is a bit funky. Note that the error return from 2045 // RtlFindClearBits is -1, and adding two to that is 1. 2046 // 2047 2048 if ((StartingCluster != 1) && 2049 ((0 == AbsoluteClusterHint) || (StartingCluster == WindowRelativeHint)) 2050 ) { 2051 2052 #if DBG 2053 PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap ); 2054 #endif // DBG 2055 2056 // 2057 // Take the clusters we found, and unlock the bit map. 2058 // 2059 2060 FatReserveClusters(IrpContext, Vcb, StartingCluster, ClusterCount); 2061 2062 Window->ClustersFree -= ClusterCount; 2063 2064 StartingCluster += Window->FirstCluster; 2065 StartingCluster -= 2; 2066 2067 NT_ASSERT( PreviousClear - ClusterCount == Window->ClustersFree ); 2068 2069 FatUnlockFreeClusterBitMap( Vcb ); 2070 2071 // 2072 // Note that this call will never fail since there is always 2073 // room for one entry in an empty Mcb. 2074 // 2075 2076 FatAddMcbEntry( Vcb, Mcb, 2077 0, 2078 FatGetLboFromIndex( Vcb, StartingCluster ), 2079 *ByteCount); 2080 _SEH2_TRY { 2081 2082 // 2083 // Update the fat. 2084 // 2085 2086 FatAllocateClusters(IrpContext, Vcb, 2087 StartingCluster, 2088 ClusterCount); 2089 2090 } _SEH2_FINALLY { 2091 2092 DebugUnwind( FatAllocateDiskSpace ); 2093 2094 // 2095 // If the allocate clusters failed, remove the run from the Mcb, 2096 // unreserve the clusters, and reset the free cluster count. 2097 // 2098 2099 if (_SEH2_AbnormalTermination()) { 2100 2101 FatRemoveMcbEntry( Vcb, Mcb, 0, *ByteCount ); 2102 2103 FatLockFreeClusterBitMap( Vcb ); 2104 2105 // Only clear bits if the bitmap window is the same. 2106 2107 if (Window == Vcb->CurrentWindow) { 2108 2109 // Both values (startingcluster and window->firstcluster) are 2110 // already biased by 2, so will cancel, so we need to add in the 2 again. 2111 2112 FatUnreserveClusters( IrpContext, Vcb, 2113 StartingCluster - Window->FirstCluster + 2, 2114 ClusterCount ); 2115 } 2116 2117 Window->ClustersFree += ClusterCount; 2118 Vcb->AllocationSupport.NumberOfFreeClusters += ClusterCount; 2119 2120 FatUnlockFreeClusterBitMap( Vcb ); 2121 } 2122 2123 ExReleaseResourceLite(&Vcb->ChangeBitMapResource); 2124 } _SEH2_END; 2125 2126 } else { 2127 2128 // 2129 // Note that Index is a zero-based window-relative number. When appropriate 2130 // it'll get converted into a true cluster number and put in Cluster, which 2131 // will be a volume relative true cluster number. 2132 // 2133 2134 ULONG Index = 0; 2135 ULONG Cluster = 0; 2136 ULONG CurrentVbo = 0; 2137 ULONG PriorLastCluster = 0; 2138 ULONG BytesFound = 0; 2139 2140 ULONG ClustersFound = 0; 2141 ULONG ClustersRemaining = 0; 2142 2143 BOOLEAN LockedBitMap = FALSE; 2144 BOOLEAN SelectNextContigWindow = FALSE; 2145 2146 // 2147 // Drop our shared lock on the ChangeBitMapResource, and pick it up again 2148 // exclusive in preparation for making a window swap. 
2149 // 2150 2151 FatUnlockFreeClusterBitMap(Vcb); 2152 ExReleaseResourceLite(&Vcb->ChangeBitMapResource); 2153 ExAcquireResourceExclusiveLite(&Vcb->ChangeBitMapResource, TRUE); 2154 FatLockFreeClusterBitMap(Vcb); 2155 LockedBitMap = TRUE; 2156 2157 _SEH2_TRY { 2158 2159 if ( ExactMatchRequired && (1 == Vcb->NumberOfWindows)) { 2160 2161 // 2162 // Give up right now, there are no more windows to search! RtlFindClearBits 2163 // searches the whole bitmap, so we would have found any contiguous run 2164 // large enough. 2165 // 2166 2167 try_leave( Result = FALSE); 2168 } 2169 2170 // 2171 // While the request is still incomplete, look for the largest 2172 // run of free clusters, mark them taken, allocate the run in 2173 // the Mcb and Fat, and if this isn't the first time through 2174 // the loop link it to the prior run on the fat. The Mcb will 2175 // coalesce automatically. 2176 // 2177 2178 ClustersRemaining = ClusterCount; 2179 CurrentVbo = 0; 2180 PriorLastCluster = 0; 2181 2182 while (ClustersRemaining != 0) { 2183 2184 // 2185 // If we just entered the loop, the bit map is already locked 2186 // 2187 2188 if ( !LockedBitMap ) { 2189 2190 FatLockFreeClusterBitMap( Vcb ); 2191 LockedBitMap = TRUE; 2192 } 2193 2194 // 2195 // Find the largest run of free clusters. If the run is 2196 // bigger than we need, only use what we need. Note that 2197 // this will then be the last while() iteration. 2198 // 2199 2200 // 12/3/95: need to bias bitmap by 2 bits for the defrag 2201 // hooks and the below macro became impossible to do without in-line 2202 // procedures. 2203 // 2204 // ClustersFound = FatLongestFreeClusterRun( IrpContext, Vcb, &Index ); 2205 2206 ClustersFound = 0; 2207 2208 if (!SelectNextContigWindow) { 2209 2210 if ( 0 != WindowRelativeHint) { 2211 2212 ULONG Desired = Vcb->FreeClusterBitMap.SizeOfBitMap - (WindowRelativeHint - 2); 2213 2214 // 2215 // We will try to allocate contiguously. Try from the current hint to the 2216 // end of the current window. Don't try for more than we actually need. 2217 // 2218 2219 if (Desired > ClustersRemaining) { 2220 2221 Desired = ClustersRemaining; 2222 } 2223 2224 if (RtlAreBitsClear( &Vcb->FreeClusterBitMap, 2225 WindowRelativeHint - 2, 2226 Desired)) 2227 { 2228 // 2229 // Clusters from hint->...windowend are free. Take them. 2230 // 2231 2232 Index = WindowRelativeHint - 2; 2233 ClustersFound = Desired; 2234 2235 if (FatIsFat32(Vcb)) { 2236 2237 // 2238 // We're now up against the end of the current window, so indicate that we 2239 // want the next window in the sequence next time around. (If we're not up 2240 // against the end of the window, then we got what we needed and won't be 2241 // coming around again anyway). 2242 // 2243 2244 SelectNextContigWindow = TRUE; 2245 WindowRelativeHint = 2; 2246 } 2247 else { 2248 2249 // 2250 // FAT 12/16 - we've run up against the end of the volume. Clear the 2251 // hint, since we now have no idea where to look. 2252 // 2253 2254 WindowRelativeHint = 0; 2255 } 2256 #if DBG 2257 PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap ); 2258 #endif // DBG 2259 } 2260 else { 2261 2262 if (ExactMatchRequired) { 2263 2264 // 2265 // If our caller required an exact match, then we're hosed. Bail out now.
2266 // 2267 2268 try_leave( Result = FALSE); 2269 } 2270 2271 // 2272 // Hint failed, drop back to pot luck 2273 // 2274 2275 WindowRelativeHint = 0; 2276 } 2277 } 2278 2279 if ((0 == WindowRelativeHint) && (0 == ClustersFound)) { 2280 2281 if (ClustersRemaining <= Vcb->CurrentWindow->ClustersFree) { 2282 2283 // 2284 // The remaining allocation could be satisfied entirely from this 2285 // window. We will ask only for what we need, to try and avoid 2286 // unnecessarily fragmenting large runs of space by always using 2287 // (part of) the largest run we can find. This call will return the 2288 // first run large enough. 2289 // 2290 2291 Index = RtlFindClearBits( &Vcb->FreeClusterBitMap, ClustersRemaining, 0); 2292 2293 if (-1 != Index) { 2294 2295 ClustersFound = ClustersRemaining; 2296 } 2297 } 2298 2299 if (0 == ClustersFound) { 2300 2301 // 2302 // Still nothing, so just take the largest free run we can find. 2303 // 2304 2305 ClustersFound = RtlFindLongestRunClear( &Vcb->FreeClusterBitMap, &Index ); 2306 2307 } 2308 #if DBG 2309 PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap ); 2310 #endif // DBG 2311 if (ClustersFound >= ClustersRemaining) { 2312 2313 ClustersFound = ClustersRemaining; 2314 } 2315 else { 2316 2317 // 2318 // If we just ran up to the end of a window, set up a hint that 2319 // we'd like the next consecutive window after this one. (FAT32 only) 2320 // 2321 2322 if ( ((Index + ClustersFound) == Vcb->FreeClusterBitMap.SizeOfBitMap) && 2323 FatIsFat32( Vcb) 2324 ) { 2325 2326 SelectNextContigWindow = TRUE; 2327 WindowRelativeHint = 2; 2328 } 2329 } 2330 } 2331 } 2332 2333 if (ClustersFound == 0) { 2334 2335 ULONG FaveWindow = 0; 2336 BOOLEAN SelectedWindow; 2337 2338 // 2339 // If we found no free clusters on a single-window FAT, 2340 // there was a bad problem with the free cluster count. 2341 // 2342 2343 if (1 == Vcb->NumberOfWindows) { 2344 2345 #ifdef _MSC_VER 2346 #pragma prefast( suppress: 28159, "we bugcheck here because our internal data structures are seriously corrupted if this happens" ) 2347 #endif 2348 FatBugCheck( 0, 5, 0 ); 2349 } 2350 2351 // 2352 // Switch to a new bucket. Possibly the next one if we're 2353 // currently on a roll (allocating contiguously) 2354 // 2355 2356 SelectedWindow = FALSE; 2357 2358 if ( SelectNextContigWindow) { 2359 2360 ULONG NextWindow; 2361 2362 NextWindow = (((ULONG)((PUCHAR)Vcb->CurrentWindow - (PUCHAR)Vcb->Windows)) / sizeof( FAT_WINDOW)) + 1; 2363 2364 if ((NextWindow < Vcb->NumberOfWindows) && 2365 ( Vcb->Windows[ NextWindow].ClustersFree > 0) 2366 ) { 2367 2368 FaveWindow = NextWindow; 2369 SelectedWindow = TRUE; 2370 } 2371 else { 2372 2373 if (ExactMatchRequired) { 2374 2375 // 2376 // Some dope tried to allocate a run past the end of the volume... 
2377 // 2378 2379 try_leave( Result = FALSE); 2380 } 2381 2382 // 2383 // Give up on the contiguous allocation attempts 2384 // 2385 2386 WindowRelativeHint = 0; 2387 } 2388 2389 SelectNextContigWindow = FALSE; 2390 } 2391 2392 if (!SelectedWindow) { 2393 2394 // 2395 // Select a new window to begin allocating from 2396 // 2397 2398 FaveWindow = FatSelectBestWindow( Vcb); 2399 } 2400 2401 // 2402 // By now we'd better have found a window with some free clusters 2403 // 2404 2405 if (0 == Vcb->Windows[ FaveWindow].ClustersFree) { 2406 2407 #ifdef _MSC_VER 2408 #pragma prefast( suppress: 28159, "we bugcheck here because our internal data structures are seriously corrupted if this happens" ) 2409 #endif 2410 FatBugCheck( 0, 5, 1 ); 2411 } 2412 2413 Wait = BooleanFlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT); 2414 SetFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT); 2415 2416 FatExamineFatEntries( IrpContext, Vcb, 2417 0, 2418 0, 2419 FALSE, 2420 &Vcb->Windows[FaveWindow], 2421 NULL); 2422 2423 if (!Wait) { 2424 2425 ClearFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT); 2426 } 2427 2428 // 2429 // Now we'll just go around the loop again, having switched windows, 2430 // and allocate.... 2431 // 2432 #if DBG 2433 PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap ); 2434 #endif //DBG 2435 } // if (clustersfound == 0) 2436 else { 2437 2438 // 2439 // Take the clusters we found, convert our index to a cluster number 2440 // and unlock the bit map. 2441 // 2442 2443 Window = Vcb->CurrentWindow; 2444 2445 FatReserveClusters( IrpContext, Vcb, (Index + 2), ClustersFound ); 2446 2447 Cluster = Index + Window->FirstCluster; 2448 2449 Window->ClustersFree -= ClustersFound; 2450 NT_ASSERT( PreviousClear - ClustersFound == Window->ClustersFree ); 2451 2452 FatUnlockFreeClusterBitMap( Vcb ); 2453 LockedBitMap = FALSE; 2454 2455 // 2456 // Add the newly alloced run to the Mcb. 2457 // 2458 2459 BytesFound = ClustersFound << LogOfBytesPerCluster; 2460 2461 FatAddMcbEntry( Vcb, Mcb, 2462 CurrentVbo, 2463 FatGetLboFromIndex( Vcb, Cluster ), 2464 BytesFound ); 2465 2466 // 2467 // Connect the last allocated run with this one, and allocate 2468 // this run on the Fat. 2469 // 2470 2471 if (PriorLastCluster != 0) { 2472 2473 FatSetFatEntry( IrpContext, 2474 Vcb, 2475 PriorLastCluster, 2476 (FAT_ENTRY)Cluster ); 2477 } 2478 2479 // 2480 // Update the fat 2481 // 2482 2483 FatAllocateClusters( IrpContext, Vcb, Cluster, ClustersFound ); 2484 2485 // 2486 // Prepare for the next iteration. 2487 // 2488 2489 CurrentVbo += BytesFound; 2490 ClustersRemaining -= ClustersFound; 2491 PriorLastCluster = Cluster + ClustersFound - 1; 2492 } 2493 } // while (clustersremaining) 2494 2495 } _SEH2_FINALLY { 2496 2497 DebugUnwind( FatAllocateDiskSpace ); 2498 2499 ExReleaseResourceLite(&Vcb->ChangeBitMapResource); 2500 2501 // 2502 // Is there any unwinding to do? 2503 // 2504 2505 if ( _SEH2_AbnormalTermination() || (FALSE == Result)) { 2506 2507 // 2508 // Flag to the caller that they're getting nothing 2509 // 2510 2511 *ByteCount = 0; 2512 2513 // 2514 // There are three places we could have taken this exception: 2515 // when switching the window (FatExamineFatEntries), adding 2516 // a found run to the Mcb (FatAddMcbEntry), or when writing 2517 // the changes to the FAT (FatSetFatEntry). In the first case 2518 // we don't have anything to unwind before deallocation, and 2519 // can detect this by seeing if we have the ClusterBitmap 2520 // mutex out. 
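//
// Concretely: a raise inside FatExamineFatEntries leaves the mutex held
// (LockedBitMap == TRUE) with nothing yet reserved for this iteration,
// while a raise in FatAddMcbEntry or FatSetFatEntry occurs after the
// mutex was dropped (LockedBitMap == FALSE), in which case the bits
// reserved for the last run still need to be cleared.
//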
2521 2522 if (!LockedBitMap) { 2523 2524 FatLockFreeClusterBitMap( Vcb ); 2525 2526 // 2527 // In these cases, we have the possibility that the FAT 2528 // window is still in place and we need to clear the bits. 2529 // If the Mcb entry isn't there (we raised trying to add 2530 // it), the effect of trying to remove it is a noop. 2531 // 2532 2533 if (Window == Vcb->CurrentWindow) { 2534 2535 // 2536 // Cluster reservation works on cluster 2 based window-relative 2537 // numbers, so we must convert. The subtraction will lose the 2538 // cluster 2 base, so bias the result. 2539 // 2540 2541 FatUnreserveClusters( IrpContext, Vcb, 2542 (Cluster - Window->FirstCluster) + 2, 2543 ClustersFound ); 2544 } 2545 2546 // 2547 // Note that FatDeallocateDiskSpace will take care of adjusting 2548 // to account for the entries in the Mcb. All we have to account 2549 // for is the last run that didn't make it. 2550 // 2551 2552 Window->ClustersFree += ClustersFound; 2553 Vcb->AllocationSupport.NumberOfFreeClusters += ClustersFound; 2554 2555 FatUnlockFreeClusterBitMap( Vcb ); 2556 2557 FatRemoveMcbEntry( Vcb, Mcb, CurrentVbo, BytesFound ); 2558 2559 } else { 2560 2561 // 2562 // Just drop the mutex now - we didn't manage to do anything 2563 // that needs to be backed out. 2564 // 2565 2566 FatUnlockFreeClusterBitMap( Vcb ); 2567 } 2568 2569 _SEH2_TRY { 2570 2571 // 2572 // Now we have tidied up, we are ready to just send the Mcb 2573 // off to deallocate disk space 2574 // 2575 2576 FatDeallocateDiskSpace( IrpContext, Vcb, Mcb, FALSE ); 2577 2578 } _SEH2_FINALLY { 2579 2580 // 2581 // Now finally (really), remove all the entries from the mcb 2582 // 2583 2584 FatRemoveMcbEntry( Vcb, Mcb, 0, 0xFFFFFFFF ); 2585 } _SEH2_END; 2586 } 2587 2588 DebugTrace(-1, Dbg, "FatAllocateDiskSpace -> (VOID)\n", 0); 2589 2590 } _SEH2_END; // finally 2591 } 2592 2593 return; 2594 } 2595 2596 2597 2598 // 2599 // Limit our zeroing writes to 1 MB. 2600 // 2601 2602 #define MAX_ZERO_MDL_SIZE (1*1024*1024) 2603 2604 _Requires_lock_held_(_Global_critical_region_) 2605 VOID 2606 FatDeallocateDiskSpace ( 2607 IN PIRP_CONTEXT IrpContext, 2608 IN PVCB Vcb, 2609 IN PLARGE_MCB Mcb, 2610 IN BOOLEAN ZeroOnDeallocate 2611 ) 2612 2613 /*++ 2614 2615 Routine Description: 2616 2617 This procedure deallocates the disk space denoted by an input 2618 mcb. Note that the input MCB does not necessarily need to describe 2619 a chain that ends with a FAT_CLUSTER_LAST entry. 2620 2621 Pictorially what is done is the following 2622 2623 Fat |--a--|--b--|--c--| 2624 Mcb |--a--|--b--|--c--| 2625 2626 becomes 2627 2628 Fat |--0--|--0--|--0--| 2629 Mcb |--a--|--b--|--c--| 2630 2631 Arguments: 2632 2633 Vcb - Supplies the VCB being modified 2634 2635 Mcb - Supplies the MCB describing the disk space to deallocate. Note 2636 that Mcb is unchanged by this procedure. 2637 2638 ZeroOnDeallocate - Supplies TRUE if the disk space should be zeroed before it is freed 2639 Return Value: 2640 2641 None.
2642 2643 --*/ 2644 2645 { 2646 LBO Lbo; 2647 VBO Vbo; 2648 2649 ULONG RunsInMcb; 2650 ULONG ByteCount; 2651 ULONG ClusterCount = 0; 2652 ULONG ClusterIndex = 0; 2653 ULONG McbIndex = 0; 2654 2655 UCHAR LogOfBytesPerCluster; 2656 2657 PFAT_WINDOW Window; 2658 2659 NTSTATUS ZeroingStatus = STATUS_SUCCESS; 2660 2661 PAGED_CODE(); 2662 2663 DebugTrace(+1, Dbg, "FatDeallocateDiskSpace\n", 0); 2664 DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb); 2665 DebugTrace( 0, Dbg, " Mcb = %p\n", Mcb); 2666 2667 LogOfBytesPerCluster = Vcb->AllocationSupport.LogOfBytesPerCluster; 2668 2669 RunsInMcb = FsRtlNumberOfRunsInLargeMcb( Mcb ); 2670 2671 if ( RunsInMcb == 0 ) { 2672 2673 DebugTrace(-1, Dbg, "FatDeallocateDiskSpace -> (VOID)\n", 0); 2674 return; 2675 } 2676 2677 // 2678 // If we are supposed to zero out the allocation before freeing it, do so. 2679 // 2680 2681 if (ZeroOnDeallocate) { 2682 2683 _SEH2_TRY { 2684 2685 PIRP IoIrp; 2686 KEVENT IoEvent; 2687 IO_STATUS_BLOCK Iosb; 2688 PVOID Buffer = NULL; 2689 PMDL Mdl; 2690 ULONG ByteCountToZero; 2691 ULONG MdlSizeMapped; 2692 2693 // 2694 // Issue the writes down for each run in the Mcb 2695 // 2696 2697 KeInitializeEvent( &IoEvent, 2698 NotificationEvent, 2699 FALSE ); 2700 2701 for ( McbIndex = 0; McbIndex < RunsInMcb; McbIndex++ ) { 2702 2703 FatGetNextMcbEntry( Vcb, Mcb, McbIndex, &Vbo, &Lbo, &ByteCount ); 2704 2705 // 2706 // Assert that Fat files have no holes. 2707 // 2708 2709 NT_ASSERT( Lbo != 0 ); 2710 2711 // 2712 // Set up our MDL for this run. 2713 // 2714 2715 if (ByteCount > MAX_ZERO_MDL_SIZE) { 2716 Mdl = FatBuildZeroMdl( IrpContext, MAX_ZERO_MDL_SIZE); 2717 } else { 2718 Mdl = FatBuildZeroMdl( IrpContext, ByteCount); 2719 } 2720 2721 if (!Mdl) { 2722 ZeroingStatus = STATUS_INSUFFICIENT_RESOURCES; 2723 goto try_exit; 2724 } 2725 2726 _SEH2_TRY { 2727 2728 // 2729 // Map the MDL. 2730 // 2731 2732 Buffer = MmGetSystemAddressForMdlSafe(Mdl, HighPagePriority|MdlMappingNoExecute); 2733 if (!Buffer) { 2734 NT_ASSERT( FALSE ); 2735 ZeroingStatus = STATUS_INSUFFICIENT_RESOURCES; 2736 goto try_exit2; 2737 } 2738 2739 // 2740 // We might not have been able to get an MDL big enough to map the whole 2741 // run. In this case, break up the write. 2742 // 2743 2744 MdlSizeMapped = min( ByteCount, Mdl->ByteCount ); 2745 ByteCountToZero = ByteCount; 2746 2747 // 2748 // Loop until there are no bytes left to write 2749 // 2750 2751 while (ByteCountToZero != 0) { 2752 2753 // 2754 // Write zeros to each run. 2755 // 2756 2757 KeClearEvent( &IoEvent ); 2758 2759 IoIrp = IoBuildSynchronousFsdRequest( IRP_MJ_WRITE, 2760 Vcb->TargetDeviceObject, 2761 Buffer, 2762 MdlSizeMapped, 2763 (PLARGE_INTEGER)&Lbo, 2764 &IoEvent, 2765 &Iosb ); 2766 2767 if (IoIrp == NULL) { 2768 NT_ASSERT( FALSE ); 2769 ZeroingStatus = STATUS_INSUFFICIENT_RESOURCES; 2770 goto try_exit2; 2771 } 2772 2773 // 2774 // Set a flag indicating that we want to write through any 2775 // cache on the controller. This eliminates the need for 2776 // an explicit flush-device after the write.
2777 // 2778 2779 SetFlag( IoGetNextIrpStackLocation(IoIrp)->Flags, SL_WRITE_THROUGH ); 2780 2781 ZeroingStatus = IoCallDriver( Vcb->TargetDeviceObject, IoIrp ); 2782 2783 if (ZeroingStatus == STATUS_PENDING) { 2784 2785 (VOID)KeWaitForSingleObject( &IoEvent, 2786 Executive, 2787 KernelMode, 2788 FALSE, 2789 (PLARGE_INTEGER)NULL ); 2790 2791 ZeroingStatus = Iosb.Status; 2792 } 2793 2794 if (!NT_SUCCESS( ZeroingStatus )) { 2795 NT_ASSERT( FALSE ); 2796 goto try_exit2; 2797 } 2798 2799 // 2800 // Increment the starting offset where we will zero. 2801 // 2802 2803 Lbo += MdlSizeMapped; 2804 2805 // 2806 // Decrement ByteCount 2807 // 2808 2809 ByteCountToZero -= MdlSizeMapped; 2810 2811 if (ByteCountToZero < MdlSizeMapped) { 2812 MdlSizeMapped = ByteCountToZero; 2813 } 2814 2815 } 2816 2817 try_exit2: 2818 2819 NOTHING; 2820 2821 } _SEH2_FINALLY { 2822 2823 if (!FlagOn( Mdl->MdlFlags, MDL_SOURCE_IS_NONPAGED_POOL) && 2824 FlagOn( Mdl->MdlFlags, MDL_MAPPED_TO_SYSTEM_VA )) { 2825 2826 MmUnmapLockedPages( Mdl->MappedSystemVa, Mdl ); 2827 } 2828 IoFreeMdl( Mdl ); 2829 } _SEH2_END; 2830 2831 } 2832 2833 try_exit: 2834 2835 NOTHING; 2836 2837 } _SEH2_EXCEPT(FatExceptionFilter( NULL, _SEH2_GetExceptionInformation() )) { 2838 2839 // 2840 // If we failed to zero for some reason, still go ahead and deallocate 2841 // the clusters. Otherwise we'll leak space from the volume. 2842 // 2843 2844 ZeroingStatus = _SEH2_GetExceptionCode(); 2845 2846 } _SEH2_END; 2847 2848 } 2849 2850 NT_ASSERT( NT_SUCCESS(ZeroingStatus) ); 2851 2852 _SEH2_TRY { 2853 2854 // 2855 // Run through the Mcb, freeing all the runs in the fat. 2856 // 2857 // We do this in two steps (first update the fat, then the bitmap 2858 // (which can't fail)) to prevent other people from taking clusters 2859 // that we need to re-allocate in the event of unwind. 2860 // 2861 2862 ExAcquireResourceSharedLite(&Vcb->ChangeBitMapResource, TRUE); 2863 2864 RunsInMcb = FsRtlNumberOfRunsInLargeMcb( Mcb ); 2865 2866 for ( McbIndex = 0; McbIndex < RunsInMcb; McbIndex++ ) { 2867 2868 FatGetNextMcbEntry( Vcb, Mcb, McbIndex, &Vbo, &Lbo, &ByteCount ); 2869 2870 // 2871 // Assert that Fat files have no holes. 2872 // 2873 2874 NT_ASSERT( Lbo != 0 ); 2875 2876 // 2877 // Write FAT_CLUSTER_AVAILABLE to each cluster in the run. 2878 // 2879 2880 if (ByteCount == 0xFFFFFFFF) { 2881 2882 // 2883 // Special case the computation of ClusterCount 2884 // when file is of max size (4GiB - 1). 2885 // 2886 2887 ClusterCount = (1 << (32 - LogOfBytesPerCluster)); 2888 2889 } else { 2890 2891 ClusterCount = ByteCount >> LogOfBytesPerCluster; 2892 } 2893 2894 ClusterIndex = FatGetIndexFromLbo( Vcb, Lbo ); 2895 2896 FatFreeClusters( IrpContext, Vcb, ClusterIndex, ClusterCount ); 2897 } 2898 2899 // 2900 // From now on, nothing can go wrong .... (as in raise) 2901 // 2902 2903 FatLockFreeClusterBitMap( Vcb ); 2904 2905 for ( McbIndex = 0; McbIndex < RunsInMcb; McbIndex++ ) { 2906 2907 ULONG ClusterEnd; 2908 ULONG MyStart, MyLength, count; 2909 #if DBG 2910 #ifndef __REACTOS__ 2911 ULONG PreviousClear = 0; 2912 #endif 2913 ULONG i = 0; 2914 #endif 2915 2916 FatGetNextMcbEntry( Vcb, Mcb, McbIndex, &Vbo, &Lbo, &ByteCount ); 2917 2918 // 2919 // Mark the bits clear in the FreeClusterBitMap. 2920 // 2921 2922 if (ByteCount == 0xFFFFFFFF) { 2923 2924 // 2925 // Special case the computation of ClusterCount 2926 // when file is of max size (2^32 - 1).
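//
// As a concrete illustration (assuming 4 KB clusters, so
// LogOfBytesPerCluster == 12): the allocation behind a 0xFFFFFFFF byte
// count actually spans 0x100000000 bytes, giving 1 << (32 - 12) ==
// 0x100000 clusters, whereas the plain shift 0xFFFFFFFF >> 12 would
// come up one short at 0xFFFFF.
//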
2927 // 2928 2929 ClusterCount = (1 << (32 - LogOfBytesPerCluster)); 2930 2931 } else { 2932 2933 ClusterCount = ByteCount >> LogOfBytesPerCluster; 2934 } 2935 2936 ClusterIndex = FatGetIndexFromLbo( Vcb, Lbo ); 2937 2938 Window = Vcb->CurrentWindow; 2939 2940 // 2941 // If we've divided the bitmap, elide bitmap manipulation for 2942 // runs that are outside the current bucket. 2943 // 2944 2945 ClusterEnd = ClusterIndex + ClusterCount - 1; 2946 2947 if (!(ClusterIndex > Window->LastCluster || 2948 ClusterEnd < Window->FirstCluster)) { 2949 2950 // 2951 // The run being freed overlaps the current bucket, so we'll 2952 // have to clear some bits. 2953 // 2954 2955 if (ClusterIndex < Window->FirstCluster && 2956 ClusterEnd > Window->LastCluster) { 2957 2958 MyStart = Window->FirstCluster; 2959 MyLength = Window->LastCluster - Window->FirstCluster + 1; 2960 2961 } else if (ClusterIndex < Window->FirstCluster) { 2962 2963 MyStart = Window->FirstCluster; 2964 MyLength = ClusterEnd - Window->FirstCluster + 1; 2965 2966 } else { 2967 2968 // 2969 // The range being freed starts in the bucket, and may possibly 2970 // extend beyond the bucket. 2971 // 2972 2973 MyStart = ClusterIndex; 2974 2975 if (ClusterEnd <= Window->LastCluster) { 2976 2977 MyLength = ClusterCount; 2978 2979 } else { 2980 2981 MyLength = Window->LastCluster - ClusterIndex + 1; 2982 } 2983 } 2984 2985 if (MyLength == 0) { 2986 2987 continue; 2988 } 2989 2990 #if DBG 2991 #ifndef __REACTOS__ 2992 #ifdef _MSC_VER 2993 #pragma prefast( suppress:28931, "this is DBG build only" ) 2994 #endif 2995 PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap ); 2996 #endif 2997 2998 2999 // 3000 // Verify that the Bits are all really set. 3001 // 3002 3003 NT_ASSERT( MyStart + MyLength - Window->FirstCluster <= Vcb->FreeClusterBitMap.SizeOfBitMap ); 3004 3005 for (i = 0; i < MyLength; i++) { 3006 3007 NT_ASSERT( RtlCheckBit(&Vcb->FreeClusterBitMap, 3008 MyStart - Window->FirstCluster + i) == 1 ); 3009 } 3010 #endif // DBG 3011 3012 FatUnreserveClusters( IrpContext, Vcb, 3013 MyStart - Window->FirstCluster + 2, 3014 MyLength ); 3015 } 3016 3017 // 3018 // Adjust the ClustersFree count for each bitmap window, even the ones 3019 // that are not the current window. 3020 // 3021 3022 if (FatIsFat32(Vcb)) { 3023 3024 Window = &Vcb->Windows[FatWindowOfCluster( ClusterIndex )]; 3025 3026 } else { 3027 3028 Window = &Vcb->Windows[0]; 3029 } 3030 3031 MyStart = ClusterIndex; 3032 3033 for (MyLength = ClusterCount; MyLength > 0; MyLength -= count) { 3034 3035 count = FatMin(Window->LastCluster - MyStart + 1, MyLength); 3036 Window->ClustersFree += count; 3037 3038 // 3039 // If this was not the last window this allocation spanned, 3040 // advance to the next. 3041 // 3042 3043 if (MyLength != count) { 3044 3045 Window++; 3046 MyStart = Window->FirstCluster; 3047 } 3048 } 3049 3050 // 3051 // Deallocation is now complete. Adjust the free cluster count. 3052 // 3053 3054 Vcb->AllocationSupport.NumberOfFreeClusters += ClusterCount; 3055 } 3056 3057 #if DBG 3058 if (Vcb->CurrentWindow->ClustersFree != 3059 RtlNumberOfClearBits(&Vcb->FreeClusterBitMap)) { 3060 3061 DbgPrint("%x vs %x\n", Vcb->CurrentWindow->ClustersFree, 3062 RtlNumberOfClearBits(&Vcb->FreeClusterBitMap)); 3063 3064 DbgPrint("%x for %x\n", ClusterIndex, ClusterCount); 3065 } 3066 #endif 3067 3068 FatUnlockFreeClusterBitMap( Vcb ); 3069 3070 3071 } _SEH2_FINALLY { 3072 3073 DebugUnwind( FatDeallocateDiskSpace ); 3074 3075 // 3076 // Is there any unwinding to do? 
3077 // 3078 3079 ExReleaseResourceLite(&Vcb->ChangeBitMapResource); 3080 3081 if ( _SEH2_AbnormalTermination() ) { 3082 3083 LBO LocalLbo; 3084 VBO LocalVbo; 3085 3086 ULONG Index; 3087 ULONG Clusters; 3088 ULONG FatIndex; 3089 ULONG PriorLastIndex; 3090 3091 // 3092 // For each entry we already deallocated, reallocate it, 3093 // chaining together as necessary. Note that we continue 3094 // up to and including the last "for" iteration even though 3095 // the SetFatRun could not have been successful. This 3096 // allows us a convenient way to re-link the final successful 3097 // SetFatRun. 3098 // 3099 // It is possible that the reason we got here will prevent us 3100 // from succeeding in this operation. 3101 // 3102 3103 PriorLastIndex = 0; 3104 3105 for (Index = 0; Index <= McbIndex; Index++) { 3106 3107 FatGetNextMcbEntry(Vcb, Mcb, Index, &LocalVbo, &LocalLbo, &ByteCount); 3108 3109 if (ByteCount == 0xFFFFFFFF) { 3110 3111 // 3112 // Special case the computation of ClusterCount 3113 // when file is of max size (2^32 - 1). 3114 // 3115 3116 Clusters = (1 << (32 - LogOfBytesPerCluster)); 3117 3118 } else { 3119 3120 Clusters = ByteCount >> LogOfBytesPerCluster; 3121 } 3122 3123 FatIndex = FatGetIndexFromLbo( Vcb, LocalLbo ); 3124 3125 // 3126 // We must always restore the prior iteration's last 3127 // entry, pointing it to the first cluster of this run. 3128 // 3129 3130 if (PriorLastIndex != 0) { 3131 3132 FatSetFatEntry( IrpContext, 3133 Vcb, 3134 PriorLastIndex, 3135 (FAT_ENTRY)FatIndex ); 3136 } 3137 3138 // 3139 // If this is not the last entry (the one that failed) 3140 // then reallocate the disk space on the fat. 3141 // 3142 3143 if ( Index < McbIndex ) { 3144 3145 FatAllocateClusters(IrpContext, Vcb, FatIndex, Clusters); 3146 3147 PriorLastIndex = FatIndex + Clusters - 1; 3148 } 3149 } 3150 } 3151 3152 DebugTrace(-1, Dbg, "FatDeallocateDiskSpace -> (VOID)\n", 0); 3153 } _SEH2_END; 3154 3155 return; 3156 } 3157 3158 3159 _Requires_lock_held_(_Global_critical_region_) 3160 VOID 3161 FatSplitAllocation ( 3162 IN PIRP_CONTEXT IrpContext, 3163 IN PVCB Vcb, 3164 IN OUT PLARGE_MCB Mcb, 3165 IN VBO SplitAtVbo, 3166 OUT PLARGE_MCB RemainingMcb 3167 ) 3168 3169 /*++ 3170 3171 Routine Description: 3172 3173 This procedure takes a single mcb and splits its allocation into 3174 two separate allocation units. The separation must only be done 3175 on cluster boundaries, otherwise we bugcheck. 3176 3177 On the disk this actually works by inserting a FAT_CLUSTER_LAST into 3178 the last index of the first part being split out. 3179 3180 Pictorially what is done is the following (where ! denotes the end of 3181 the fat chain (i.e., FAT_CLUSTER_LAST)): 3182 3183 3184 Mcb |--a--|--b--|--c--|--d--|--e--|--f--| 3185 3186 ^ 3187 SplitAtVbo ---------------------+ 3188 3189 RemainingMcb (empty) 3190 3191 becomes 3192 3193 Mcb |--a--|--b--|--c--! 3194 3195 3196 RemainingMcb |--d--|--e--|--f--| 3197 3198 Arguments: 3199 3200 Vcb - Supplies the VCB being modified 3201 3202 Mcb - Supplies the MCB describing the allocation being split into 3203 two parts. Upon return this Mcb now contains the first chain. 3204 3205 SplitAtVbo - Supplies the VBO of the first byte for the second chain 3206 that we are creating. 3207 3208 RemainingMcb - Receives the MCB describing the second chain of allocated 3209 disk space. The caller passes in an initialized Mcb that 3210 is filled in by this procedure STARTING AT VBO 0.
3211 3212 Return Value: 3213 3214 None. 3215 3216 3217 --*/ 3218 3219 { 3220 VBO SourceVbo; 3221 VBO TargetVbo; 3222 VBO DontCare; 3223 3224 LBO Lbo; 3225 3226 ULONG ByteCount; 3227 3228 #if DBG 3229 ULONG BytesPerCluster; 3230 #endif 3231 3232 PAGED_CODE(); 3233 3234 DebugTrace(+1, Dbg, "FatSplitAllocation\n", 0); 3235 DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb); 3236 DebugTrace( 0, Dbg, " Mcb = %p\n", Mcb); 3237 DebugTrace( 0, Dbg, " SplitAtVbo = %8lx\n", SplitAtVbo); 3238 DebugTrace( 0, Dbg, " RemainingMcb = %p\n", RemainingMcb); 3239 3240 #if DBG 3241 BytesPerCluster = 1 << Vcb->AllocationSupport.LogOfBytesPerCluster; 3242 #endif 3243 3244 // 3245 // Assert that the split point is cluster aligned 3246 // 3247 3248 NT_ASSERT( (SplitAtVbo & (BytesPerCluster - 1)) == 0 ); 3249 3250 // 3251 // We should never be handed an empty source MCB and asked to split 3252 // at a non zero point. 3253 // 3254 3255 NT_ASSERT( !((0 != SplitAtVbo) && (0 == FsRtlNumberOfRunsInLargeMcb( Mcb)))); 3256 3257 // 3258 // Assert we were given an empty target Mcb. 3259 // 3260 3261 // 3262 // This assert is commented out to avoid hitting in the Ea error 3263 // path. In that case we will be using the same Mcb's to split the 3264 // allocation that we used to merge them. The target Mcb will contain 3265 // the runs that the split will attempt to insert. 3266 // 3267 // 3268 // NT_ASSERT( FsRtlNumberOfRunsInMcb( RemainingMcb ) == 0 ); 3269 // 3270 3271 _SEH2_TRY { 3272 3273 // 3274 // Move the runs after SplitAtVbo from the source to the target 3275 // 3276 3277 SourceVbo = SplitAtVbo; 3278 TargetVbo = 0; 3279 3280 while (FatLookupMcbEntry(Vcb, Mcb, SourceVbo, &Lbo, &ByteCount, NULL)) { 3281 3282 FatAddMcbEntry( Vcb, RemainingMcb, TargetVbo, Lbo, ByteCount ); 3283 3284 FatRemoveMcbEntry( Vcb, Mcb, SourceVbo, ByteCount ); 3285 3286 TargetVbo += ByteCount; 3287 SourceVbo += ByteCount; 3288 3289 // 3290 // If SourceVbo overflows, we were actually snipping off the end 3291 // of the maximal file ... and are now done. 3292 // 3293 3294 if (SourceVbo == 0) { 3295 3296 break; 3297 } 3298 } 3299 3300 // 3301 // Mark the last pre-split cluster as a FAT_LAST_CLUSTER 3302 // 3303 3304 if ( SplitAtVbo != 0 ) { 3305 3306 FatLookupLastMcbEntry( Vcb, Mcb, &DontCare, &Lbo, NULL ); 3307 3308 FatSetFatEntry( IrpContext, 3309 Vcb, 3310 FatGetIndexFromLbo( Vcb, Lbo ), 3311 FAT_CLUSTER_LAST ); 3312 } 3313 3314 } _SEH2_FINALLY { 3315 3316 DebugUnwind( FatSplitAllocation ); 3317 3318 // 3319 // If we got an exception, we must glue back together the Mcbs 3320 // 3321 3322 if ( _SEH2_AbnormalTermination() ) { 3323 3324 TargetVbo = SplitAtVbo; 3325 SourceVbo = 0; 3326 3327 while (FatLookupMcbEntry(Vcb, RemainingMcb, SourceVbo, &Lbo, &ByteCount, NULL)) { 3328 3329 FatAddMcbEntry( Vcb, Mcb, TargetVbo, Lbo, ByteCount ); 3330 3331 FatRemoveMcbEntry( Vcb, RemainingMcb, SourceVbo, ByteCount ); 3332 3333 TargetVbo += ByteCount; 3334 SourceVbo += ByteCount; 3335 } 3336 } 3337 3338 DebugTrace(-1, Dbg, "FatSplitAllocation -> (VOID)\n", 0); 3339 } _SEH2_END; 3340 3341 return; 3342 } 3343 3344 3345 _Requires_lock_held_(_Global_critical_region_) 3346 VOID 3347 FatMergeAllocation ( 3348 IN PIRP_CONTEXT IrpContext, 3349 IN PVCB Vcb, 3350 IN OUT PLARGE_MCB Mcb, 3351 IN PLARGE_MCB SecondMcb 3352 ) 3353 3354 /*++ 3355 3356 Routine Description: 3357 3358 This routine takes two separate allocations described by two MCBs and 3359 joins them together into one allocation.
3360 3361 Pictorially what is done is the following (where ! denotes the end of 3362 the fat chain (i.e., FAT_CLUSTER_LAST)): 3363 3364 3365 Mcb |--a--|--b--|--c--! 3366 3367 SecondMcb |--d--|--e--|--f--| 3368 3369 becomes 3370 3371 Mcb |--a--|--b--|--c--|--d--|--e--|--f--| 3372 3373 SecondMcb |--d--|--e--|--f--| 3374 3375 3376 Arguments: 3377 3378 Vcb - Supplies the VCB being modified 3379 3380 Mcb - Supplies the MCB of the first allocation that is being modified. 3381 Upon return this Mcb will also describe the newly enlarged 3382 allocation 3383 3384 SecondMcb - Supplies the ZERO VBO BASED MCB of the second allocation 3385 that is being appended to the first allocation. This 3386 procedure leaves SecondMcb unchanged. 3387 3388 Return Value: 3389 3390 None. 3391 3392 3393 --*/ 3394 3395 { 3396 VBO SpliceVbo = 0; 3397 LBO SpliceLbo; 3398 3399 VBO SourceVbo; 3400 VBO TargetVbo = 0; 3401 3402 LBO Lbo; 3403 3404 ULONG ByteCount; 3405 3406 PAGED_CODE(); 3407 3408 DebugTrace(+1, Dbg, "FatMergeAllocation\n", 0); 3409 DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb); 3410 DebugTrace( 0, Dbg, " Mcb = %p\n", Mcb); 3411 DebugTrace( 0, Dbg, " SecondMcb = %p\n", SecondMcb); 3412 3413 _SEH2_TRY { 3414 3415 // 3416 // Append the runs from SecondMcb to Mcb 3417 // 3418 3419 (void)FatLookupLastMcbEntry( Vcb, Mcb, &SpliceVbo, &SpliceLbo, NULL ); 3420 3421 SourceVbo = 0; 3422 TargetVbo = SpliceVbo + 1; 3423 3424 while (FatLookupMcbEntry(Vcb, SecondMcb, SourceVbo, &Lbo, &ByteCount, NULL)) { 3425 3426 FatAddMcbEntry( Vcb, Mcb, TargetVbo, Lbo, ByteCount ); 3427 3428 SourceVbo += ByteCount; 3429 TargetVbo += ByteCount; 3430 } 3431 3432 // 3433 // Link the last pre-merge cluster to the first cluster of SecondMcb 3434 // 3435 3436 FatLookupMcbEntry( Vcb, SecondMcb, 0, &Lbo, (PULONG)NULL, NULL ); 3437 3438 FatSetFatEntry( IrpContext, 3439 Vcb, 3440 FatGetIndexFromLbo( Vcb, SpliceLbo ), 3441 (FAT_ENTRY)FatGetIndexFromLbo( Vcb, Lbo ) ); 3442 3443 } _SEH2_FINALLY { 3444 3445 DebugUnwind( FatMergeAllocation ); 3446 3447 // 3448 // If we got an exception, we must remove the runs added to Mcb 3449 // 3450 3451 if ( _SEH2_AbnormalTermination() ) { 3452 3453 ULONG CutLength; 3454 3455 if ((CutLength = TargetVbo - (SpliceVbo + 1)) != 0) { 3456 3457 FatRemoveMcbEntry( Vcb, Mcb, SpliceVbo + 1, CutLength); 3458 } 3459 } 3460 3461 DebugTrace(-1, Dbg, "FatMergeAllocation -> (VOID)\n", 0); 3462 } _SEH2_END; 3463 3464 return; 3465 } 3466 3467 3468 // 3469 // Internal support routine 3470 // 3471 3472 CLUSTER_TYPE 3473 FatInterpretClusterType ( 3474 IN PVCB Vcb, 3475 IN FAT_ENTRY Entry 3476 ) 3477 3478 /*++ 3479 3480 Routine Description: 3481 3482 This procedure tells the caller how to interpret the input fat table 3483 entry. It will indicate if the fat cluster is available, reserved, 3484 bad, the last one, or another fat index. This procedure can deal 3485 with 12, 16, and 32 bit FATs.
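    As an illustration (assuming the standard fat.h entry constants), a raw
    12 bit entry of 0xFF8 is first extended to 0x0FFFFFF8, which the
    comparisons below classify as FatClusterLast, while a raw entry of
    0x003 is left alone and classified as FatClusterNext (a link to the
    next cluster).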
3486 3487 Arguments: 3488 3489 Vcb - Supplies the Vcb to examine, yields 12/16 bit info 3490 3491 Entry - Supplies the fat entry to examine 3492 3493 Return Value: 3494 3495 CLUSTER_TYPE - Is the type of the input Fat entry 3496 3497 --*/ 3498 3499 { 3500 DebugTrace(+1, Dbg, "InterpretClusterType\n", 0); 3501 DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb); 3502 DebugTrace( 0, Dbg, " Entry = %8lx\n", Entry); 3503 3504 PAGED_CODE(); 3505 3506 switch(Vcb->AllocationSupport.FatIndexBitSize ) { 3507 case 32: 3508 Entry &= FAT32_ENTRY_MASK; 3509 break; 3510 3511 case 12: 3512 NT_ASSERT( Entry <= 0xfff ); 3513 if (Entry >= 0x0ff0) { 3514 Entry |= 0x0FFFF000; 3515 } 3516 break; 3517 3518 default: 3519 case 16: 3520 NT_ASSERT( Entry <= 0xffff ); 3521 if (Entry >= 0x0fff0) { 3522 Entry |= 0x0FFF0000; 3523 } 3524 break; 3525 } 3526 3527 if (Entry == FAT_CLUSTER_AVAILABLE) { 3528 3529 DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterAvailable\n", 0); 3530 3531 return FatClusterAvailable; 3532 3533 } else if (Entry < FAT_CLUSTER_RESERVED) { 3534 3535 DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterNext\n", 0); 3536 3537 return FatClusterNext; 3538 3539 } else if (Entry < FAT_CLUSTER_BAD) { 3540 3541 DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterReserved\n", 0); 3542 3543 return FatClusterReserved; 3544 3545 } else if (Entry == FAT_CLUSTER_BAD) { 3546 3547 DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterBad\n", 0); 3548 3549 return FatClusterBad; 3550 3551 } else { 3552 3553 DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterLast\n", 0); 3554 3555 return FatClusterLast; 3556 } 3557 } 3558 3559 3560 // 3561 // Internal support routine 3562 // 3563 3564 VOID 3565 FatLookupFatEntry ( 3566 IN PIRP_CONTEXT IrpContext, 3567 IN PVCB Vcb, 3568 IN ULONG FatIndex, 3569 IN OUT PULONG FatEntry, 3570 IN OUT PFAT_ENUMERATION_CONTEXT Context 3571 ) 3572 3573 /*++ 3574 3575 Routine Description: 3576 3577 This routine takes an index into the fat and gives back the value 3578 in the Fat at this index. At any given time, for a 16 bit fat, this 3579 routine allows only one page per volume of the fat to be pinned in 3580 memory. For a 12 bit fat, the entire fat (max 6k) is pinned. This 3581 extra layer of caching makes the vast majority of requests very 3582 fast. The context for this caching is stored in a structure in the Vcb. 3583 3584 Arguments: 3585 3586 Vcb - Supplies the Vcb to examine, yields 12/16 bit info, 3587 fat access context, etc. 3588 3589 FatIndex - Supplies the fat index to examine. 3590 3591 FatEntry - Receives the fat entry pointed to by FatIndex. Note that 3592 it must point to non-paged pool. 3593 3594 Context - This structure keeps track of a page of pinned fat between calls. 3595 3596 --*/ 3597 3598 { 3599 PAGED_CODE(); 3600 3601 DebugTrace(+1, Dbg, "FatLookupFatEntry\n", 0); 3602 DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb); 3603 DebugTrace( 0, Dbg, " FatIndex = %4x\n", FatIndex); 3604 DebugTrace( 0, Dbg, " FatEntry = %8lx\n", FatEntry); 3605 3606 // 3607 // Make sure they gave us a valid fat index. 3608 // 3609 3610 FatVerifyIndexIsValid(IrpContext, Vcb, FatIndex); 3611 3612 // 3613 // Case on 12 or 16 bit fats. 3614 // 3615 // In the 12 bit case (mostly floppies) we always have the whole fat 3616 // (max 6k bytes) pinned during allocation operations. This is possibly 3617 // a wee bit slower, but saves headaches over fat entries with 8 bits 3618 // on one page, and 4 bits on the next.
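//
// To illustrate the 12 bit packing that motivates this: entry N starts
// at byte N * 3 / 2 of the FAT, so entry 5 starts at byte 7 and, being
// an odd-numbered entry, occupies the high nibble of byte 7 plus all of
// byte 8. FatLookup12BitEntry hides this arithmetic from the caller.
//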
3619 // 3620 // The 16 bit case always keeps the last used page pinned until all 3621 // operations are done and it is unpinned. 3622 // 3623 3624 // 3625 // DEAL WITH 12 BIT CASE 3626 // 3627 3628 if (Vcb->AllocationSupport.FatIndexBitSize == 12) { 3629 3630 // 3631 // Check to see if the fat is already pinned, otherwise pin it. 3632 // 3633 3634 if (Context->Bcb == NULL) { 3635 3636 FatReadVolumeFile( IrpContext, 3637 Vcb, 3638 FatReservedBytes( &Vcb->Bpb ), 3639 FatBytesPerFat( &Vcb->Bpb ), 3640 &Context->Bcb, 3641 &Context->PinnedPage ); 3642 } 3643 3644 // 3645 // Load the return value. 3646 // 3647 3648 3649 FatLookup12BitEntry( Context->PinnedPage, FatIndex, FatEntry ); 3650 3651 } else if (Vcb->AllocationSupport.FatIndexBitSize == 32) { 3652 3653 // 3654 // DEAL WITH 32 BIT CASE 3655 // 3656 3657 ULONG PageEntryOffset; 3658 ULONG OffsetIntoVolumeFile; 3659 3660 // 3661 // Initialize two local variables that help us. 3662 // 3663 OffsetIntoVolumeFile = FatReservedBytes(&Vcb->Bpb) + FatIndex * sizeof(FAT_ENTRY); 3664 PageEntryOffset = (OffsetIntoVolumeFile % PAGE_SIZE) / sizeof(FAT_ENTRY); 3665 3666 // 3667 // Check to see if we need to read in a new page of fat 3668 // 3669 3670 if ((Context->Bcb == NULL) || 3671 (OffsetIntoVolumeFile / PAGE_SIZE != Context->VboOfPinnedPage / PAGE_SIZE)) { 3672 3673 // 3674 // The entry wasn't in the pinned page, so we must unpin the current 3675 // page (if any) and read in a new page. 3676 // 3677 3678 FatUnpinBcb( IrpContext, Context->Bcb ); 3679 3680 FatReadVolumeFile( IrpContext, 3681 Vcb, 3682 OffsetIntoVolumeFile & ~(PAGE_SIZE - 1), 3683 PAGE_SIZE, 3684 &Context->Bcb, 3685 &Context->PinnedPage ); 3686 3687 Context->VboOfPinnedPage = OffsetIntoVolumeFile & ~(PAGE_SIZE - 1); 3688 } 3689 3690 // 3691 // Grab the fat entry from the pinned page, and return 3692 // 3693 3694 *FatEntry = ((PULONG)(Context->PinnedPage))[PageEntryOffset] & FAT32_ENTRY_MASK; 3695 3696 } else { 3697 3698 // 3699 // DEAL WITH 16 BIT CASE 3700 // 3701 3702 ULONG PageEntryOffset; 3703 ULONG OffsetIntoVolumeFile; 3704 3705 // 3706 // Initialize two local variables that help us. 3707 // 3708 3709 OffsetIntoVolumeFile = FatReservedBytes(&Vcb->Bpb) + FatIndex * sizeof(USHORT); 3710 PageEntryOffset = (OffsetIntoVolumeFile % PAGE_SIZE) / sizeof(USHORT); 3711 3712 // 3713 // Check to see if we need to read in a new page of fat 3714 // 3715 3716 if ((Context->Bcb == NULL) || 3717 (OffsetIntoVolumeFile / PAGE_SIZE != Context->VboOfPinnedPage / PAGE_SIZE)) { 3718 3719 // 3720 // The entry wasn't in the pinned page, so we must unpin the current 3721 // page (if any) and read in a new page. 3722 // 3723 3724 FatUnpinBcb( IrpContext, Context->Bcb ); 3725 3726 FatReadVolumeFile( IrpContext, 3727 Vcb, 3728 OffsetIntoVolumeFile & ~(PAGE_SIZE - 1), 3729 PAGE_SIZE, 3730 &Context->Bcb, 3731 &Context->PinnedPage ); 3732 3733 Context->VboOfPinnedPage = OffsetIntoVolumeFile & ~(PAGE_SIZE - 1); 3734 } 3735 3736 // 3737 // Grab the fat entry from the pinned page, and return 3738 // 3739 3740 *FatEntry = ((PUSHORT)(Context->PinnedPage))[PageEntryOffset]; 3741 } 3742 3743 DebugTrace(-1, Dbg, "FatLookupFatEntry -> (VOID)\n", 0); 3744 return; 3745 } 3746 3747 3748 _Requires_lock_held_(_Global_critical_region_) 3749 VOID 3750 FatSetFatEntry ( 3751 IN PIRP_CONTEXT IrpContext, 3752 IN PVCB Vcb, 3753 IN ULONG FatIndex, 3754 IN FAT_ENTRY FatEntry 3755 ) 3756 3757 /*++ 3758 3759 Routine Description: 3760 3761 This routine takes an index into the fat and puts a value in the Fat 3762 at this index.
The routine special cases 12, 16 and 32 bit fats. In 3763 all cases we go to the cache manager for a piece of the fat. 3764 3765 We have a special form of this call for setting the DOS-style dirty bit. 3766 Unlike the dirty bit in the boot sector, we do not go to special effort 3767 to make sure that this hits the disk synchronously - if the system goes 3768 down in the window between the dirty bit being set in the boot sector 3769 and the FAT index zero dirty bit being lazy written, then life is tough. 3770 3771 The only possible scenario is that Win9x may see what it thinks is a clean 3772 volume that really isn't (hopefully Memphis will pay attention to our dirty 3773 bit as well). The dirty bit will get out quickly, and if heavy activity is 3774 occurring, then the dirty bit should actually be there virtually all of the 3775 time since the act of cleaning the volume is the "rare" occurrence. 3776 3777 There are synchronization concerns that would crop up if we tried to make 3778 this synchronous. This thread may already own the Bcb shared for the first 3779 sector of the FAT (so we can't get it exclusive for a writethrough). This 3780 would require some more serious replumbing to work around than I want to 3781 consider at this time. 3782 3783 We can and do, however, synchronously set the bit clean. 3784 3785 At this point the reader should understand why the NT dirty bit is where it is. 3786 3787 Arguments: 3788 3789 Vcb - Supplies the Vcb to examine, yields 12/16/32 bit info, etc. 3790 3791 FatIndex - Supplies the destination fat index. 3792 3793 FatEntry - Supplies the source fat entry. 3794 3795 --*/ 3796 3797 { 3798 LBO Lbo; 3799 PBCB Bcb = NULL; 3800 ULONG SectorSize; 3801 ULONG OffsetIntoVolumeFile; 3802 ULONG WasWait = TRUE; 3803 BOOLEAN RegularOperation = TRUE; 3804 BOOLEAN CleaningOperation = FALSE; 3805 BOOLEAN ReleaseMutex = FALSE; 3806 3807 PAGED_CODE(); 3808 3809 DebugTrace(+1, Dbg, "FatSetFatEntry\n", 0); 3810 DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb); 3811 DebugTrace( 0, Dbg, " FatIndex = %4x\n", FatIndex); 3812 DebugTrace( 0, Dbg, " FatEntry = %4x\n", FatEntry); 3813 3814 // 3815 // Make sure they gave us a valid fat index if this isn't the special 3816 // clean-bit modifying call. 3817 // 3818 3819 if (FatIndex == FAT_DIRTY_BIT_INDEX) { 3820 3821 // 3822 // We are setting the clean bit state. Of course, we could 3823 // have corruption that would cause us to try to fiddle the 3824 // reserved index - we guard against this by having the 3825 // special entry values use the reserved high 4 bits that 3826 // we know that we'll never try to set. 3827 // 3828 3829 // 3830 // We don't want to repin the FAT pages involved here. Just 3831 // let the lazy writer hit them when it can. 3832 // 3833 3834 RegularOperation = FALSE; 3835 3836 switch (FatEntry) { 3837 case FAT_CLEAN_VOLUME: 3838 FatEntry = (FAT_ENTRY)FAT_CLEAN_ENTRY; 3839 CleaningOperation = TRUE; 3840 break; 3841 3842 case FAT_DIRTY_VOLUME: 3843 switch (Vcb->AllocationSupport.FatIndexBitSize) { 3844 case 12: 3845 FatEntry = FAT12_DIRTY_ENTRY; 3846 break; 3847 3848 case 32: 3849 FatEntry = FAT32_DIRTY_ENTRY; 3850 break; 3851 3852 default: 3853 FatEntry = FAT16_DIRTY_ENTRY; 3854 break; 3855 } 3856 break; 3857 3858 default: 3859 FatRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR ); 3860 break; 3861 } 3862 3863 // 3864 // Disable dirtying semantics for the duration of this operation. Force this 3865 // operation to wait for the duration.
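//
// For reference, this is the call shape a caller uses to flip the bit,
// with the switch above translating the request into the on-disk value
// appropriate for the FAT width:
//
//     FatSetFatEntry( IrpContext, Vcb, FAT_DIRTY_BIT_INDEX, FAT_DIRTY_VOLUME );
//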
3866 // 3867 3868 WasWait = FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT ); 3869 SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT | IRP_CONTEXT_FLAG_DISABLE_DIRTY ); 3870 3871 } else { 3872 3873 NT_ASSERT( !(FatEntry & ~FAT32_ENTRY_MASK) ); 3874 FatVerifyIndexIsValid(IrpContext, Vcb, FatIndex); 3875 } 3876 3877 // 3878 // Set Sector Size 3879 // 3880 3881 SectorSize = 1 << Vcb->AllocationSupport.LogOfBytesPerSector; 3882 3883 // 3884 // Case on 12 or 16 bit fats. 3885 // 3886 // In the 12 bit case (mostly floppies) we always have the whole fat 3887 // (max 6k bytes) pinned during allocation operations. This is possibly 3888 // a wee bit slower, but saves headaches over fat entries with 8 bits 3889 // on one page, and 4 bits on the next. 3890 // 3891 // In the 16 bit case we only read the page that we need to set the fat 3892 // entry. 3893 // 3894 3895 // 3896 // DEAL WITH 12 BIT CASE 3897 // 3898 3899 _SEH2_TRY { 3900 3901 if (Vcb->AllocationSupport.FatIndexBitSize == 12) { 3902 3903 PVOID PinnedFat; 3904 3905 // 3906 // Make sure we have a valid entry 3907 // 3908 3909 FatEntry &= 0xfff; 3910 3911 // 3912 // We read in the entire fat. Note that using prepare write marks 3913 // the bcb pre-dirty, so we don't have to do it explicitly. 3914 // 3915 3916 OffsetIntoVolumeFile = FatReservedBytes( &Vcb->Bpb ) + FatIndex * 3 / 2; 3917 3918 FatPrepareWriteVolumeFile( IrpContext, 3919 Vcb, 3920 FatReservedBytes( &Vcb->Bpb ), 3921 FatBytesPerFat( &Vcb->Bpb ), 3922 &Bcb, 3923 &PinnedFat, 3924 RegularOperation, 3925 FALSE ); 3926 3927 // 3928 // Mark the sector(s) dirty in the DirtyFatMcb. This call is 3929 // complicated somewhat for the 12 bit case since a single 3930 // entry write can span two sectors (and pages). 3931 // 3932 // Get the Lbo for the sector where the entry starts, and add it to 3933 // the dirty fat Mcb. 3934 // 3935 3936 Lbo = OffsetIntoVolumeFile & ~(SectorSize - 1); 3937 3938 FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize); 3939 3940 // 3941 // If the entry started on the last byte of the sector, it continues 3942 // to the next sector, so mark the next sector dirty as well. 3943 // 3944 // Note that this entry will simply coalesce with the last entry, 3945 // so this operation cannot fail. Also if we get this far, we have 3946 // made it, so no unwinding will be needed. 3947 // 3948 3949 if ( (OffsetIntoVolumeFile & (SectorSize - 1)) == (SectorSize - 1) ) { 3950 3951 Lbo += SectorSize; 3952 3953 FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize ); 3954 } 3955 3956 // 3957 // Store the entry into the fat; we need a little synchronization 3958 // here and can't use a spinlock since the bytes might not be 3959 // resident.
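//
// For example, with 512 byte sectors, entry 341 begins at byte offset
// 511 of the FAT ((341 * 3) / 2 == 511), so its top half spills into
// the next sector and both sectors were marked dirty above. The mutex
// is needed because FatSet12BitEntry must read-modify-write a byte it
// shares with a neighboring entry.
//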
3960 // 3961 3962 FatLockFreeClusterBitMap( Vcb ); 3963 ReleaseMutex = TRUE; 3964 3965 FatSet12BitEntry( PinnedFat, FatIndex, FatEntry ); 3966 3967 FatUnlockFreeClusterBitMap( Vcb ); 3968 ReleaseMutex = FALSE; 3969 3970 } else if (Vcb->AllocationSupport.FatIndexBitSize == 32) { 3971 3972 // 3973 // DEAL WITH 32 BIT CASE 3974 // 3975 3976 PULONG PinnedFatEntry32; 3977 3978 // 3979 // Read in a new page of fat 3980 // 3981 3982 OffsetIntoVolumeFile = FatReservedBytes( &Vcb->Bpb ) + 3983 FatIndex * sizeof( FAT_ENTRY ); 3984 3985 FatPrepareWriteVolumeFile( IrpContext, 3986 Vcb, 3987 OffsetIntoVolumeFile, 3988 sizeof(FAT_ENTRY), 3989 &Bcb, 3990 (PVOID *)&PinnedFatEntry32, 3991 RegularOperation, 3992 FALSE ); 3993 // 3994 // Mark the sector dirty in the DirtyFatMcb 3995 // 3996 3997 Lbo = OffsetIntoVolumeFile & ~(SectorSize - 1); 3998 3999 FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize); 4000 4001 // 4002 // Store the FatEntry to the pinned page. 4003 // 4004 // Preserve the reserved bits in FAT32 entries in the file heap. 4005 // 4006 4007 #ifdef ALPHA 4008 FatLockFreeClusterBitMap( Vcb ); 4009 ReleaseMutex = TRUE; 4010 #endif // ALPHA 4011 4012 if (FatIndex != FAT_DIRTY_BIT_INDEX) { 4013 4014 *PinnedFatEntry32 = ((*PinnedFatEntry32 & ~FAT32_ENTRY_MASK) | FatEntry); 4015 4016 } else { 4017 4018 *PinnedFatEntry32 = FatEntry; 4019 } 4020 4021 #ifdef ALPHA 4022 FatUnlockFreeClusterBitMap( Vcb ); 4023 ReleaseMutex = FALSE; 4024 #endif // ALPHA 4025 4026 } else { 4027 4028 // 4029 // DEAL WITH 16 BIT CASE 4030 // 4031 4032 PUSHORT PinnedFatEntry; 4033 4034 // 4035 // Read in a new page of fat 4036 // 4037 4038 OffsetIntoVolumeFile = FatReservedBytes( &Vcb->Bpb ) + 4039 FatIndex * sizeof(USHORT); 4040 4041 FatPrepareWriteVolumeFile( IrpContext, 4042 Vcb, 4043 OffsetIntoVolumeFile, 4044 sizeof(USHORT), 4045 &Bcb, 4046 (PVOID *)&PinnedFatEntry, 4047 RegularOperation, 4048 FALSE ); 4049 // 4050 // Mark the sector dirty in the DirtyFatMcb 4051 // 4052 4053 Lbo = OffsetIntoVolumeFile & ~(SectorSize - 1); 4054 4055 FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize); 4056 4057 // 4058 // Store the FatEntry to the pinned page. 4059 // 4060 // We need extra synchronization here for broken architectures 4061 // like the ALPHA that don't support atomic 16 bit writes. 4062 // 4063 4064 #ifdef ALPHA 4065 FatLockFreeClusterBitMap( Vcb ); 4066 ReleaseMutex = TRUE; 4067 #endif // ALPHA 4068 4069 *PinnedFatEntry = (USHORT)FatEntry; 4070 4071 #ifdef ALPHA 4072 FatUnlockFreeClusterBitMap( Vcb ); 4073 ReleaseMutex = FALSE; 4074 #endif // ALPHA 4075 } 4076 4077 } _SEH2_FINALLY { 4078 4079 DebugUnwind( FatSetFatEntry ); 4080 4081 // 4082 // Re-enable volume dirtying in case this was a dirty bit operation. 4083 // 4084 4085 ClearFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_DISABLE_DIRTY ); 4086 4087 // 4088 // Make this operation asynchronous again if needed. 4089 // 4090 4091 if (!WasWait) { 4092 4093 ClearFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT ); 4094 } 4095 4096 // 4097 // If we still somehow have the Mutex, release it. 4098 // 4099 4100 if (ReleaseMutex) { 4101 4102 NT_ASSERT( _SEH2_AbnormalTermination() ); 4103 4104 FatUnlockFreeClusterBitMap( Vcb ); 4105 } 4106 4107 // 4108 // Unpin the Bcb. For cleaning operations or if the corruption was detected while mounting we make this write-through. 
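//
// The repin/unpin-repinned pair below is what makes the write
// synchronous: CcUnpinRepinnedBcb with WriteThrough == TRUE does not
// return until the Bcb's data has gone to disk.
//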
4109 // 4110 4111 if ((CleaningOperation || 4112 FlagOn(Vcb->VcbState, VCB_STATE_FLAG_MOUNT_IN_PROGRESS)) && 4113 Bcb) { 4114 4115 IO_STATUS_BLOCK IgnoreStatus; 4116 4117 CcRepinBcb( Bcb ); 4118 CcUnpinData( Bcb ); 4119 DbgDoit( IrpContext->PinCount -= 1 ); 4120 CcUnpinRepinnedBcb( Bcb, TRUE, &IgnoreStatus ); 4121 4122 } else { 4123 4124 FatUnpinBcb(IrpContext, Bcb); 4125 } 4126 4127 DebugTrace(-1, Dbg, "FatSetFatEntry -> (VOID)\n", 0); 4128 } _SEH2_END; 4129 4130 return; 4131 } 4132 4133 4134 // 4135 // Internal support routine 4136 // 4137 4138 VOID 4139 FatSetFatRun ( 4140 IN PIRP_CONTEXT IrpContext, 4141 IN PVCB Vcb, 4142 IN ULONG StartingFatIndex, 4143 IN ULONG ClusterCount, 4144 IN BOOLEAN ChainTogether 4145 ) 4146 4147 /*++ 4148 4149 Routine Description: 4150 4151 This routine sets a continuous run of clusters in the fat. If ChainTogether 4152 is TRUE, then the clusters are linked together as in normal Fat fashion, 4153 with the last cluster receiving FAT_CLUSTER_LAST. If ChainTogether is 4154 FALSE, all the entries are set to FAT_CLUSTER_AVAILABLE, effectively 4155 freeing all the clusters in the run. 4156 4157 Arguments: 4158 4159 Vcb - Supplies the Vcb to examine, yields 12/16 bit info, etc. 4160 4161 StartingFatIndex - Supplies the destination fat index. 4162 4163 ClusterCount - Supplies the number of contiguous clusters to work on. 4164 4165 ChainTogether - Tells us whether to fill the entries with links, or 4166 FAT_CLUSTER_AVAILABLE 4167 4168 4169 Return Value: 4170 4171 VOID 4172 4173 --*/ 4174 4175 { 4176 #define MAXCOUNTCLUS 0x10000 4177 #define COUNTSAVEDBCBS ((MAXCOUNTCLUS * sizeof(FAT_ENTRY) / PAGE_SIZE) + 2) 4178 PBCB SavedBcbs[COUNTSAVEDBCBS][2]; 4179 4180 ULONG SectorSize; 4181 ULONG Cluster; 4182 4183 LBO StartSectorLbo; 4184 LBO FinalSectorLbo; 4185 LBO Lbo; 4186 4187 PVOID PinnedFat; 4188 4189 BOOLEAN ReleaseMutex = FALSE; 4190 4191 ULONG SavedStartingFatIndex = StartingFatIndex; 4192 4193 PAGED_CODE(); 4194 4195 DebugTrace(+1, Dbg, "FatSetFatRun\n", 0); 4196 DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb); 4197 DebugTrace( 0, Dbg, " StartingFatIndex = %8x\n", StartingFatIndex); 4198 DebugTrace( 0, Dbg, " ClusterCount = %8lx\n", ClusterCount); 4199 DebugTrace( 0, Dbg, " ChainTogether = %s\n", ChainTogether ? "TRUE":"FALSE"); 4200 4201 // 4202 // Make sure they gave us a valid fat run. 4203 // 4204 4205 FatVerifyIndexIsValid(IrpContext, Vcb, StartingFatIndex); 4206 FatVerifyIndexIsValid(IrpContext, Vcb, StartingFatIndex + ClusterCount - 1); 4207 4208 // 4209 // Check special case 4210 // 4211 4212 if (ClusterCount == 0) { 4213 4214 DebugTrace(-1, Dbg, "FatSetFatRun -> (VOID)\n", 0); 4215 return; 4216 } 4217 4218 // 4219 // Set Sector Size 4220 // 4221 4222 SectorSize = 1 << Vcb->AllocationSupport.LogOfBytesPerSector; 4223 4224 // 4225 // Case on 12 or 16 bit fats. 4226 // 4227 // In the 12 bit case (mostly floppies) we always have the whole fat 4228 // (max 6k bytes) pinned during allocation operations. This is possibly 4229 // a wee bit slower, but saves headaches over fat entries with 8 bits 4230 // on one page, and 4 bits on the next. 4231 // 4232 // In the 16 bit case we only read one page at a time, as needed. 4233 // 4234 4235 // 4236 // DEAL WITH 12 BIT CASE 4237 // 4238 4239 _SEH2_TRY { 4240 4241 if (Vcb->AllocationSupport.FatIndexBitSize == 12) { 4242 4243 // 4244 // We read in the entire fat. Note that using prepare write marks 4245 // the bcb pre-dirty, so we don't have to do it explicitly.
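//
// A worked example of the dirty-sector arithmetic below (assuming 512
// byte sectors and FatReservedBytes == 512): for StartingFatIndex == 340
// and ClusterCount == 4, the run starts at volume byte
// 512 + 340 * 3 / 2 == 1022 and the end-of-run bound is
// 512 + (344 * 3 + 2) / 2 == 1029, so StartSectorLbo == 512 and
// FinalSectorLbo == 1024 and both sectors get marked dirty.
//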
4246 // 4247 4248 RtlZeroMemory( &SavedBcbs[0][0], 2 * sizeof(PBCB) * 2); 4249 4250 FatPrepareWriteVolumeFile( IrpContext, 4251 Vcb, 4252 FatReservedBytes( &Vcb->Bpb ), 4253 FatBytesPerFat( &Vcb->Bpb ), 4254 &SavedBcbs[0][0], 4255 &PinnedFat, 4256 TRUE, 4257 FALSE ); 4258 4259 // 4260 // Mark the affected sectors dirty. Note that FinalSectorLbo is 4261 // the Lbo of the END of the entry (Thus * 3 + 2). This makes sure 4262 // we catch the case of a dirty fat entry straddling a sector boundary. 4263 // 4264 // Note that if the first AddMcbEntry succeeds, all following ones 4265 // will simply coalesce, and thus also succeed. 4266 // 4267 4268 StartSectorLbo = (FatReservedBytes( &Vcb->Bpb ) + StartingFatIndex * 3 / 2) 4269 & ~(SectorSize - 1); 4270 4271 FinalSectorLbo = (FatReservedBytes( &Vcb->Bpb ) + ((StartingFatIndex + 4272 ClusterCount) * 3 + 2) / 2) & ~(SectorSize - 1); 4273 4274 for (Lbo = StartSectorLbo; Lbo <= FinalSectorLbo; Lbo += SectorSize) { 4275 4276 FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize ); 4277 } 4278 4279 // 4280 // Store the entries into the fat; we need a little 4281 // synchronization here and can't use a spinlock since the bytes 4282 // might not be resident. 4283 // 4284 4285 FatLockFreeClusterBitMap( Vcb ); 4286 ReleaseMutex = TRUE; 4287 4288 for (Cluster = StartingFatIndex; 4289 Cluster < StartingFatIndex + ClusterCount - 1; 4290 Cluster++) { 4291 4292 FatSet12BitEntry( PinnedFat, 4293 Cluster, 4294 ChainTogether ? Cluster + 1 : FAT_CLUSTER_AVAILABLE ); 4295 } 4296 4297 // 4298 // Save the last entry 4299 // 4300 4301 FatSet12BitEntry( PinnedFat, 4302 Cluster, 4303 ChainTogether ? 4304 FAT_CLUSTER_LAST & 0xfff : FAT_CLUSTER_AVAILABLE ); 4305 4306 FatUnlockFreeClusterBitMap( Vcb ); 4307 ReleaseMutex = FALSE; 4308 4309 } else if (Vcb->AllocationSupport.FatIndexBitSize == 32) { 4310 4311 // 4312 // DEAL WITH 32 BIT CASE 4313 // 4314 4315 for (;;) { 4316 4317 VBO StartOffsetInVolume; 4318 VBO FinalOffsetInVolume; 4319 4320 ULONG Page; 4321 ULONG FinalCluster; 4322 PULONG FatEntry = NULL; 4323 ULONG ClusterCountThisRun; 4324 4325 StartOffsetInVolume = FatReservedBytes(&Vcb->Bpb) + 4326 StartingFatIndex * sizeof(FAT_ENTRY); 4327 4328 if (ClusterCount > MAXCOUNTCLUS) { 4329 ClusterCountThisRun = MAXCOUNTCLUS; 4330 } else { 4331 ClusterCountThisRun = ClusterCount; 4332 } 4333 4334 FinalOffsetInVolume = StartOffsetInVolume + 4335 (ClusterCountThisRun - 1) * sizeof(FAT_ENTRY); 4336 4337 { 4338 ULONG NumberOfPages; 4339 ULONG Offset; 4340 4341 NumberOfPages = (FinalOffsetInVolume / PAGE_SIZE) - 4342 (StartOffsetInVolume / PAGE_SIZE) + 1; 4343 4344 RtlZeroMemory( &SavedBcbs[0][0], (NumberOfPages + 1) * sizeof(PBCB) * 2 ); 4345 4346 for ( Page = 0, Offset = StartOffsetInVolume & ~(PAGE_SIZE - 1); 4347 Page < NumberOfPages; 4348 Page++, Offset += PAGE_SIZE ) { 4349 4350 FatPrepareWriteVolumeFile( IrpContext, 4351 Vcb, 4352 Offset, 4353 PAGE_SIZE, 4354 &SavedBcbs[Page][0], 4355 (PVOID *)&SavedBcbs[Page][1], 4356 TRUE, 4357 FALSE ); 4358 4359 if (Page == 0) { 4360 4361 FatEntry = (PULONG)((PUCHAR)SavedBcbs[0][1] + 4362 (StartOffsetInVolume % PAGE_SIZE)); 4363 } 4364 } 4365 } 4366 4367 // 4368 // Mark the run dirty 4369 // 4370 4371 StartSectorLbo = StartOffsetInVolume & ~(SectorSize - 1); 4372 FinalSectorLbo = FinalOffsetInVolume & ~(SectorSize - 1); 4373 4374 for (Lbo = StartSectorLbo; Lbo <= FinalSectorLbo; Lbo += SectorSize) { 4375 4376 FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO)Lbo, Lbo, SectorSize ); 4377 } 4378 4379 // 4380 // Store the entries 4381
// 4382 // We need extra synchronization here for broken architectures 4383 // like the ALPHA that don't support atomic 16 bit writes. 4384 // 4385 4386 #ifdef ALPHA 4387 FatLockFreeClusterBitMap( Vcb ); 4388 ReleaseMutex = TRUE; 4389 #endif // ALPHA 4390 4391 FinalCluster = StartingFatIndex + ClusterCountThisRun - 1; 4392 Page = 0; 4393 4394 for (Cluster = StartingFatIndex; 4395 Cluster <= FinalCluster; 4396 Cluster++, FatEntry++) { 4397 4398 // 4399 // If we just crossed a page boundary (as opposed to starting 4400 // on one), update our idea of FatEntry. 4401 4402 if ( (((ULONG_PTR)FatEntry & (PAGE_SIZE-1)) == 0) && 4403 (Cluster != StartingFatIndex) ) { 4404 4405 Page += 1; 4406 FatEntry = (PULONG)SavedBcbs[Page][1]; 4407 } 4408 4409 *FatEntry = ChainTogether ? (FAT_ENTRY)(Cluster + 1) : 4410 FAT_CLUSTER_AVAILABLE; 4411 } 4412 4413 // 4414 // Fix up the last entry if we were chaining together 4415 // 4416 4417 if ((ClusterCount <= MAXCOUNTCLUS) && 4418 ChainTogether ) { 4419 4420 *(FatEntry-1) = FAT_CLUSTER_LAST; 4421 } 4422 4423 #ifdef ALPHA 4424 FatUnlockFreeClusterBitMap( Vcb ); 4425 ReleaseMutex = FALSE; 4426 #endif // ALPHA 4427 4428 { 4429 ULONG i; 4430 4431 // 4432 // Unpin the Bcbs 4433 // 4434 4435 for (i = 0; (i < COUNTSAVEDBCBS) && (SavedBcbs[i][0] != NULL); i++) { 4436 4437 FatUnpinBcb( IrpContext, SavedBcbs[i][0] ); 4438 SavedBcbs[i][0] = NULL; 4439 } 4440 } 4441 4442 if (ClusterCount <= MAXCOUNTCLUS) { 4443 4444 break; 4445 4446 } else { 4447 4448 StartingFatIndex += MAXCOUNTCLUS; 4449 ClusterCount -= MAXCOUNTCLUS; 4450 } 4451 } 4452 4453 } else { 4454 4455 // 4456 // DEAL WITH 16 BIT CASE 4457 // 4458 4459 VBO StartOffsetInVolume; 4460 VBO FinalOffsetInVolume; 4461 4462 ULONG Page; 4463 ULONG FinalCluster; 4464 PUSHORT FatEntry = NULL; 4465 4466 StartOffsetInVolume = FatReservedBytes(&Vcb->Bpb) + 4467 StartingFatIndex * sizeof(USHORT); 4468 4469 FinalOffsetInVolume = StartOffsetInVolume + 4470 (ClusterCount - 1) * sizeof(USHORT); 4471 4472 // 4473 // Read in one page of fat at a time. We cannot read in all of the 4474 // fat we need because of cache manager limitations. 4475 // 4476 // SavedBcbs was initialized to be able to hold the largest 4477 // possible number of pages in a fat plus an extra one to 4478 // accommodate the boot sector, plus one more to make sure there 4479 // is enough room for the RtlZeroMemory below that needs to mark 4480 // the first Bcb after all the ones we will use as an end marker.
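//
// Concretely, with 4 KB pages: MAXCOUNTCLUS FAT32 entries are
// 0x10000 * 4 == 256 KB == 64 pages, so COUNTSAVEDBCBS is 64 + 2 == 66
// Bcb/buffer pairs - enough for any one chunk (which can touch 65 pages
// if it is not page aligned) plus the end marker.
//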
        } else {

            //
            //  DEAL WITH 16 BIT CASE
            //

            VBO StartOffsetInVolume;
            VBO FinalOffsetInVolume;

            ULONG Page;
            ULONG FinalCluster;
            PUSHORT FatEntry = NULL;

            StartOffsetInVolume = FatReservedBytes(&Vcb->Bpb) +
                                  StartingFatIndex * sizeof(USHORT);

            FinalOffsetInVolume = StartOffsetInVolume +
                                  (ClusterCount - 1) * sizeof(USHORT);

            //
            //  Read in one page of fat at a time.  We cannot read in all of
            //  the fat we need because of cache manager limitations.
            //
            //  SavedBcb was initialized to be able to hold the largest
            //  possible number of pages in a fat plus an extra one to
            //  accommodate the boot sector, plus one more to make sure there
            //  is enough room for the RtlZeroMemory below that needs to mark
            //  the first Bcb after all the ones we will use as an end marker.
            //

            {
                ULONG NumberOfPages;
                ULONG Offset;

                NumberOfPages = (FinalOffsetInVolume / PAGE_SIZE) -
                                (StartOffsetInVolume / PAGE_SIZE) + 1;

                RtlZeroMemory( &SavedBcbs[0][0], (NumberOfPages + 1) * sizeof(PBCB) * 2 );

                for ( Page = 0, Offset = StartOffsetInVolume & ~(PAGE_SIZE - 1);
                      Page < NumberOfPages;
                      Page++, Offset += PAGE_SIZE ) {

                    FatPrepareWriteVolumeFile( IrpContext,
                                               Vcb,
                                               Offset,
                                               PAGE_SIZE,
                                               &SavedBcbs[Page][0],
                                               (PVOID *)&SavedBcbs[Page][1],
                                               TRUE,
                                               FALSE );

                    if (Page == 0) {

                        FatEntry = (PUSHORT)((PUCHAR)SavedBcbs[0][1] +
                                             (StartOffsetInVolume % PAGE_SIZE));
                    }
                }
            }

            //
            //  Mark the run dirty
            //

            StartSectorLbo = StartOffsetInVolume & ~(SectorSize - 1);
            FinalSectorLbo = FinalOffsetInVolume & ~(SectorSize - 1);

            for (Lbo = StartSectorLbo; Lbo <= FinalSectorLbo; Lbo += SectorSize) {

                FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize );
            }

            //
            //  Store the entries
            //
            //  We need extra synchronization here for broken architectures
            //  like the ALPHA that don't support atomic 16 bit writes.
            //

#ifdef ALPHA
            FatLockFreeClusterBitMap( Vcb );
            ReleaseMutex = TRUE;
#endif // ALPHA

            FinalCluster = StartingFatIndex + ClusterCount - 1;
            Page = 0;

            for (Cluster = StartingFatIndex;
                 Cluster <= FinalCluster;
                 Cluster++, FatEntry++) {

                //
                //  If we just crossed a page boundary (as opposed to starting
                //  on one), update our idea of FatEntry.
                //

                if ( (((ULONG_PTR)FatEntry & (PAGE_SIZE-1)) == 0) &&
                     (Cluster != StartingFatIndex) ) {

                    Page += 1;
                    FatEntry = (PUSHORT)SavedBcbs[Page][1];
                }

                *FatEntry = (USHORT) (ChainTogether ? (FAT_ENTRY)(Cluster + 1) :
                                                      FAT_CLUSTER_AVAILABLE);
            }

            //
            //  Fix up the last entry if we were chaining together
            //

            if ( ChainTogether ) {

#ifdef _MSC_VER
#pragma warning( suppress: 4310 )
#endif
                *(FatEntry-1) = (USHORT)FAT_CLUSTER_LAST;

            }

#ifdef ALPHA
            FatUnlockFreeClusterBitMap( Vcb );
            ReleaseMutex = FALSE;
#endif // ALPHA
        }

    } _SEH2_FINALLY {

        ULONG i;

        DebugUnwind( FatSetFatRun );

        //
        //  If we still somehow have the Mutex, release it.
        //

        if (ReleaseMutex) {

            NT_ASSERT( _SEH2_AbnormalTermination() );

            FatUnlockFreeClusterBitMap( Vcb );
        }

        //
        //  Unpin the Bcbs
        //

        for (i = 0; (i < COUNTSAVEDBCBS) && (SavedBcbs[i][0] != NULL); i++) {

            FatUnpinBcb( IrpContext, SavedBcbs[i][0] );
        }

        //
        //  At this point nothing in this finally clause should have raised.
        //  So, now comes the unsafe (sigh) stuff.
        //

        if ( _SEH2_AbnormalTermination() &&
             (Vcb->AllocationSupport.FatIndexBitSize == 32) ) {

            //
            //  Fat32 unwind
            //
            //  This case is more complex because the FAT12 and FAT16 cases
            //  pin all the needed FAT pages (128K max), after which they
            //  can't fail, before changing any FAT entries.  In the Fat32
            //  case, it may not be practical to pin all the needed FAT
            //  pages, because that could span many megabytes.  So Fat32
            //  attacks in chunks, and if a failure occurs once the first
            //  chunk has been updated, we have to back out the updates.
            //
            //  The unwind consists of walking back over each FAT entry we
            //  have changed, setting it back to the previous value.  Note
            //  that the previous value will either be FAT_CLUSTER_AVAILABLE
            //  (if ChainTogether==TRUE) or a simple link to the successor
            //  (if ChainTogether==FALSE).
            //
            //  We concede that any one of these calls could fail too; our
            //  objective is to make this case no more likely than the case
            //  for a file consisting of multiple disjoint runs.
            //
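            //
            //  Concrete illustration (hypothetical indices): if the run began
            //  at SavedStartingFatIndex == 100 and the raise occurred after
            //  StartingFatIndex had advanced to 300, the loop below rewrites
            //  entries 299 down through 100; an interrupted allocation
            //  (ChainTogether == TRUE) restores FAT_CLUSTER_AVAILABLE, while
            //  an interrupted free (ChainTogether == FALSE) relinks each
            //  entry to its successor.
            //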
            while ( StartingFatIndex > SavedStartingFatIndex ) {

                StartingFatIndex--;

                FatSetFatEntry( IrpContext, Vcb, StartingFatIndex,
                                ChainTogether ?
                                FAT_CLUSTER_AVAILABLE : (FAT_ENTRY)(StartingFatIndex + 1) );
            }
        }

        DebugTrace(-1, Dbg, "FatSetFatRun -> (VOID)\n", 0);
    } _SEH2_END;

    return;
}


//
//  Internal support routine
//

UCHAR
FatLogOf (
    IN ULONG Value
    )

/*++

Routine Description:

    This routine just computes the base 2 log of an integer.  It is only
    used on objects that are known to be powers of two.

Arguments:

    Value - The value to take the base 2 log of.

Return Value:

    UCHAR - The base 2 log of Value.

--*/

{
    UCHAR Log = 0;

#if FASTFATDBG
    ULONG OrigValue = Value;
#endif

    PAGED_CODE();

    //
    //  Knock bits off until we get a one at position 0
    //

    while ( (Value & 0xfffffffe) != 0 ) {

        Log++;
        Value >>= 1;
    }

    //
    //  If there was more than one bit set, the file system messed up;
    //  Bug Check.
    //

    if (Value != 0x1) {

        DebugTrace(+1, Dbg, "LogOf\n", 0);
        DebugTrace( 0, Dbg, "  Value = %8lx\n", OrigValue);

        DebugTrace( 0, Dbg, "Received non power of 2.\n", 0);

        DebugTrace(-1, Dbg, "LogOf -> %8lx\n", Log);

#ifdef _MSC_VER
#pragma prefast( suppress: 28159, "we bugcheck here because our internal data structures are seriously corrupted if this happens" )
#endif
        FatBugCheck( Value, Log, 0 );
    }

    return Log;
}
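
//
//  Editor's illustration (compiled out, not part of the driver): the
//  expected behavior of FatLogOf on representative inputs.  The sample
//  function name is hypothetical.
//

#if 0
VOID
FatLogOfExamples (
    VOID
    )
{
    NT_ASSERT( FatLogOf( 0x200 ) == 9 );                //  512 byte sectors
    NT_ASSERT( FatLogOf( 0x1000 ) == 12 );              //  4K clusters
    NT_ASSERT( (1 << FatLogOf( 0x8000 )) == 0x8000 );   //  round trips

    //  A non power of two input, e.g. 0x300, would bugcheck rather than
    //  return, since the allocation geometry is then corrupt.
}
#endif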

VOID
FatExamineFatEntries(
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb,
    IN ULONG StartIndex OPTIONAL,
    IN ULONG EndIndex OPTIONAL,
    IN BOOLEAN SetupWindows,
    IN PFAT_WINDOW SwitchToWindow OPTIONAL,
    IN PULONG BitMapBuffer OPTIONAL
    )
/*++

Routine Description:

    This routine handles scanning a segment of the FAT into in-memory
    structures.

    There are three fundamental cases, with variations depending on the FAT
    type:

    1) During volume setup, FatSetupAllocations

        1a) for FAT12/16, read the FAT into our free cluster bitmap
        1b) for FAT32, perform the initial scan for window free cluster counts

    2) Switching FAT32 windows on the fly during system operation

    3) Reading arbitrary segments of the FAT for the purposes of the
       GetVolumeBitmap call (only for FAT32)

    There really is too much going on in here.  At some point this should be
    substantially rewritten.

Arguments:

    Vcb - Supplies the volume involved

    StartIndex - Supplies the starting cluster, ignored if SwitchToWindow
        supplied

    EndIndex - Supplies the ending cluster, ignored if SwitchToWindow supplied

    SetupWindows - Indicates if we are doing the initial FAT32 scan

    SwitchToWindow - Supplies the FAT window we are examining and will switch
        to

    BitMapBuffer - Supplies a specific bitmap to fill in; if not supplied, we
        fill in the volume free cluster bitmap if !SetupWindows

Return Value:

    None.  Lots of side effects.

--*/
{
    ULONG FatIndexBitSize;
    ULONG Page = 0;
    ULONG Offset = 0;
    ULONG FatIndex;
    FAT_ENTRY FatEntry = FAT_CLUSTER_AVAILABLE;
    FAT_ENTRY FirstFatEntry = FAT_CLUSTER_AVAILABLE;
    PUSHORT FatBuffer;
    PVOID pv;
    PBCB Bcb = NULL;
    ULONG EntriesPerWindow;

    ULONG ClustersThisRun;
    ULONG StartIndexOfThisRun;

    PULONG FreeClusterCount = NULL;

    PFAT_WINDOW CurrentWindow = NULL;

    PVOID NewBitMapBuffer = NULL;
    PRTL_BITMAP BitMap = NULL;
    RTL_BITMAP PrivateBitMap;

    ULONG ClusterSize = 0;
    ULONG PrefetchPages = 0;
    ULONG FatPages = 0;

    VBO BadClusterVbo = 0;
    LBO Lbo = 0;

    enum RunType {
        FreeClusters,
        AllocatedClusters,
        UnknownClusters
    } CurrentRun;

    PAGED_CODE();

    //
    //  Now assert correct usage.
    //

    FatIndexBitSize = Vcb->AllocationSupport.FatIndexBitSize;

    NT_ASSERT( !(SetupWindows && (SwitchToWindow || BitMapBuffer)));
    NT_ASSERT( !(SetupWindows && FatIndexBitSize != 32));

    if (Vcb->NumberOfWindows > 1) {

        //
        //  FAT32: Calculate the number of FAT entries covered by a window.
        //  This is equal to the number of bits in the freespace bitmap, the
        //  size of which is hardcoded.
        //

        EntriesPerWindow = MAX_CLUSTER_BITMAP_SIZE;

    } else {

        EntriesPerWindow = Vcb->AllocationSupport.NumberOfClusters;
    }

    //
    //  We will also fill in the cumulative count of free clusters for
    //  the entire volume.  If this is not appropriate, NULL it out
    //  shortly.
    //

    FreeClusterCount = &Vcb->AllocationSupport.NumberOfFreeClusters;
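    //
    //  Editor's note: the three cases in the routine description map onto
    //  the parameters roughly as follows (hypothetical invocations, for
    //  illustration only):
    //
    //      setup scan:     ( ..., StartIndex, EndIndex, TRUE,  NULL,   NULL )
    //      window switch:  ( ..., 0,          0,        FALSE, Window, NULL )
    //      volume bitmap:  ( ..., StartIndex, EndIndex, FALSE, NULL,   Buffer )
    //
    //  where the start/end indices are ignored in the window switch case
    //  because they are taken from the window itself.
    //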
4874 // 4875 4876 NT_ASSERT( SwitchToWindow ); 4877 4878 CurrentWindow = SwitchToWindow; 4879 StartIndex = CurrentWindow->FirstCluster; 4880 EndIndex = CurrentWindow->LastCluster; 4881 4882 BitMap = &PrivateBitMap; 4883 NewBitMapBuffer = FsRtlAllocatePoolWithTag( PagedPool, 4884 (EntriesPerWindow + 7) / 8, 4885 TAG_FAT_BITMAP ); 4886 4887 RtlInitializeBitMap( &PrivateBitMap, 4888 NewBitMapBuffer, 4889 EndIndex - StartIndex + 1); 4890 4891 if ((FatIndexBitSize == 32) && 4892 (Vcb->NumberOfWindows > 1)) { 4893 4894 // 4895 // We do not wish count total clusters here. 4896 // 4897 4898 FreeClusterCount = NULL; 4899 4900 } 4901 4902 } else { 4903 4904 BitMap = &PrivateBitMap; 4905 RtlInitializeBitMap(&PrivateBitMap, 4906 BitMapBuffer, 4907 EndIndex - StartIndex + 1); 4908 4909 // 4910 // We do not count total clusters here. 4911 // 4912 4913 FreeClusterCount = NULL; 4914 } 4915 4916 // 4917 // Now, our start index better be in the file heap. 4918 // 4919 4920 NT_ASSERT( StartIndex >= 2 ); 4921 4922 _SEH2_TRY { 4923 4924 // 4925 // Pick up the initial chunk of the FAT and first entry. 4926 // 4927 4928 if (FatIndexBitSize == 12) { 4929 4930 // 4931 // We read in the entire fat in the 12 bit case. 4932 // 4933 4934 FatReadVolumeFile( IrpContext, 4935 Vcb, 4936 FatReservedBytes( &Vcb->Bpb ), 4937 FatBytesPerFat( &Vcb->Bpb ), 4938 &Bcb, 4939 (PVOID *)&FatBuffer ); 4940 4941 FatLookup12BitEntry(FatBuffer, 0, &FirstFatEntry); 4942 4943 } else { 4944 4945 // 4946 // Read in one page of fat at a time. We cannot read in the 4947 // all of the fat we need because of cache manager limitations. 4948 // 4949 4950 ULONG BytesPerEntry = FatIndexBitSize >> 3; 4951 4952 FatPages = (FatReservedBytes(&Vcb->Bpb) + FatBytesPerFat(&Vcb->Bpb) + (PAGE_SIZE - 1)) / PAGE_SIZE; 4953 Page = (FatReservedBytes(&Vcb->Bpb) + StartIndex * BytesPerEntry) / PAGE_SIZE; 4954 4955 Offset = Page * PAGE_SIZE; 4956 4957 // 4958 // Prefetch the FAT entries in memory for optimal performance. 4959 // 4960 4961 PrefetchPages = FatPages - Page; 4962 4963 if (PrefetchPages > FAT_PREFETCH_PAGE_COUNT) { 4964 4965 PrefetchPages = ALIGN_UP_BY(Page, FAT_PREFETCH_PAGE_COUNT) - Page; 4966 } 4967 4968 #if (NTDDI_VERSION >= NTDDI_WIN8) 4969 FatPrefetchPages( IrpContext, 4970 Vcb->VirtualVolumeFile, 4971 Page, 4972 PrefetchPages ); 4973 #endif 4974 4975 FatReadVolumeFile( IrpContext, 4976 Vcb, 4977 Offset, 4978 PAGE_SIZE, 4979 &Bcb, 4980 &pv); 4981 4982 if (FatIndexBitSize == 32) { 4983 4984 FatBuffer = (PUSHORT)((PUCHAR)pv + 4985 (FatReservedBytes(&Vcb->Bpb) + StartIndex * BytesPerEntry) % 4986 PAGE_SIZE); 4987 4988 FirstFatEntry = *((PULONG)FatBuffer); 4989 FirstFatEntry = FirstFatEntry & FAT32_ENTRY_MASK; 4990 4991 } else { 4992 4993 FatBuffer = (PUSHORT)((PUCHAR)pv + 4994 FatReservedBytes(&Vcb->Bpb) % PAGE_SIZE) + 2; 4995 4996 FirstFatEntry = *FatBuffer; 4997 } 4998 4999 } 5000 5001 ClusterSize = 1 << (Vcb->AllocationSupport.LogOfBytesPerCluster); 5002 5003 CurrentRun = (FirstFatEntry == FAT_CLUSTER_AVAILABLE) ? 5004 FreeClusters : AllocatedClusters; 5005 5006 StartIndexOfThisRun = StartIndex; 5007 5008 for (FatIndex = StartIndex; FatIndex <= EndIndex; FatIndex++) { 5009 5010 if (FatIndexBitSize == 12) { 5011 5012 FatLookup12BitEntry(FatBuffer, FatIndex, &FatEntry); 5013 5014 } else { 5015 5016 // 5017 // If we are setting up the FAT32 windows and have stepped into a new 5018 // bucket, finalize this one and move forward. 
5019 // 5020 5021 if (SetupWindows && 5022 FatIndex > StartIndex && 5023 (FatIndex - 2) % EntriesPerWindow == 0) { 5024 5025 CurrentWindow->LastCluster = FatIndex - 1; 5026 5027 if (CurrentRun == FreeClusters) { 5028 5029 // 5030 // We must be counting clusters in order to modify the 5031 // contents of the window. 5032 // 5033 5034 NT_ASSERT( FreeClusterCount ); 5035 5036 ClustersThisRun = FatIndex - StartIndexOfThisRun; 5037 CurrentWindow->ClustersFree += ClustersThisRun; 5038 5039 if (FreeClusterCount) { 5040 *FreeClusterCount += ClustersThisRun; 5041 } 5042 5043 } else { 5044 5045 NT_ASSERT(CurrentRun == AllocatedClusters); 5046 5047 } 5048 5049 StartIndexOfThisRun = FatIndex; 5050 CurrentRun = UnknownClusters; 5051 5052 CurrentWindow++; 5053 CurrentWindow->ClustersFree = 0; 5054 CurrentWindow->FirstCluster = FatIndex; 5055 } 5056 5057 // 5058 // If we just stepped onto a new page, grab a new pointer. 5059 // 5060 5061 if (((ULONG_PTR)FatBuffer & (PAGE_SIZE - 1)) == 0) { 5062 5063 FatUnpinBcb( IrpContext, Bcb ); 5064 5065 Page++; 5066 Offset += PAGE_SIZE; 5067 5068 #if (NTDDI_VERSION >= NTDDI_WIN8) 5069 // 5070 // If we have exhausted all the prefetch pages, prefetch the next chunk. 5071 // 5072 5073 if (--PrefetchPages == 0) { 5074 5075 PrefetchPages = FatPages - Page; 5076 5077 if (PrefetchPages > FAT_PREFETCH_PAGE_COUNT) { 5078 5079 PrefetchPages = FAT_PREFETCH_PAGE_COUNT; 5080 } 5081 5082 FatPrefetchPages( IrpContext, 5083 Vcb->VirtualVolumeFile, 5084 Page, 5085 PrefetchPages ); 5086 } 5087 #endif 5088 5089 FatReadVolumeFile( IrpContext, 5090 Vcb, 5091 Offset, 5092 PAGE_SIZE, 5093 &Bcb, 5094 &pv ); 5095 5096 FatBuffer = (PUSHORT)pv; 5097 } 5098 5099 if (FatIndexBitSize == 32) { 5100 5101 #ifndef __REACTOS__ 5102 #ifdef _MSC_VER 5103 #pragma warning( suppress: 4213 ) 5104 #endif 5105 FatEntry = *((PULONG)FatBuffer)++; 5106 FatEntry = FatEntry & FAT32_ENTRY_MASK; 5107 #else 5108 FatEntry = *((PULONG)FatBuffer); 5109 FatBuffer += 2; /* PUSHORT FatBuffer */ 5110 FatEntry = FatEntry & FAT32_ENTRY_MASK; 5111 #endif 5112 5113 } else { 5114 5115 FatEntry = *FatBuffer; 5116 FatBuffer += 1; 5117 } 5118 } 5119 5120 if (CurrentRun == UnknownClusters) { 5121 5122 CurrentRun = (FatEntry == FAT_CLUSTER_AVAILABLE) ? 5123 FreeClusters : AllocatedClusters; 5124 } 5125 5126 // 5127 // Are we switching from a free run to an allocated run? 5128 // 5129 5130 if (CurrentRun == FreeClusters && 5131 FatEntry != FAT_CLUSTER_AVAILABLE) { 5132 5133 ClustersThisRun = FatIndex - StartIndexOfThisRun; 5134 5135 if (FreeClusterCount) { 5136 5137 *FreeClusterCount += ClustersThisRun; 5138 CurrentWindow->ClustersFree += ClustersThisRun; 5139 } 5140 5141 if (BitMap) { 5142 5143 RtlClearBits( BitMap, 5144 StartIndexOfThisRun - StartIndex, 5145 ClustersThisRun ); 5146 } 5147 5148 CurrentRun = AllocatedClusters; 5149 StartIndexOfThisRun = FatIndex; 5150 } 5151 5152 // 5153 // Are we switching from an allocated run to a free run? 
5154 // 5155 5156 if (CurrentRun == AllocatedClusters && 5157 FatEntry == FAT_CLUSTER_AVAILABLE) { 5158 5159 ClustersThisRun = FatIndex - StartIndexOfThisRun; 5160 5161 if (BitMap) { 5162 5163 RtlSetBits( BitMap, 5164 StartIndexOfThisRun - StartIndex, 5165 ClustersThisRun ); 5166 } 5167 5168 CurrentRun = FreeClusters; 5169 StartIndexOfThisRun = FatIndex; 5170 } 5171 5172 // 5173 // If the entry is marked bad, add it to the bad block MCB 5174 // 5175 5176 if ((SetupWindows || (Vcb->NumberOfWindows == 1)) && 5177 (FatInterpretClusterType( Vcb, FatEntry ) == FatClusterBad)) { 5178 5179 // 5180 // This cluster is marked bad. 5181 // Add it to the BadBlockMcb. 5182 // 5183 5184 Lbo = FatGetLboFromIndex( Vcb, FatIndex ); 5185 FatAddMcbEntry( Vcb, &Vcb->BadBlockMcb, BadClusterVbo, Lbo, ClusterSize ); 5186 BadClusterVbo += ClusterSize; 5187 } 5188 } 5189 5190 // 5191 // If we finished the scan, then we know about all the possible bad clusters. 5192 // 5193 5194 SetFlag( Vcb->VcbState, VCB_STATE_FLAG_BAD_BLOCKS_POPULATED); 5195 5196 // 5197 // Now we have to record the final run we encountered 5198 // 5199 5200 ClustersThisRun = FatIndex - StartIndexOfThisRun; 5201 5202 if (CurrentRun == FreeClusters) { 5203 5204 if (FreeClusterCount) { 5205 5206 *FreeClusterCount += ClustersThisRun; 5207 CurrentWindow->ClustersFree += ClustersThisRun; 5208 } 5209 5210 if (BitMap) { 5211 5212 RtlClearBits( BitMap, 5213 StartIndexOfThisRun - StartIndex, 5214 ClustersThisRun ); 5215 } 5216 5217 } else { 5218 5219 if (BitMap) { 5220 5221 RtlSetBits( BitMap, 5222 StartIndexOfThisRun - StartIndex, 5223 ClustersThisRun ); 5224 } 5225 } 5226 5227 // 5228 // And finish the last window if we are in setup. 5229 // 5230 5231 if (SetupWindows) { 5232 5233 CurrentWindow->LastCluster = FatIndex - 1; 5234 } 5235 5236 // 5237 // Now switch the active window if required. We've succesfully gotten everything 5238 // nailed down. 5239 // 5240 // If we were tracking the free cluster count, this means we should update the 5241 // window. This is the case of FAT12/16 initialization. 5242 // 5243 5244 if (SwitchToWindow) { 5245 5246 if (Vcb->FreeClusterBitMap.Buffer) { 5247 5248 ExFreePool( Vcb->FreeClusterBitMap.Buffer ); 5249 } 5250 5251 RtlInitializeBitMap( &Vcb->FreeClusterBitMap, 5252 NewBitMapBuffer, 5253 EndIndex - StartIndex + 1 ); 5254 5255 NewBitMapBuffer = NULL; 5256 5257 Vcb->CurrentWindow = SwitchToWindow; 5258 Vcb->ClusterHint = (ULONG)-1; 5259 5260 if (FreeClusterCount) { 5261 5262 NT_ASSERT( !SetupWindows ); 5263 5264 Vcb->CurrentWindow->ClustersFree = *FreeClusterCount; 5265 } 5266 } 5267 5268 // 5269 // Make sure plausible things occured ... 5270 // 5271 5272 if (!SetupWindows && BitMapBuffer == NULL) { 5273 5274 ASSERT_CURRENT_WINDOW_GOOD( Vcb ); 5275 } 5276 5277 NT_ASSERT(Vcb->AllocationSupport.NumberOfFreeClusters <= Vcb->AllocationSupport.NumberOfClusters); 5278 5279 } _SEH2_FINALLY { 5280 5281 // 5282 // Unpin the last bcb and drop the temporary bitmap buffer if it exists. 5283 // 5284 5285 FatUnpinBcb( IrpContext, Bcb); 5286 5287 if (NewBitMapBuffer) { 5288 5289 ExFreePool( NewBitMapBuffer ); 5290 } 5291 } _SEH2_END; 5292 } 5293 5294