/*++

Copyright (c) 1989-2000 Microsoft Corporation

Module Name:

    Write.c

Abstract:

    This module implements the File Write routine for Write called by the
    dispatch driver.


--*/

#include "fatprocs.h"

//
//  The Bug check file id for this module
//

#define BugCheckFileId                   (FAT_BUG_CHECK_WRITE)

//
//  The local debug trace level
//

#define Dbg                              (DEBUG_TRACE_WRITE)

//
//  Macro to increment the appropriate performance counters for a write.
//  Statistics are kept per-processor (indexed by current processor number,
//  wrapped by the processor count) to avoid cache-line contention; user
//  file writes and metadata (FAT/directory) writes are counted separately.
//

#define CollectWriteStats(VCB,OPEN_TYPE,BYTE_COUNT) {                                          \
    PFILESYSTEM_STATISTICS Stats = &(VCB)->Statistics[KeGetCurrentProcessorNumber() % FatData.NumberProcessors].Common; \
    if (((OPEN_TYPE) == UserFileOpen)) {                                                       \
        Stats->UserFileWrites += 1;                                                            \
        Stats->UserFileWriteBytes += (ULONG)(BYTE_COUNT);                                      \
    } else if (((OPEN_TYPE) == VirtualVolumeFile || ((OPEN_TYPE) == DirectoryFile))) {         \
        Stats->MetaDataWrites += 1;                                                            \
        Stats->MetaDataWriteBytes += (ULONG)(BYTE_COUNT);                                      \
    }                                                                                          \
}

//
//  Debug/test switch: when TRUE, forces asynchronous paths to be avoided.
//  (Set at build/debug time only; never written by this module's visible code.)
//

BOOLEAN FatNoAsync = FALSE;

//
//  Local support routines
//

KDEFERRED_ROUTINE FatDeferredFlushDpc;

VOID
NTAPI
FatDeferredFlushDpc (
    _In_ PKDPC Dpc,
    _In_opt_ PVOID DeferredContext,
    _In_opt_ PVOID SystemArgument1,
    _In_opt_ PVOID SystemArgument2
    );

WORKER_THREAD_ROUTINE FatDeferredFlush;

VOID
NTAPI
FatDeferredFlush (
    _In_ PVOID Parameter
    );

#ifdef ALLOC_PRAGMA
#pragma alloc_text(PAGE, FatDeferredFlush)
#pragma alloc_text(PAGE, FatCommonWrite)
#endif


_Function_class_(IRP_MJ_WRITE)
_Function_class_(DRIVER_DISPATCH)
NTSTATUS
NTAPI
FatFsdWrite (
    _In_ PVOLUME_DEVICE_OBJECT VolumeDeviceObject,
    _Inout_ PIRP Irp
    )

/*++

Routine Description:

    This routine implements the FSD part of the NtWriteFile API call

Arguments:

    VolumeDeviceObject - Supplies the volume device object where the
        file being Write exists

    Irp - Supplies the Irp being processed

Return Value:

    NTSTATUS - The FSD status for the IRP

--*/

{
    PFCB Fcb;
    NTSTATUS Status;
    PIRP_CONTEXT IrpContext = NULL;

    BOOLEAN ModWriter = FALSE;
    BOOLEAN TopLevel = FALSE;

    DebugTrace(+1, Dbg, "FatFsdWrite\n", 0);

    //
    //  Call the common Write routine, with blocking allowed if synchronous
    //

    FsRtlEnterFileSystem();

    //
    //  We are first going to do a quick check for paging file IO.  Since this
    //  is a fast path, we must replicate the check for the fsdo.
    //

    if (!FatDeviceIsFatFsdo( IoGetCurrentIrpStackLocation(Irp)->DeviceObject)) {

        Fcb = (PFCB)(IoGetCurrentIrpStackLocation(Irp)->FileObject->FsContext);

        //
        //  Paging-file IO bypasses the common write path entirely: no
        //  IrpContext is created and no top-level IRP bookkeeping is done.
        //

        if ((NodeType(Fcb) == FAT_NTC_FCB) &&
            FlagOn(Fcb->FcbState, FCB_STATE_PAGING_FILE)) {

            //
            //  Do the usual STATUS_PENDING things.
            //

            IoMarkIrpPending( Irp );

            //
            //  Perform the actual IO, it will be completed when the io finishes.
            //

            FatPagingFileIo( Irp, Fcb );

            FsRtlExitFileSystem();

            return STATUS_PENDING;
        }
    }

    _SEH2_TRY {

        TopLevel = FatIsIrpTopLevel( Irp );

        IrpContext = FatCreateIrpContext( Irp, CanFsdWait( Irp ) );

        //
        //  This is a kludge for the mod writer case.  The correct state
        //  of recursion is set in IrpContext, however, we muck with the
        //  actual top level Irp field to get the correct WriteThrough
        //  behaviour.  (The original marker is restored after the try
        //  block below.)
        //

        if (IoGetTopLevelIrp() == (PIRP)FSRTL_MOD_WRITE_TOP_LEVEL_IRP) {

            ModWriter = TRUE;

            IoSetTopLevelIrp( Irp );
        }

        //
        //  If this is an Mdl complete request, don't go through
        //  common write.
        //

        if (FlagOn( IrpContext->MinorFunction, IRP_MN_COMPLETE )) {

            DebugTrace(0, Dbg, "Calling FatCompleteMdl\n", 0 );

            Status = FatCompleteMdl( IrpContext, Irp );

        } else {

            Status = FatCommonWrite( IrpContext, Irp );
        }

    } _SEH2_EXCEPT(FatExceptionFilter( IrpContext, _SEH2_GetExceptionInformation() )) {

        //
        //  We had some trouble trying to perform the requested
        //  operation, so we'll abort the I/O request with
        //  the error status that we get back from the
        //  exception code
        //

        Status = FatProcessException( IrpContext, Irp, _SEH2_GetExceptionCode() );
    } _SEH2_END;

    // NT_ASSERT( !(ModWriter && (Status == STATUS_CANT_WAIT)) );

    NT_ASSERT( !(ModWriter && TopLevel) );

    //
    //  Restore the mod-writer top-level marker and/or clear the top-level
    //  IRP we established, in that order, before leaving the file system.
    //

    if (ModWriter) { IoSetTopLevelIrp((PIRP)FSRTL_MOD_WRITE_TOP_LEVEL_IRP); }

    if (TopLevel) { IoSetTopLevelIrp( NULL ); }

    FsRtlExitFileSystem();

    //
    //  And return to our caller
    //

    DebugTrace(-1, Dbg, "FatFsdWrite -> %08lx\n", Status);

    UNREFERENCED_PARAMETER( VolumeDeviceObject );

    return Status;
}


_Requires_lock_held_(_Global_critical_region_)
NTSTATUS
FatCommonWrite (
    IN PIRP_CONTEXT IrpContext,
    IN PIRP Irp
    )

/*++

Routine Description:

    This is the common write routine for NtWriteFile, called from both
    the Fsd, or from the Fsp if a request could not be completed without
    blocking in the Fsd.  This routine's actions are
    conditionalized by the Wait input parameter, which determines whether
    it is allowed to block or not.  If a blocking condition is encountered
    with Wait == FALSE, however, the request is posted to the Fsp, who
    always calls with WAIT == TRUE.
238 239 Arguments: 240 241 Irp - Supplies the Irp to process 242 243 Return Value: 244 245 NTSTATUS - The return status for the operation 246 247 --*/ 248 249 { 250 PVCB Vcb; 251 PFCB FcbOrDcb; 252 PCCB Ccb; 253 254 VBO StartingVbo; 255 ULONG ByteCount; 256 ULONG FileSize = 0; 257 ULONG InitialFileSize = 0; 258 ULONG InitialValidDataLength = 0; 259 260 PIO_STACK_LOCATION IrpSp; 261 PFILE_OBJECT FileObject; 262 TYPE_OF_OPEN TypeOfOpen; 263 264 BOOLEAN PostIrp = FALSE; 265 BOOLEAN OplockPostIrp = FALSE; 266 BOOLEAN ExtendingFile = FALSE; 267 BOOLEAN FcbOrDcbAcquired = FALSE; 268 BOOLEAN SwitchBackToAsync = FALSE; 269 BOOLEAN CalledByLazyWriter = FALSE; 270 BOOLEAN ExtendingValidData = FALSE; 271 BOOLEAN FcbAcquiredExclusive = FALSE; 272 BOOLEAN FcbCanDemoteToShared = FALSE; 273 BOOLEAN WriteFileSizeToDirent = FALSE; 274 BOOLEAN RecursiveWriteThrough = FALSE; 275 BOOLEAN UnwindOutstandingAsync = FALSE; 276 BOOLEAN PagingIoResourceAcquired = FALSE; 277 BOOLEAN SuccessfulPurge = FALSE; 278 279 BOOLEAN SynchronousIo; 280 BOOLEAN WriteToEof; 281 BOOLEAN PagingIo; 282 BOOLEAN NonCachedIo; 283 BOOLEAN Wait; 284 NTSTATUS Status = STATUS_SUCCESS; 285 286 FAT_IO_CONTEXT StackFatIoContext; 287 288 // 289 // A system buffer is only used if we have to access the buffer directly 290 // from the Fsp to clear a portion or to do a synchronous I/O, or a 291 // cached transfer. It is possible that our caller may have already 292 // mapped a system buffer, in which case we must remember this so 293 // we do not unmap it on the way out. 
294 // 295 296 PVOID SystemBuffer = (PVOID) NULL; 297 298 LARGE_INTEGER StartingByte; 299 300 PAGED_CODE(); 301 302 // 303 // Get current Irp stack location and file object 304 // 305 306 IrpSp = IoGetCurrentIrpStackLocation( Irp ); 307 FileObject = IrpSp->FileObject; 308 309 310 DebugTrace(+1, Dbg, "FatCommonWrite\n", 0); 311 DebugTrace( 0, Dbg, "Irp = %p\n", Irp); 312 DebugTrace( 0, Dbg, "ByteCount = %8lx\n", IrpSp->Parameters.Write.Length); 313 DebugTrace( 0, Dbg, "ByteOffset.LowPart = %8lx\n", IrpSp->Parameters.Write.ByteOffset.LowPart); 314 DebugTrace( 0, Dbg, "ByteOffset.HighPart = %8lx\n", IrpSp->Parameters.Write.ByteOffset.HighPart); 315 316 // 317 // Initialize the appropriate local variables. 318 // 319 320 Wait = BooleanFlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT); 321 PagingIo = BooleanFlagOn(Irp->Flags, IRP_PAGING_IO); 322 NonCachedIo = BooleanFlagOn(Irp->Flags,IRP_NOCACHE); 323 SynchronousIo = BooleanFlagOn(FileObject->Flags, FO_SYNCHRONOUS_IO); 324 325 //NT_ASSERT( PagingIo || FileObject->WriteAccess ); 326 327 // 328 // Extract the bytecount and do our noop/throttle checking. 329 // 330 331 ByteCount = IrpSp->Parameters.Write.Length; 332 333 // 334 // If there is nothing to write, return immediately. 335 // 336 337 if (ByteCount == 0) { 338 339 Irp->IoStatus.Information = 0; 340 FatCompleteRequest( IrpContext, Irp, STATUS_SUCCESS ); 341 return STATUS_SUCCESS; 342 } 343 344 // 345 // See if we have to defer the write. 
346 // 347 348 if (!NonCachedIo && 349 !CcCanIWrite(FileObject, 350 ByteCount, 351 (BOOLEAN)(Wait && !BooleanFlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_IN_FSP)), 352 BooleanFlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_DEFERRED_WRITE))) { 353 354 BOOLEAN Retrying = BooleanFlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_DEFERRED_WRITE); 355 356 FatPrePostIrp( IrpContext, Irp ); 357 358 SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_DEFERRED_WRITE ); 359 360 CcDeferWrite( FileObject, 361 (PCC_POST_DEFERRED_WRITE)FatAddToWorkque, 362 IrpContext, 363 Irp, 364 ByteCount, 365 Retrying ); 366 367 return STATUS_PENDING; 368 } 369 370 // 371 // Determine our starting position and type. If we are writing 372 // at EOF, then we will need additional synchronization before 373 // the IO is issued to determine where the data will go. 374 // 375 376 StartingByte = IrpSp->Parameters.Write.ByteOffset; 377 StartingVbo = StartingByte.LowPart; 378 379 WriteToEof = ( (StartingByte.LowPart == FILE_WRITE_TO_END_OF_FILE) && 380 (StartingByte.HighPart == -1) ); 381 382 // 383 // Extract the nature of the write from the file object, and case on it 384 // 385 386 TypeOfOpen = FatDecodeFileObject(FileObject, &Vcb, &FcbOrDcb, &Ccb); 387 388 NT_ASSERT( Vcb != NULL ); 389 390 // 391 // Save callers who try to do cached IO to the raw volume from themselves. 392 // 393 394 if (TypeOfOpen == UserVolumeOpen) { 395 396 NonCachedIo = TRUE; 397 } 398 399 NT_ASSERT(!(NonCachedIo == FALSE && TypeOfOpen == VirtualVolumeFile)); 400 401 // 402 // Collect interesting statistics. The FLAG_USER_IO bit will indicate 403 // what type of io we're doing in the FatNonCachedIo function. 
404 // 405 406 if (PagingIo) { 407 CollectWriteStats(Vcb, TypeOfOpen, ByteCount); 408 409 if (TypeOfOpen == UserFileOpen) { 410 SetFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_USER_IO); 411 } else { 412 ClearFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_USER_IO); 413 } 414 } 415 416 // 417 // We must disallow writes to regular objects that would require us 418 // to maintain an AllocationSize of greater than 32 significant bits. 419 // 420 // If this is paging IO, this is simply a case where we need to trim. 421 // This will occur in due course. 422 // 423 424 if (!PagingIo && !WriteToEof && (TypeOfOpen != UserVolumeOpen)) { 425 426 427 if (!FatIsIoRangeValid( Vcb, StartingByte, ByteCount)) { 428 429 430 Irp->IoStatus.Information = 0; 431 FatCompleteRequest( IrpContext, Irp, STATUS_DISK_FULL ); 432 433 return STATUS_DISK_FULL; 434 } 435 } 436 437 // 438 // Allocate if necessary and initialize a FAT_IO_CONTEXT block for 439 // all non cached Io. For synchronous Io 440 // we use stack storage, otherwise we allocate pool. 
441 // 442 443 if (NonCachedIo) { 444 445 if (IrpContext->FatIoContext == NULL) { 446 447 if (!Wait) { 448 449 IrpContext->FatIoContext = 450 FsRtlAllocatePoolWithTag( NonPagedPoolNx, 451 sizeof(FAT_IO_CONTEXT), 452 TAG_FAT_IO_CONTEXT ); 453 454 } else { 455 456 IrpContext->FatIoContext = &StackFatIoContext; 457 458 SetFlag( IrpContext->Flags, IRP_CONTEXT_STACK_IO_CONTEXT ); 459 } 460 } 461 462 RtlZeroMemory( IrpContext->FatIoContext, sizeof(FAT_IO_CONTEXT) ); 463 464 if (Wait) { 465 466 KeInitializeEvent( &IrpContext->FatIoContext->Wait.SyncEvent, 467 NotificationEvent, 468 FALSE ); 469 470 } else { 471 472 if (PagingIo) { 473 474 IrpContext->FatIoContext->Wait.Async.ResourceThreadId = 475 ExGetCurrentResourceThread(); 476 477 } else { 478 479 IrpContext->FatIoContext->Wait.Async.ResourceThreadId = 480 ((ULONG_PTR)IrpContext->FatIoContext) | 3; 481 } 482 483 IrpContext->FatIoContext->Wait.Async.RequestedByteCount = 484 ByteCount; 485 486 IrpContext->FatIoContext->Wait.Async.FileObject = FileObject; 487 } 488 489 } 490 491 // 492 // Check if this volume has already been shut down. If it has, fail 493 // this write request. 494 // 495 496 if ( FlagOn(Vcb->VcbState, VCB_STATE_FLAG_SHUTDOWN) ) { 497 498 Irp->IoStatus.Information = 0; 499 FatCompleteRequest( IrpContext, Irp, STATUS_TOO_LATE ); 500 return STATUS_TOO_LATE; 501 } 502 503 // 504 // This case corresponds to a write of the volume file (only the first 505 // fat allowed, the other fats are written automatically in parallel). 506 // 507 // We use an Mcb keep track of dirty sectors. Actual entries are Vbos 508 // and Lbos (ie. bytes), though they are all added in sector chunks. 509 // Since Vbo == Lbo for the volume file, the Mcb entries 510 // alternate between runs of Vbo == Lbo, and holes (Lbo == 0). We use 511 // the prior to represent runs of dirty fat sectors, and the latter 512 // for runs of clean fat. 
Note that since the first part of the volume 513 // file (boot sector) is always clean (a hole), and an Mcb never ends in 514 // a hole, there must always be an even number of runs(entries) in the Mcb. 515 // 516 // The strategy is to find the first and last dirty run in the desired 517 // write range (which will always be a set of pages), and write from the 518 // former to the later. The may result in writing some clean data, but 519 // will generally be more efficient than writing each runs seperately. 520 // 521 522 if (TypeOfOpen == VirtualVolumeFile) { 523 524 LBO DirtyLbo; 525 LBO CleanLbo; 526 527 VBO DirtyVbo; 528 VBO StartingDirtyVbo; 529 530 ULONG DirtyByteCount; 531 ULONG CleanByteCount; 532 533 ULONG WriteLength; 534 535 BOOLEAN MoreDirtyRuns = TRUE; 536 537 IO_STATUS_BLOCK RaiseIosb; 538 539 DebugTrace(0, Dbg, "Type of write is Virtual Volume File\n", 0); 540 541 // 542 // If we can't wait we have to post this. 543 // 544 545 if (!Wait) { 546 547 DebugTrace( 0, Dbg, "Passing request to Fsp\n", 0 ); 548 549 Status = FatFsdPostRequest(IrpContext, Irp); 550 551 return Status; 552 } 553 554 // 555 // If we weren't called by the Lazy Writer, then this write 556 // must be the result of a write-through or flush operation. 557 // Setting the IrpContext flag, will cause DevIoSup.c to 558 // write-through the data to the disk. 559 // 560 561 if (!FlagOn((ULONG_PTR)IoGetTopLevelIrp(), FSRTL_CACHE_TOP_LEVEL_IRP)) { 562 563 SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_WRITE_THROUGH ); 564 } 565 566 // 567 // Assert an even number of entries in the Mcb, an odd number would 568 // mean that the Mcb is corrupt. 569 // 570 571 NT_ASSERT( (FsRtlNumberOfRunsInLargeMcb( &Vcb->DirtyFatMcb ) & 1) == 0); 572 573 // 574 // We need to skip over any clean sectors at the start of the write. 575 // 576 // Also check the two cases where there are no dirty fats in the 577 // desired write range, and complete them with success. 
578 // 579 // 1) There is no Mcb entry corresponding to StartingVbo, meaning 580 // we are beyond the end of the Mcb, and thus dirty fats. 581 // 582 // 2) The run at StartingVbo is clean and continues beyond the 583 // desired write range. 584 // 585 586 if (!FatLookupMcbEntry( Vcb, &Vcb->DirtyFatMcb, 587 StartingVbo, 588 &DirtyLbo, 589 &DirtyByteCount, 590 NULL ) 591 592 || ( (DirtyLbo == 0) && (DirtyByteCount >= ByteCount) ) ) { 593 594 DebugTrace(0, DEBUG_TRACE_DEBUG_HOOKS, 595 "No dirty fat sectors in the write range.\n", 0); 596 597 FatCompleteRequest( IrpContext, Irp, STATUS_SUCCESS ); 598 return STATUS_SUCCESS; 599 } 600 601 DirtyVbo = (VBO)DirtyLbo; 602 603 // 604 // If the last run was a hole (clean), up DirtyVbo to the next 605 // run, which must be dirty. 606 // 607 608 if (DirtyVbo == 0) { 609 610 DirtyVbo = StartingVbo + DirtyByteCount; 611 } 612 613 // 614 // This is where the write will start. 615 // 616 617 StartingDirtyVbo = DirtyVbo; 618 619 // 620 // 621 // Now start enumerating the dirty fat sectors spanning the desired 622 // write range, this first one of which is now DirtyVbo. 623 // 624 625 while ( MoreDirtyRuns ) { 626 627 // 628 // Find the next dirty run, if it is not there, the Mcb ended 629 // in a hole, or there is some other corruption of the Mcb. 
630 // 631 632 if (!FatLookupMcbEntry( Vcb, &Vcb->DirtyFatMcb, 633 DirtyVbo, 634 &DirtyLbo, 635 &DirtyByteCount, 636 NULL )) { 637 638 #ifdef _MSC_VER 639 #pragma prefast( suppress:28931, "needed for debug build" ) 640 #endif 641 DirtyVbo = (VBO)DirtyLbo; 642 643 DebugTrace(0, Dbg, "Last dirty fat Mcb entry was a hole: corrupt.\n", 0); 644 645 #ifdef _MSC_VER 646 #pragma prefast( suppress:28159, "things are seriously wrong if we get here" ) 647 #endif 648 FatBugCheck( 0, 0, 0 ); 649 650 } else { 651 652 DirtyVbo = (VBO)DirtyLbo; 653 654 // 655 // This has to correspond to a dirty run, and must start 656 // within the write range since we check it at entry to, 657 // and at the bottom of this loop. 658 // 659 660 NT_ASSERT((DirtyVbo != 0) && (DirtyVbo < StartingVbo + ByteCount)); 661 662 // 663 // There are three ways we can know that this was the 664 // last dirty run we want to write. 665 // 666 // 1) The current dirty run extends beyond or to the 667 // desired write range. 668 // 669 // 2) On trying to find the following clean run, we 670 // discover that this is the last run in the Mcb. 671 // 672 // 3) The following clean run extend beyond the 673 // desired write range. 674 // 675 // In any of these cases we set MoreDirtyRuns = FALSE. 676 // 677 678 // 679 // If the run is larger than we are writing, we also 680 // must truncate the WriteLength. This is benign in 681 // the equals case. 682 // 683 684 if (DirtyVbo + DirtyByteCount >= StartingVbo + ByteCount) { 685 686 DirtyByteCount = StartingVbo + ByteCount - DirtyVbo; 687 688 MoreDirtyRuns = FALSE; 689 690 } else { 691 692 // 693 // Scan the clean hole after this dirty run. If this 694 // run was the last, prepare to exit the loop 695 // 696 697 if (!FatLookupMcbEntry( Vcb, &Vcb->DirtyFatMcb, 698 DirtyVbo + DirtyByteCount, 699 &CleanLbo, 700 &CleanByteCount, 701 NULL )) { 702 703 MoreDirtyRuns = FALSE; 704 705 } else { 706 707 // 708 // Assert that we actually found a clean run. 
709 // and compute the start of the next dirty run. 710 // 711 712 NT_ASSERT (CleanLbo == 0); 713 714 // 715 // If the next dirty run starts beyond the desired 716 // write, we have found all the runs we need, so 717 // prepare to exit. 718 // 719 720 if (DirtyVbo + DirtyByteCount + CleanByteCount >= 721 StartingVbo + ByteCount) { 722 723 MoreDirtyRuns = FALSE; 724 725 } else { 726 727 // 728 // Compute the start of the next dirty run. 729 // 730 731 DirtyVbo += DirtyByteCount + CleanByteCount; 732 } 733 } 734 } 735 } 736 } // while ( MoreDirtyRuns ) 737 738 // 739 // At this point DirtyVbo and DirtyByteCount correctly reflect the 740 // final dirty run, constrained to the desired write range. 741 // 742 // Now compute the length we finally must write. 743 // 744 745 WriteLength = (DirtyVbo + DirtyByteCount) - StartingDirtyVbo; 746 747 // 748 // We must now assume that the write will complete with success, 749 // and initialize our expected status in RaiseIosb. It will be 750 // modified below if an error occurs. 751 // 752 753 RaiseIosb.Status = STATUS_SUCCESS; 754 RaiseIosb.Information = ByteCount; 755 756 // 757 // Loop through all the fats, setting up a multiple async to 758 // write them all. If there are more than FAT_MAX_PARALLEL_IOS 759 // then we do several muilple asyncs. 760 // 761 762 { 763 ULONG Fat; 764 ULONG BytesPerFat; 765 IO_RUN StackIoRuns[2]; 766 PIO_RUN IoRuns; 767 768 BytesPerFat = FatBytesPerFat( &Vcb->Bpb ); 769 770 if ((ULONG)Vcb->Bpb.Fats > 2) { 771 772 IoRuns = FsRtlAllocatePoolWithTag( PagedPool, 773 (ULONG)(Vcb->Bpb.Fats*sizeof(IO_RUN)), 774 TAG_IO_RUNS ); 775 776 } else { 777 778 IoRuns = StackIoRuns; 779 } 780 781 for (Fat = 0; Fat < (ULONG)Vcb->Bpb.Fats; Fat++) { 782 783 IoRuns[Fat].Vbo = StartingDirtyVbo; 784 IoRuns[Fat].Lbo = Fat * BytesPerFat + StartingDirtyVbo; 785 IoRuns[Fat].Offset = StartingDirtyVbo - StartingVbo; 786 IoRuns[Fat].ByteCount = WriteLength; 787 } 788 789 // 790 // Keep track of meta-data disk ios. 
791 // 792 793 Vcb->Statistics[KeGetCurrentProcessorNumber() % FatData.NumberProcessors].Common.MetaDataDiskWrites += Vcb->Bpb.Fats; 794 795 _SEH2_TRY { 796 797 FatMultipleAsync( IrpContext, 798 Vcb, 799 Irp, 800 (ULONG)Vcb->Bpb.Fats, 801 IoRuns ); 802 803 } _SEH2_FINALLY { 804 805 if (IoRuns != StackIoRuns) { 806 807 ExFreePool( IoRuns ); 808 } 809 } _SEH2_END; 810 811 #if (NTDDI_VERSION >= NTDDI_WIN8) 812 813 // 814 // Account for DASD Ios 815 // 816 817 if (FatDiskAccountingEnabled) { 818 819 PETHREAD ThreadIssuingIo = PsGetCurrentThread(); 820 821 PsUpdateDiskCounters( PsGetThreadProcess( ThreadIssuingIo ), 822 0, 823 WriteLength, 824 0, 825 1, 826 0 ); 827 } 828 829 #endif 830 // 831 // Wait for all the writes to finish 832 // 833 834 FatWaitSync( IrpContext ); 835 836 // 837 // If we got an error, or verify required, remember it. 838 // 839 840 if (!NT_SUCCESS( Irp->IoStatus.Status )) { 841 842 DebugTrace( 0, 843 Dbg, 844 "Error %X while writing volume file.\n", 845 Irp->IoStatus.Status ); 846 847 RaiseIosb = Irp->IoStatus; 848 } 849 } 850 851 // 852 // If the writes were a success, set the sectors clean, else 853 // raise the error status and mark the volume as needing 854 // verification. This will automatically reset the volume 855 // structures. 856 // 857 // If not, then mark this volume as needing verification to 858 // automatically cause everything to get cleaned up. 859 // 860 861 Irp->IoStatus = RaiseIosb; 862 863 if ( NT_SUCCESS( Status = Irp->IoStatus.Status )) { 864 865 FatRemoveMcbEntry( Vcb, &Vcb->DirtyFatMcb, 866 StartingDirtyVbo, 867 WriteLength ); 868 869 } else { 870 871 FatNormalizeAndRaiseStatus( IrpContext, Status ); 872 } 873 874 DebugTrace(-1, Dbg, "CommonWrite -> %08lx\n", Status ); 875 876 FatCompleteRequest( IrpContext, Irp, Status ); 877 return Status; 878 } 879 880 // 881 // This case corresponds to a general opened volume (DASD), ie. 882 // open ("a:"). 
883 // 884 885 if (TypeOfOpen == UserVolumeOpen) { 886 887 LBO StartingLbo; 888 LBO VolumeSize; 889 890 // 891 // Precalculate the volume size since we're nearly always going 892 // to be wanting to use it. 893 // 894 895 VolumeSize = (LBO) Int32x32To64( Vcb->Bpb.BytesPerSector, 896 (Vcb->Bpb.Sectors != 0 ? Vcb->Bpb.Sectors : 897 Vcb->Bpb.LargeSectors)); 898 899 StartingLbo = StartingByte.QuadPart; 900 901 DebugTrace(0, Dbg, "Type of write is User Volume.\n", 0); 902 903 // 904 // If this is a write on a disk-based volume that is not locked, we need to limit 905 // the sectors we allow to be written within the volume. Specifically, we only 906 // allow writes to the reserved area. Note that extended DASD can still be used 907 // to write past the end of the volume. We also allow kernel mode callers to force 908 // access via a flag in the IRP. A handle that issued a dismount can write anywhere 909 // as well. 910 // 911 912 if ((Vcb->TargetDeviceObject->DeviceType == FILE_DEVICE_DISK) && 913 !FlagOn( Vcb->VcbState, VCB_STATE_FLAG_LOCKED ) && 914 !FlagOn( IrpSp->Flags, SL_FORCE_DIRECT_WRITE ) && 915 !FlagOn( Ccb->Flags, CCB_FLAG_COMPLETE_DISMOUNT )) { 916 917 // 918 // First check for a write beyond the end of the volume. 919 // 920 921 if (!WriteToEof && (StartingLbo < VolumeSize)) { 922 923 // 924 // This write is within the volume. Make sure it is not beyond the reserved section. 925 // 926 927 if ((StartingLbo >= FatReservedBytes( &(Vcb->Bpb) )) || 928 (ByteCount > (FatReservedBytes( &(Vcb->Bpb) ) - StartingLbo))) { 929 930 FatCompleteRequest( IrpContext, Irp, STATUS_ACCESS_DENIED ); 931 return STATUS_ACCESS_DENIED; 932 } 933 } 934 } 935 936 // 937 // Verify that the volume for this handle is still valid, permitting 938 // operations to proceed on dismounted volumes via the handle which 939 // performed the dismount or sent a format unit command. 
940 // 941 942 if (!FlagOn( Ccb->Flags, CCB_FLAG_COMPLETE_DISMOUNT | CCB_FLAG_SENT_FORMAT_UNIT )) { 943 944 FatQuickVerifyVcb( IrpContext, Vcb ); 945 } 946 947 // 948 // If the caller previously sent a format unit command, then we will allow 949 // their read/write requests to ignore the verify flag on the device, since some 950 // devices send a media change event after format unit, but we don't want to 951 // process it yet since we're probably in the process of formatting the 952 // media. 953 // 954 955 if (FlagOn( Ccb->Flags, CCB_FLAG_SENT_FORMAT_UNIT )) { 956 957 SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_OVERRIDE_VERIFY ); 958 } 959 960 if (!FlagOn( Ccb->Flags, CCB_FLAG_DASD_PURGE_DONE )) { 961 962 BOOLEAN PreviousWait = BooleanFlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT ); 963 964 // 965 // Grab the entire volume so that even the normally unsafe action 966 // of writing to an unlocked volume won't open us to a race between 967 // the flush and purge of the FAT below. 968 // 969 // I really don't think this is particularly important to worry about, 970 // but a repro case for another bug happens to dance into this race 971 // condition pretty easily. Eh. 972 // 973 974 SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT ); 975 FatAcquireExclusiveVolume( IrpContext, Vcb ); 976 977 _SEH2_TRY { 978 979 // 980 // If the volume isn't locked, flush and purge it. 
981 // 982 983 if (!FlagOn(Vcb->VcbState, VCB_STATE_FLAG_LOCKED)) { 984 985 FatFlushFat( IrpContext, Vcb ); 986 CcPurgeCacheSection( &Vcb->SectionObjectPointers, 987 NULL, 988 0, 989 FALSE ); 990 991 FatPurgeReferencedFileObjects( IrpContext, Vcb->RootDcb, Flush ); 992 } 993 994 } _SEH2_FINALLY { 995 996 FatReleaseVolume( IrpContext, Vcb ); 997 if (!PreviousWait) { 998 ClearFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT ); 999 } 1000 } _SEH2_END; 1001 1002 SetFlag( Ccb->Flags, CCB_FLAG_DASD_PURGE_DONE | 1003 CCB_FLAG_DASD_FLUSH_DONE ); 1004 } 1005 1006 if (!FlagOn( Ccb->Flags, CCB_FLAG_ALLOW_EXTENDED_DASD_IO )) { 1007 1008 // 1009 // Make sure we don't try to write past end of volume, 1010 // reducing the requested byte count if necessary. 1011 // 1012 1013 if (WriteToEof || StartingLbo >= VolumeSize) { 1014 FatCompleteRequest( IrpContext, Irp, STATUS_SUCCESS ); 1015 return STATUS_SUCCESS; 1016 } 1017 1018 if (ByteCount > VolumeSize - StartingLbo) { 1019 1020 ByteCount = (ULONG) (VolumeSize - StartingLbo); 1021 1022 // 1023 // For async writes we had set the byte count in the FatIoContext 1024 // above, so fix that here. 1025 // 1026 1027 if (!Wait) { 1028 1029 IrpContext->FatIoContext->Wait.Async.RequestedByteCount = 1030 ByteCount; 1031 } 1032 } 1033 } else { 1034 1035 // 1036 // This has a peculiar interpretation, but just adjust the starting 1037 // byte to the end of the visible volume. 1038 // 1039 1040 if (WriteToEof) { 1041 1042 StartingLbo = VolumeSize; 1043 } 1044 } 1045 1046 // 1047 // For DASD we have to probe and lock the user's buffer 1048 // 1049 1050 FatLockUserBuffer( IrpContext, Irp, IoReadAccess, ByteCount ); 1051 1052 // 1053 // Set the FO_MODIFIED flag here to trigger a verify when this 1054 // handle is closed. Note that we can err on the conservative 1055 // side with no problem, i.e. if we accidently do an extra 1056 // verify there is no problem. 
1057 // 1058 1059 SetFlag( FileObject->Flags, FO_FILE_MODIFIED ); 1060 1061 // 1062 // Write the data and wait for the results 1063 // 1064 1065 FatSingleAsync( IrpContext, 1066 Vcb, 1067 StartingLbo, 1068 ByteCount, 1069 Irp ); 1070 1071 #if (NTDDI_VERSION >= NTDDI_WIN8) 1072 1073 // 1074 // Account for DASD Ios 1075 // 1076 1077 if (FatDiskAccountingEnabled) { 1078 1079 PETHREAD ThreadIssuingIo = PsGetCurrentThread(); 1080 1081 PsUpdateDiskCounters( PsGetThreadProcess( ThreadIssuingIo ), 1082 0, 1083 ByteCount, 1084 0, 1085 1, 1086 0 ); 1087 } 1088 1089 #endif 1090 1091 if (!Wait) { 1092 1093 // 1094 // We, nor anybody else, need the IrpContext any more. 1095 // 1096 1097 IrpContext->FatIoContext = NULL; 1098 1099 FatDeleteIrpContext( IrpContext ); 1100 1101 DebugTrace(-1, Dbg, "FatNonCachedIo -> STATUS_PENDING\n", 0); 1102 1103 return STATUS_PENDING; 1104 } 1105 1106 FatWaitSync( IrpContext ); 1107 1108 // 1109 // If the call didn't succeed, raise the error status 1110 // 1111 // Also mark this volume as needing verification to automatically 1112 // cause everything to get cleaned up. 1113 // 1114 1115 if (!NT_SUCCESS( Status = Irp->IoStatus.Status )) { 1116 1117 FatNormalizeAndRaiseStatus( IrpContext, Status ); 1118 } 1119 1120 // 1121 // Update the current file position. We assume that 1122 // open/create zeros out the CurrentByteOffset field. 1123 // 1124 1125 if (SynchronousIo && !PagingIo) { 1126 FileObject->CurrentByteOffset.QuadPart = 1127 StartingLbo + Irp->IoStatus.Information; 1128 } 1129 1130 DebugTrace(-1, Dbg, "FatCommonWrite -> %08lx\n", Status ); 1131 1132 FatCompleteRequest( IrpContext, Irp, Status ); 1133 return Status; 1134 } 1135 1136 // 1137 // At this point we know there is an Fcb/Dcb. 1138 // 1139 1140 NT_ASSERT( FcbOrDcb != NULL ); 1141 1142 // 1143 // Use a try-finally to free Fcb/Dcb and buffers on the way out. 1144 // 1145 1146 _SEH2_TRY { 1147 1148 // 1149 // This case corresponds to a normal user write file. 
1150 // 1151 1152 if ( TypeOfOpen == UserFileOpen 1153 ) { 1154 1155 ULONG ValidDataLength; 1156 ULONG ValidDataToDisk; 1157 ULONG ValidDataToCheck; 1158 1159 DebugTrace(0, Dbg, "Type of write is user file open\n", 0); 1160 1161 // 1162 // If this is a noncached transfer and is not a paging I/O, and 1163 // the file has been opened cached, then we will do a flush here 1164 // to avoid stale data problems. Note that we must flush before 1165 // acquiring the Fcb shared since the write may try to acquire 1166 // it exclusive. 1167 // 1168 // The Purge following the flush will guarentee cache coherency. 1169 // 1170 1171 if (NonCachedIo && !PagingIo && 1172 (FileObject->SectionObjectPointer->DataSectionObject != NULL)) { 1173 1174 IO_STATUS_BLOCK IoStatus = {0}; 1175 1176 // 1177 // We need the Fcb exclsuive to do the CcPurgeCache 1178 // 1179 1180 if (!FatAcquireExclusiveFcb( IrpContext, FcbOrDcb )) { 1181 1182 DebugTrace( 0, Dbg, "Cannot acquire FcbOrDcb = %p shared without waiting\n", FcbOrDcb ); 1183 1184 try_return( PostIrp = TRUE ); 1185 } 1186 1187 FcbOrDcbAcquired = TRUE; 1188 FcbAcquiredExclusive = TRUE; 1189 1190 // 1191 // Preacquire pagingio for the flush. 1192 // 1193 1194 ExAcquireResourceExclusiveLite( FcbOrDcb->Header.PagingIoResource, TRUE ); 1195 1196 #if (NTDDI_VERSION >= NTDDI_WIN7) 1197 1198 // 1199 // Remember that we are holding the paging I/O resource. 1200 // 1201 1202 PagingIoResourceAcquired = TRUE; 1203 1204 // 1205 // We hold so that we will prevent a pagefault from occuring and seeing 1206 // soon-to-be stale data from the disk. We used to believe this was 1207 // something to be left to the app to synchronize; we now realize that 1208 // noncached IO on a fileserver is doomed without the filesystem forcing 1209 // the coherency issue. By only penalizing noncached coherency when 1210 // needed, this is about the best we can do. 1211 // 1212 1213 // 1214 // Now perform the coherency flush and purge operation. 
This version of the call 1215 // will try to invalidate mapped pages to prevent data corruption. 1216 // 1217 1218 CcCoherencyFlushAndPurgeCache( FileObject->SectionObjectPointer, 1219 WriteToEof ? &FcbOrDcb->Header.FileSize : &StartingByte, 1220 ByteCount, 1221 &IoStatus, 1222 0 ); 1223 1224 SuccessfulPurge = NT_SUCCESS( IoStatus.Status ); 1225 1226 #else 1227 1228 CcFlushCache( FileObject->SectionObjectPointer, 1229 WriteToEof ? &FcbOrDcb->Header.FileSize : &StartingByte, 1230 ByteCount, 1231 &IoStatus ); 1232 1233 if (!NT_SUCCESS( IoStatus.Status )) { 1234 1235 ExReleaseResourceLite( FcbOrDcb->Header.PagingIoResource ); 1236 try_return( IoStatus.Status ); 1237 } 1238 1239 // 1240 // Remember that we are holding the paging I/O resource. 1241 // 1242 1243 PagingIoResourceAcquired = TRUE; 1244 1245 // 1246 // We hold so that we will prevent a pagefault from occuring and seeing 1247 // soon-to-be stale data from the disk. We used to believe this was 1248 // something to be left to the app to synchronize; we now realize that 1249 // noncached IO on a fileserver is doomed without the filesystem forcing 1250 // the coherency issue. By only penalizing noncached coherency when 1251 // needed, this is about the best we can do. 1252 // 1253 1254 SuccessfulPurge = CcPurgeCacheSection( FileObject->SectionObjectPointer, 1255 WriteToEof ? &FcbOrDcb->Header.FileSize : &StartingByte, 1256 ByteCount, 1257 FALSE ); 1258 1259 #endif 1260 1261 if (!SuccessfulPurge && (FcbOrDcb->PurgeFailureModeEnableCount > 0)) { 1262 1263 // 1264 // Purge failure mode only applies to user files. 1265 // 1266 1267 NT_ASSERT( TypeOfOpen == UserFileOpen ); 1268 1269 // 1270 // Do not swallow the purge failure if in purge failure 1271 // mode. Someone outside the file system intends to handle 1272 // the error and prevent any application compatibilty 1273 // issue. 
1274 // 1275 // NOTE: If the file system were not preventing a pagefault 1276 // from processing while this write is in flight, which it does 1277 // by holding the paging resource across the write, it would 1278 // need to fail the operation even if a purge succeeded. If 1279 // not a memory mapped read could bring in a stale page before 1280 // the write makes it to disk. 1281 // 1282 1283 try_return( Status = STATUS_PURGE_FAILED ); 1284 } 1285 1286 // 1287 // Indicate we're OK with the fcb being demoted to shared access 1288 // if that turns out to be possible later on after VDL extension 1289 // is checked for. 1290 // 1291 // PagingIo must be held all the way through. 1292 // 1293 1294 FcbCanDemoteToShared = TRUE; 1295 } 1296 1297 // 1298 // We assert that Paging Io writes will never WriteToEof. 1299 // 1300 1301 NT_ASSERT( WriteToEof ? !PagingIo : TRUE ); 1302 1303 // 1304 // First let's acquire the Fcb shared. Shared is enough if we 1305 // are not writing beyond EOF. 1306 // 1307 1308 if ( PagingIo ) { 1309 1310 (VOID)ExAcquireResourceSharedLite( FcbOrDcb->Header.PagingIoResource, TRUE ); 1311 PagingIoResourceAcquired = TRUE; 1312 1313 if (!Wait) { 1314 1315 IrpContext->FatIoContext->Wait.Async.Resource = 1316 FcbOrDcb->Header.PagingIoResource; 1317 } 1318 1319 // 1320 // Check to see if we colided with a MoveFile call, and if 1321 // so block until it completes. 1322 // 1323 1324 if (FcbOrDcb->MoveFileEvent) { 1325 1326 (VOID)KeWaitForSingleObject( FcbOrDcb->MoveFileEvent, 1327 Executive, 1328 KernelMode, 1329 FALSE, 1330 NULL ); 1331 } 1332 1333 } else { 1334 1335 // 1336 // We may already have the Fcb due to noncached coherency 1337 // work done just above; however, we may still have to extend 1338 // valid data length. We can't demote this to shared, matching 1339 // what occured before, until we figure that out a bit later. 
1340 // 1341 // We kept ahold of it since our lockorder is main->paging, 1342 // and paging must now held across the noncached write from 1343 // the purge on. 1344 // 1345 1346 // 1347 // If this is async I/O, we will wait if there is an exclusive 1348 // waiter. 1349 // 1350 1351 if (!Wait && NonCachedIo) { 1352 1353 if (!FcbOrDcbAcquired && 1354 !FatAcquireSharedFcbWaitForEx( IrpContext, FcbOrDcb )) { 1355 1356 DebugTrace( 0, Dbg, "Cannot acquire FcbOrDcb = %p shared without waiting\n", FcbOrDcb ); 1357 try_return( PostIrp = TRUE ); 1358 } 1359 1360 // 1361 // Note we will have to release this resource elsewhere. If we came 1362 // out of the noncached coherency path, we will also have to drop 1363 // the paging io resource. 1364 // 1365 1366 IrpContext->FatIoContext->Wait.Async.Resource = FcbOrDcb->Header.Resource; 1367 1368 if (FcbCanDemoteToShared) { 1369 1370 IrpContext->FatIoContext->Wait.Async.Resource2 = FcbOrDcb->Header.PagingIoResource; 1371 } 1372 } else { 1373 1374 if (!FcbOrDcbAcquired && 1375 !FatAcquireSharedFcb( IrpContext, FcbOrDcb )) { 1376 1377 DebugTrace( 0, Dbg, "Cannot acquire FcbOrDcb = %p shared without waiting\n", FcbOrDcb ); 1378 try_return( PostIrp = TRUE ); 1379 } 1380 } 1381 1382 FcbOrDcbAcquired = TRUE; 1383 } 1384 1385 // 1386 // Get a first tentative file size and valid data length. 1387 // We must get ValidDataLength first since it is always 1388 // increased second (in case we are unprotected) and 1389 // we don't want to capture ValidDataLength > FileSize. 1390 // 1391 1392 ValidDataToDisk = FcbOrDcb->ValidDataToDisk; 1393 ValidDataLength = FcbOrDcb->Header.ValidDataLength.LowPart; 1394 FileSize = FcbOrDcb->Header.FileSize.LowPart; 1395 1396 NT_ASSERT( ValidDataLength <= FileSize ); 1397 1398 // 1399 // If are paging io, then we do not want 1400 // to write beyond end of file. If the base is beyond Eof, we will just 1401 // Noop the call. 
If the transfer starts before Eof, but extends 1402 // beyond, we will truncate the transfer to the last sector 1403 // boundary. 1404 // 1405 1406 // 1407 // Just in case this is paging io, limit write to file size. 1408 // Otherwise, in case of write through, since Mm rounds up 1409 // to a page, we might try to acquire the resource exclusive 1410 // when our top level guy only acquired it shared. Thus, =><=. 1411 // 1412 1413 if ( PagingIo ) { 1414 1415 if (StartingVbo >= FileSize) { 1416 1417 DebugTrace( 0, Dbg, "PagingIo started beyond EOF.\n", 0 ); 1418 1419 Irp->IoStatus.Information = 0; 1420 1421 try_return( Status = STATUS_SUCCESS ); 1422 } 1423 1424 if (ByteCount > FileSize - StartingVbo) { 1425 1426 DebugTrace( 0, Dbg, "PagingIo extending beyond EOF.\n", 0 ); 1427 1428 ByteCount = FileSize - StartingVbo; 1429 } 1430 } 1431 1432 // 1433 // Determine if we were called by the lazywriter. 1434 // (see resrcsup.c) 1435 // 1436 1437 if (FcbOrDcb->Specific.Fcb.LazyWriteThread == PsGetCurrentThread()) { 1438 1439 CalledByLazyWriter = TRUE; 1440 1441 if (FlagOn( FcbOrDcb->Header.Flags, FSRTL_FLAG_USER_MAPPED_FILE )) { 1442 1443 // 1444 // Fail if the start of this request is beyond valid data length. 1445 // Don't worry if this is an unsafe test. MM and CC won't 1446 // throw this page away if it is really dirty. 1447 // 1448 1449 if ((StartingVbo + ByteCount > ValidDataLength) && 1450 (StartingVbo < FileSize)) { 1451 1452 // 1453 // It's OK if byte range is within the page containing valid data length, 1454 // since we will use ValidDataToDisk as the start point. 1455 // 1456 1457 if (StartingVbo + ByteCount > ((ValidDataLength + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))) { 1458 1459 // 1460 // Don't flush this now. 1461 // 1462 1463 try_return( Status = STATUS_FILE_LOCK_CONFLICT ); 1464 } 1465 } 1466 } 1467 } 1468 1469 // 1470 // This code detects if we are a recursive synchronous page write 1471 // on a write through file object. 
1472 // 1473 1474 if (FlagOn(Irp->Flags, IRP_SYNCHRONOUS_PAGING_IO) && 1475 FlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_RECURSIVE_CALL)) { 1476 1477 PIRP TopIrp; 1478 1479 TopIrp = IoGetTopLevelIrp(); 1480 1481 // 1482 // This clause determines if the top level request was 1483 // in the FastIo path. Gack. Since we don't have a 1484 // real sharing protocol for the top level IRP field ... 1485 // yet ... if someone put things other than a pure IRP in 1486 // there we best be careful. 1487 // 1488 1489 if ((ULONG_PTR)TopIrp > FSRTL_MAX_TOP_LEVEL_IRP_FLAG && 1490 NodeType(TopIrp) == IO_TYPE_IRP) { 1491 1492 PIO_STACK_LOCATION IrpStack; 1493 1494 IrpStack = IoGetCurrentIrpStackLocation(TopIrp); 1495 1496 // 1497 // Finally this routine detects if the Top irp was a 1498 // cached write to this file and thus we are the writethrough. 1499 // 1500 1501 if ((IrpStack->MajorFunction == IRP_MJ_WRITE) && 1502 (IrpStack->FileObject->FsContext == FileObject->FsContext) && 1503 !FlagOn(TopIrp->Flags,IRP_NOCACHE)) { 1504 1505 RecursiveWriteThrough = TRUE; 1506 SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_WRITE_THROUGH ); 1507 } 1508 } 1509 } 1510 1511 // 1512 // Here is the deal with ValidDataLength and FileSize: 1513 // 1514 // Rule 1: PagingIo is never allowed to extend file size. 1515 // 1516 // Rule 2: Only the top level requestor may extend Valid 1517 // Data Length. This may be paging IO, as when a 1518 // a user maps a file, but will never be as a result 1519 // of cache lazy writer writes since they are not the 1520 // top level request. 1521 // 1522 // Rule 3: If, using Rules 1 and 2, we decide we must extend 1523 // file size or valid data, we take the Fcb exclusive. 1524 // 1525 1526 // 1527 // Now see if we are writing beyond valid data length, and thus 1528 // maybe beyond the file size. If so, then we must 1529 // release the Fcb and reacquire it exclusive. 
Note that it is 1530 // important that when not writing beyond EOF that we check it 1531 // while acquired shared and keep the FCB acquired, in case some 1532 // turkey truncates the file. 1533 // 1534 1535 // 1536 // Note that the lazy writer must not be allowed to try and 1537 // acquire the resource exclusive. This is not a problem since 1538 // the lazy writer is paging IO and thus not allowed to extend 1539 // file size, and is never the top level guy, thus not able to 1540 // extend valid data length. 1541 // 1542 1543 if ( !CalledByLazyWriter && 1544 1545 !RecursiveWriteThrough && 1546 1547 (WriteToEof || 1548 StartingVbo + ByteCount > ValidDataLength)) { 1549 1550 // 1551 // If this was an asynchronous write, we are going to make 1552 // the request synchronous at this point, but only kinda. 1553 // At the last moment, before sending the write off to the 1554 // driver, we may shift back to async. 1555 // 1556 // The modified page writer already has the resources 1557 // he requires, so this will complete in small finite 1558 // time. 1559 // 1560 1561 if (!Wait) { 1562 1563 Wait = TRUE; 1564 SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT ); 1565 1566 if (NonCachedIo) { 1567 1568 NT_ASSERT( TypeOfOpen == UserFileOpen ); 1569 1570 SwitchBackToAsync = TRUE; 1571 } 1572 } 1573 1574 // 1575 // We need Exclusive access to the Fcb/Dcb since we will 1576 // probably have to extend valid data and/or file. 1577 // 1578 1579 // 1580 // Y'know, the PagingIo case is a mapped page writer, and 1581 // MmFlushSection or the mapped page writer itself already 1582 // snatched up the main exclusive for us via the AcquireForCcFlush 1583 // or AcquireForModWrite logic (the default logic parallels FAT's 1584 // requirements since this order/model came first). Should ASSERT 1585 // this since it'll just go 1->2, and a few more unnecesary DPC 1586 // transitions. 1587 // 1588 // The preacquire is done to avoid inversion over the collided flush 1589 // meta-resource in Mm. 
The one time this is not true is at final 1590 // system shutdown time, when Mm goes off and flushes all the dirty 1591 // pages. Since the callback is defined as Wait == FALSE he can't 1592 // guarantee acquisition (though with clean process shutdown being 1593 // enforced, it really should be now). Permit this to float. 1594 // 1595 // Note that since we're going to fall back on the acquisition aleady 1596 // done for us, don't confuse things by thinking we did the work 1597 // for it. 1598 // 1599 1600 if ( PagingIo ) { 1601 1602 ExReleaseResourceLite( FcbOrDcb->Header.PagingIoResource ); 1603 PagingIoResourceAcquired = FALSE; 1604 1605 } else { 1606 1607 // 1608 // The Fcb may already be acquired exclusive due to coherency 1609 // work performed earlier. If so, obviously no work to do. 1610 // 1611 1612 if (!FcbAcquiredExclusive) { 1613 1614 FatReleaseFcb( IrpContext, FcbOrDcb ); 1615 FcbOrDcbAcquired = FALSE; 1616 1617 if (!FatAcquireExclusiveFcb( IrpContext, FcbOrDcb )) { 1618 1619 DebugTrace( 0, Dbg, "Cannot acquire FcbOrDcb = %p shared without waiting\n", FcbOrDcb ); 1620 1621 try_return( PostIrp = TRUE ); 1622 } 1623 1624 FcbOrDcbAcquired = TRUE; 1625 1626 #ifdef _MSC_VER 1627 #pragma prefast( suppress:28931, "convenient for debugging" ) 1628 #endif 1629 FcbAcquiredExclusive = TRUE; 1630 } 1631 } 1632 1633 // 1634 // Now that we have the Fcb exclusive, see if this write 1635 // qualifies for being made async again. The key point 1636 // here is that we are going to update ValidDataLength in 1637 // the Fcb before returning. We must make sure this will 1638 // not cause a problem. One thing we must do is keep out 1639 // the FastIo path. 
1640 // 1641 1642 if (SwitchBackToAsync) { 1643 1644 if ((FcbOrDcb->NonPaged->SectionObjectPointers.DataSectionObject != NULL) || 1645 (StartingVbo + ByteCount > FcbOrDcb->Header.ValidDataLength.LowPart) || 1646 FatNoAsync) { 1647 1648 RtlZeroMemory( IrpContext->FatIoContext, sizeof(FAT_IO_CONTEXT) ); 1649 1650 KeInitializeEvent( &IrpContext->FatIoContext->Wait.SyncEvent, 1651 NotificationEvent, 1652 FALSE ); 1653 1654 SwitchBackToAsync = FALSE; 1655 1656 } else { 1657 1658 if (!FcbOrDcb->NonPaged->OutstandingAsyncEvent) { 1659 1660 FcbOrDcb->NonPaged->OutstandingAsyncEvent = 1661 FsRtlAllocatePoolWithTag( NonPagedPoolNx, 1662 sizeof(KEVENT), 1663 TAG_EVENT ); 1664 1665 KeInitializeEvent( FcbOrDcb->NonPaged->OutstandingAsyncEvent, 1666 NotificationEvent, 1667 FALSE ); 1668 } 1669 1670 // 1671 // If we are transitioning from 0 to 1, reset the event. 1672 // 1673 1674 if (ExInterlockedAddUlong( &FcbOrDcb->NonPaged->OutstandingAsyncWrites, 1675 1, 1676 &FatData.GeneralSpinLock ) == 0) { 1677 1678 KeClearEvent( FcbOrDcb->NonPaged->OutstandingAsyncEvent ); 1679 } 1680 1681 UnwindOutstandingAsync = TRUE; 1682 1683 IrpContext->FatIoContext->Wait.Async.NonPagedFcb = FcbOrDcb->NonPaged; 1684 } 1685 } 1686 1687 // 1688 // Now that we have the Fcb exclusive, get a new batch of 1689 // filesize and ValidDataLength. 1690 // 1691 1692 ValidDataToDisk = FcbOrDcb->ValidDataToDisk; 1693 ValidDataLength = FcbOrDcb->Header.ValidDataLength.LowPart; 1694 FileSize = FcbOrDcb->Header.FileSize.LowPart; 1695 1696 // 1697 // If this is PagingIo check again if any pruning is 1698 // required. It is important to start from basic 1699 // princples in case the file was *grown* ... 
1700 // 1701 1702 if ( PagingIo ) { 1703 1704 if (StartingVbo >= FileSize) { 1705 Irp->IoStatus.Information = 0; 1706 try_return( Status = STATUS_SUCCESS ); 1707 } 1708 1709 ByteCount = IrpSp->Parameters.Write.Length; 1710 1711 if (ByteCount > FileSize - StartingVbo) { 1712 ByteCount = FileSize - StartingVbo; 1713 } 1714 } 1715 } 1716 1717 // 1718 // Remember the final requested byte count 1719 // 1720 1721 if (NonCachedIo && !Wait) { 1722 1723 IrpContext->FatIoContext->Wait.Async.RequestedByteCount = 1724 ByteCount; 1725 } 1726 1727 // 1728 // Remember the initial file size and valid data length, 1729 // just in case ..... 1730 // 1731 1732 InitialFileSize = FileSize; 1733 1734 InitialValidDataLength = ValidDataLength; 1735 1736 // 1737 // Make sure the FcbOrDcb is still good 1738 // 1739 1740 FatVerifyFcb( IrpContext, FcbOrDcb ); 1741 1742 // 1743 // Check for writing to end of File. If we are, then we have to 1744 // recalculate a number of fields. 1745 // 1746 1747 if ( WriteToEof ) { 1748 1749 StartingVbo = FileSize; 1750 StartingByte = FcbOrDcb->Header.FileSize; 1751 1752 // 1753 // Since we couldn't know this information until now, perform the 1754 // necessary bounds checking that we ommited at the top because 1755 // this is a WriteToEof operation. 1756 // 1757 1758 1759 if (!FatIsIoRangeValid( Vcb, StartingByte, ByteCount)) { 1760 1761 Irp->IoStatus.Information = 0; 1762 try_return( Status = STATUS_DISK_FULL ); 1763 } 1764 1765 1766 } 1767 1768 // 1769 // If this is a non paging write to a data stream object we have to 1770 // check for access according to the current state op/filelocks. 1771 // 1772 // Note that after this point, operations will be performed on the file. 1773 // No modifying activity can occur prior to this point in the write 1774 // path. 
1775 // 1776 1777 if (!PagingIo && TypeOfOpen == UserFileOpen) { 1778 1779 Status = FsRtlCheckOplock( FatGetFcbOplock(FcbOrDcb), 1780 Irp, 1781 IrpContext, 1782 FatOplockComplete, 1783 FatPrePostIrp ); 1784 1785 if (Status != STATUS_SUCCESS) { 1786 1787 OplockPostIrp = TRUE; 1788 PostIrp = TRUE; 1789 try_return( NOTHING ); 1790 } 1791 1792 // 1793 // This oplock call can affect whether fast IO is possible. 1794 // We may have broken an oplock to no oplock held. If the 1795 // current state of the file is FastIoIsNotPossible then 1796 // recheck the fast IO state. 1797 // 1798 1799 if (FcbOrDcb->Header.IsFastIoPossible == FastIoIsNotPossible) { 1800 1801 FcbOrDcb->Header.IsFastIoPossible = FatIsFastIoPossible( FcbOrDcb ); 1802 } 1803 1804 // 1805 // And finally check the regular file locks. 1806 // 1807 1808 if (!FsRtlCheckLockForWriteAccess( &FcbOrDcb->Specific.Fcb.FileLock, Irp )) { 1809 1810 try_return( Status = STATUS_FILE_LOCK_CONFLICT ); 1811 } 1812 } 1813 1814 // 1815 // Determine if we will deal with extending the file. Note that 1816 // this implies extending valid data, and so we already have all 1817 // of the required synchronization done. 1818 // 1819 1820 if (!PagingIo && (StartingVbo + ByteCount > FileSize)) { 1821 1822 ExtendingFile = TRUE; 1823 } 1824 1825 if ( ExtendingFile ) { 1826 1827 1828 // 1829 // EXTENDING THE FILE 1830 // 1831 1832 // 1833 // For an extending write on hotplug media, we are going to defer the metadata 1834 // updates via Cc's lazy writer. They will also be flushed when the handle is closed. 
1835 // 1836 1837 if (FlagOn(Vcb->VcbState, VCB_STATE_FLAG_DEFERRED_FLUSH)) { 1838 1839 SetFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_DISABLE_WRITE_THROUGH); 1840 } 1841 1842 // 1843 // Update our local copy of FileSize 1844 // 1845 1846 FileSize = StartingVbo + ByteCount; 1847 1848 1849 if (FcbOrDcb->Header.AllocationSize.QuadPart == FCB_LOOKUP_ALLOCATIONSIZE_HINT) { 1850 1851 FatLookupFileAllocationSize( IrpContext, FcbOrDcb ); 1852 } 1853 1854 // 1855 // If the write goes beyond the allocation size, add some 1856 // file allocation. 1857 // 1858 1859 1860 if ( (FileSize) > FcbOrDcb->Header.AllocationSize.LowPart ) { 1861 1862 1863 BOOLEAN AllocateMinimumSize = TRUE; 1864 1865 // 1866 // Only do allocation chuncking on writes if this is 1867 // not the first allocation added to the file. 1868 // 1869 1870 if (FcbOrDcb->Header.AllocationSize.LowPart != 0 ) { 1871 1872 ULONGLONG ApproximateClusterCount; 1873 ULONGLONG TargetAllocation; 1874 ULONGLONG AddedAllocation; 1875 ULONGLONG Multiplier; 1876 ULONG BytesPerCluster; 1877 ULONG ClusterAlignedFileSize; 1878 1879 // 1880 // We are going to try and allocate a bigger chunk than 1881 // we actually need in order to maximize FastIo usage. 1882 // 1883 // The multiplier is computed as follows: 1884 // 1885 // 1886 // (FreeDiskSpace ) 1887 // Mult = ( (-------------------------) / 32 ) + 1 1888 // (FileSize - AllocationSize) 1889 // 1890 // and max out at 32. 1891 // 1892 // With this formula we start winding down chunking 1893 // as we get near the disk space wall. 1894 // 1895 // For instance on an empty 1 MEG floppy doing an 8K 1896 // write, the multiplier is 6, or 48K to allocate. 1897 // When this disk is half full, the multipler is 3, 1898 // and when it is 3/4 full, the mupltiplier is only 1. 1899 // 1900 // On a larger disk, the multiplier for a 8K read will 1901 // reach its maximum of 32 when there is at least ~8 Megs 1902 // available. 
1903 // 1904 1905 // 1906 // Small write performance note, use cluster aligned 1907 // file size in above equation. 1908 // 1909 1910 // 1911 // We need to carefully consider what happens when we approach 1912 // a 2^32 byte filesize. Overflows will cause problems. 1913 // 1914 1915 BytesPerCluster = 1 << Vcb->AllocationSupport.LogOfBytesPerCluster; 1916 1917 // 1918 // This can overflow if the target filesize is in the last cluster. 1919 // In this case, we can obviously skip over all of this fancy 1920 // logic and just max out the file right now. 1921 // 1922 1923 1924 ClusterAlignedFileSize = ((FileSize) + (BytesPerCluster - 1)) & 1925 ~(BytesPerCluster - 1); 1926 1927 1928 if (ClusterAlignedFileSize != 0) { 1929 1930 // 1931 // This actually has a chance but the possibility of overflowing 1932 // the numerator is pretty unlikely, made more unlikely by moving 1933 // the divide by 32 up to scale the BytesPerCluster. However, even if it does the 1934 // effect is completely benign. 1935 // 1936 // FAT32 with a 64k cluster and over 2^21 clusters would do it (and 1937 // so forth - 2^(16 - 5 + 21) == 2^32). Since this implies a partition 1938 // of 32gb and a number of clusters (and cluster size) we plan to 1939 // disallow in format for FAT32, the odds of this happening are pretty 1940 // low anyway. 1941 Multiplier = ((Vcb->AllocationSupport.NumberOfFreeClusters * 1942 (BytesPerCluster >> 5)) / 1943 (ClusterAlignedFileSize - 1944 FcbOrDcb->Header.AllocationSize.LowPart)) + 1; 1945 1946 if (Multiplier > 32) { Multiplier = 32; } 1947 1948 // These computations will never overflow a ULONGLONG because a file is capped at 4GB, and 1949 // a single write can be a max of 4GB. 1950 AddedAllocation = Multiplier * (ClusterAlignedFileSize - FcbOrDcb->Header.AllocationSize.LowPart); 1951 1952 TargetAllocation = FcbOrDcb->Header.AllocationSize.LowPart + AddedAllocation; 1953 1954 // 1955 // We know that TargetAllocation is in whole clusters. 
Now 1956 // we check if it exceeded the maximum valid FAT file size. 1957 // If it did, we fall back to allocating up to the maximum legal size. 1958 // 1959 1960 if (TargetAllocation > ~BytesPerCluster + 1) { 1961 1962 TargetAllocation = ~BytesPerCluster + 1; 1963 AddedAllocation = TargetAllocation - FcbOrDcb->Header.AllocationSize.LowPart; 1964 } 1965 1966 // 1967 // Now do an unsafe check here to see if we should even 1968 // try to allocate this much. If not, just allocate 1969 // the minimum size we need, if so so try it, but if it 1970 // fails, just allocate the minimum size we need. 1971 // 1972 1973 ApproximateClusterCount = (AddedAllocation / BytesPerCluster); 1974 1975 if (ApproximateClusterCount <= Vcb->AllocationSupport.NumberOfFreeClusters) { 1976 1977 _SEH2_TRY { 1978 1979 FatAddFileAllocation( IrpContext, 1980 FcbOrDcb, 1981 FileObject, 1982 (ULONG)TargetAllocation ); 1983 1984 AllocateMinimumSize = FALSE; 1985 SetFlag( FcbOrDcb->FcbState, FCB_STATE_TRUNCATE_ON_CLOSE ); 1986 1987 } _SEH2_EXCEPT( _SEH2_GetExceptionCode() == STATUS_DISK_FULL ? 1988 EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH ) { 1989 1990 FatResetExceptionState( IrpContext ); 1991 } _SEH2_END; 1992 } 1993 } 1994 } 1995 1996 if ( AllocateMinimumSize ) { 1997 1998 1999 FatAddFileAllocation( IrpContext, 2000 FcbOrDcb, 2001 FileObject, 2002 FileSize ); 2003 2004 2005 } 2006 2007 // 2008 // Assert that the allocation worked 2009 // 2010 2011 2012 NT_ASSERT( FcbOrDcb->Header.AllocationSize.LowPart >= FileSize ); 2013 2014 2015 } 2016 2017 // 2018 // Set the new file size in the Fcb 2019 // 2020 2021 2022 NT_ASSERT( FileSize <= FcbOrDcb->Header.AllocationSize.LowPart ); 2023 2024 2025 FcbOrDcb->Header.FileSize.LowPart = FileSize; 2026 2027 // 2028 // Extend the cache map, letting mm knows the new file size. 2029 // We only have to do this if the file is cached. 
2030 // 2031 2032 if (CcIsFileCached(FileObject)) { 2033 CcSetFileSizes( FileObject, (PCC_FILE_SIZES)&FcbOrDcb->Header.AllocationSize ); 2034 } 2035 } 2036 2037 // 2038 // Determine if we will deal with extending valid data. 2039 // 2040 2041 if ( !CalledByLazyWriter && 2042 !RecursiveWriteThrough && 2043 (StartingVbo + ByteCount > ValidDataLength) ) { 2044 2045 ExtendingValidData = TRUE; 2046 2047 } else { 2048 2049 // 2050 // If not extending valid data, and we otherwise believe we 2051 // could demote from exclusive to shared, do so. This will 2052 // occur when we synchronize tight for noncached coherency 2053 // but must defer the demotion until after we decide about 2054 // valid data length, which requires it exclusive. Since we 2055 // can't drop/re-pick the resources without letting a pagefault 2056 // squirt through, the resource decision was kept up in the air 2057 // until now. 2058 // 2059 // Note that we've still got PagingIo exclusive in these cases. 2060 // 2061 2062 if (FcbCanDemoteToShared) { 2063 2064 NT_ASSERT( FcbAcquiredExclusive && ExIsResourceAcquiredExclusiveLite( FcbOrDcb->Header.Resource )); 2065 ExConvertExclusiveToSharedLite( FcbOrDcb->Header.Resource ); 2066 FcbAcquiredExclusive = FALSE; 2067 } 2068 } 2069 2070 if (ValidDataToDisk > ValidDataLength) { 2071 2072 ValidDataToCheck = ValidDataToDisk; 2073 2074 } else { 2075 2076 ValidDataToCheck = ValidDataLength; 2077 } 2078 2079 2080 2081 // 2082 // HANDLE THE NON-CACHED CASE 2083 // 2084 2085 if ( NonCachedIo ) { 2086 2087 // 2088 // Declare some local variables for enumeration through the 2089 // runs of the file, and an array to store parameters for 2090 // parallel I/Os 2091 // 2092 2093 ULONG SectorSize; 2094 2095 ULONG BytesToWrite; 2096 2097 DebugTrace(0, Dbg, "Non cached write.\n", 0); 2098 2099 // 2100 // Round up to sector boundry. The end of the write interval 2101 // must, however, be beyond EOF. 
2102 // 2103 2104 SectorSize = (ULONG)Vcb->Bpb.BytesPerSector; 2105 2106 BytesToWrite = (ByteCount + (SectorSize - 1)) 2107 & ~(SectorSize - 1); 2108 2109 // 2110 // All requests should be well formed and 2111 // make sure we don't wipe out any data 2112 // 2113 2114 if (((StartingVbo & (SectorSize - 1)) != 0) || 2115 2116 ((BytesToWrite != ByteCount) && 2117 (StartingVbo + ByteCount < ValidDataLength))) { 2118 2119 NT_ASSERT( FALSE ); 2120 2121 DebugTrace( 0, Dbg, "FatCommonWrite -> STATUS_NOT_IMPLEMENTED\n", 0); 2122 try_return( Status = STATUS_NOT_IMPLEMENTED ); 2123 } 2124 2125 // 2126 // If this noncached transfer is at least one sector beyond 2127 // the current ValidDataLength in the Fcb, then we have to 2128 // zero the sectors in between. This can happen if the user 2129 // has opened the file noncached, or if the user has mapped 2130 // the file and modified a page beyond ValidDataLength. It 2131 // *cannot* happen if the user opened the file cached, because 2132 // ValidDataLength in the Fcb is updated when he does the cached 2133 // write (we also zero data in the cache at that time), and 2134 // therefore, we will bypass this test when the data 2135 // is ultimately written through (by the Lazy Writer). 2136 // 2137 // For the paging file we don't care about security (ie. 2138 // stale data), do don't bother zeroing. 2139 // 2140 // We can actually get writes wholly beyond valid data length 2141 // from the LazyWriter because of paging Io decoupling. 2142 // 2143 2144 if (!CalledByLazyWriter && 2145 !RecursiveWriteThrough && 2146 (StartingVbo > ValidDataToCheck)) { 2147 2148 FatZeroData( IrpContext, 2149 Vcb, 2150 FileObject, 2151 ValidDataToCheck, 2152 StartingVbo - ValidDataToCheck ); 2153 } 2154 2155 // 2156 // Make sure we write FileSize to the dirent if we 2157 // are extending it and we are successful. (This may or 2158 // may not occur Write Through, but that is fine.) 
2159 // 2160 2161 WriteFileSizeToDirent = TRUE; 2162 2163 // 2164 // Perform the actual IO 2165 // 2166 2167 if (SwitchBackToAsync) { 2168 2169 Wait = FALSE; 2170 ClearFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT ); 2171 } 2172 2173 #ifdef SYSCACHE_COMPILE 2174 2175 #define MY_SIZE 0x1000000 2176 #define LONGMAP_COUNTER 2177 2178 #ifdef BITMAP 2179 // 2180 // Maintain a bitmap of IO started on this file. 2181 // 2182 2183 { 2184 PULONG WriteMask = FcbOrDcb->WriteMask; 2185 2186 if (NULL == WriteMask) { 2187 2188 WriteMask = FsRtlAllocatePoolWithTag( NonPagedPoolNx, 2189 (MY_SIZE/PAGE_SIZE) / 8, 2190 'wtaF' ); 2191 2192 FcbOrDcb->WriteMask = WriteMask; 2193 RtlZeroMemory(WriteMask, (MY_SIZE/PAGE_SIZE) / 8); 2194 } 2195 2196 if (StartingVbo < MY_SIZE) { 2197 2198 ULONG Off = StartingVbo; 2199 ULONG Len = BytesToWrite; 2200 2201 if (Off + Len > MY_SIZE) { 2202 Len = MY_SIZE - Off; 2203 } 2204 2205 while (Len != 0) { 2206 WriteMask[(Off/PAGE_SIZE) / 32] |= 2207 1 << (Off/PAGE_SIZE) % 32; 2208 2209 Off += PAGE_SIZE; 2210 if (Len <= PAGE_SIZE) { 2211 break; 2212 } 2213 Len -= PAGE_SIZE; 2214 } 2215 } 2216 } 2217 #endif 2218 2219 #ifdef LONGMAP_COUNTER 2220 // 2221 // Maintain a longmap of IO started on this file, each ulong containing 2222 // the value of an ascending counter per write (gives us order information). 2223 // 2224 // Unlike the old bitmask stuff, this is mostly well synchronized. 
2225 // 2226 2227 { 2228 PULONG WriteMask = (PULONG)FcbOrDcb->WriteMask; 2229 2230 if (NULL == WriteMask) { 2231 2232 WriteMask = FsRtlAllocatePoolWithTag( NonPagedPoolNx, 2233 (MY_SIZE/PAGE_SIZE) * sizeof(ULONG), 2234 'wtaF' ); 2235 2236 FcbOrDcb->WriteMask = WriteMask; 2237 RtlZeroMemory(WriteMask, (MY_SIZE/PAGE_SIZE) * sizeof(ULONG)); 2238 } 2239 2240 if (StartingVbo < MY_SIZE) { 2241 2242 ULONG Off = StartingVbo; 2243 ULONG Len = BytesToWrite; 2244 ULONG Tick = InterlockedIncrement( &FcbOrDcb->WriteMaskData ); 2245 2246 if (Off + Len > MY_SIZE) { 2247 Len = MY_SIZE - Off; 2248 } 2249 2250 while (Len != 0) { 2251 InterlockedExchange( WriteMask + Off/PAGE_SIZE, Tick ); 2252 2253 Off += PAGE_SIZE; 2254 if (Len <= PAGE_SIZE) { 2255 break; 2256 } 2257 Len -= PAGE_SIZE; 2258 } 2259 } 2260 } 2261 #endif 2262 2263 #endif 2264 2265 2266 if (FatNonCachedIo( IrpContext, 2267 Irp, 2268 FcbOrDcb, 2269 StartingVbo, 2270 BytesToWrite, 2271 BytesToWrite, 2272 0) == STATUS_PENDING) { 2273 2274 2275 UnwindOutstandingAsync = FALSE; 2276 2277 #ifdef _MSC_VER 2278 #pragma prefast( suppress:28931, "convenient for debugging" ) 2279 #endif 2280 Wait = TRUE; 2281 SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT ); 2282 2283 IrpContext->FatIoContext = NULL; 2284 Irp = NULL; 2285 2286 // 2287 // As a matter of fact, if we hit this we are in deep trouble 2288 // if VDL is being extended. We are no longer attached to the 2289 // IRP, and have thus lost synchronization. Note that we should 2290 // not hit this case anymore since we will not re-async vdl extension. 
                    //

                    //
                    // The request was started asynchronously; the async
                    // completion path owns the Irp from here on.  We never
                    // extend valid data on this path.
                    //

                    NT_ASSERT( !ExtendingValidData );

                    try_return( Status = STATUS_PENDING );
                }

                //
                // If the call didn't succeed, raise the error status
                //

                if (!NT_SUCCESS( Status = Irp->IoStatus.Status )) {

                    FatNormalizeAndRaiseStatus( IrpContext, Status );

                } else {

                    ULONG NewValidDataToDisk;

                    //
                    // Else set the context block to reflect the entire write
                    // Also assert we got how many bytes we asked for.
                    //

                    NT_ASSERT( Irp->IoStatus.Information == BytesToWrite );

                    Irp->IoStatus.Information = ByteCount;

                    //
                    // Take this opportunity to update ValidDataToDisk,
                    // capped at FileSize so we never record more valid
                    // on-disk data than the file contains.
                    //

                    NewValidDataToDisk = StartingVbo + ByteCount;

                    if (NewValidDataToDisk > FileSize) {
                        NewValidDataToDisk = FileSize;
                    }

                    //
                    // Only ever move ValidDataToDisk forward.
                    //

                    if (FcbOrDcb->ValidDataToDisk < NewValidDataToDisk) {
                        FcbOrDcb->ValidDataToDisk = NewValidDataToDisk;
                    }
                }

                //
                // The transfer is either complete, or the Iosb contains the
                // appropriate status.
                //

                try_return( Status );

            } // if No Intermediate Buffering

            //
            // HANDLE CACHED CASE
            //

            else {

                //
                // Paging I/O is always noncached and must not reach the
                // cached path.
                //

                NT_ASSERT( !PagingIo );

                //
                // We delay setting up the file cache until now, in case the
                // caller never does any I/O to the file, and thus
                // FileObject->PrivateCacheMap == NULL.
                //

                if ( FileObject->PrivateCacheMap == NULL ) {

                    DebugTrace(0, Dbg, "Initialize cache mapping.\n", 0);

                    //
                    // Get the file allocation size, and if it is less than
                    // the file size, raise file corrupt error.
                    //

                    if (FcbOrDcb->Header.AllocationSize.QuadPart == FCB_LOOKUP_ALLOCATIONSIZE_HINT) {

                        FatLookupFileAllocationSize( IrpContext, FcbOrDcb );
                    }

                    if ( FileSize > FcbOrDcb->Header.AllocationSize.LowPart ) {

                        FatPopUpFileCorrupt( IrpContext, FcbOrDcb );

                        FatRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR );
                    }

                    //
                    // Now initialize the cache map.
                    //

                    FatInitializeCacheMap( FileObject,
                                           (PCC_FILE_SIZES)&FcbOrDcb->Header.AllocationSize,
                                           FALSE,
                                           &FatData.CacheManagerCallbacks,
                                           FcbOrDcb );

                    CcSetReadAheadGranularity( FileObject, READ_AHEAD_GRANULARITY );

                    //
                    // Special case large floppy transfers, and make the file
                    // object write through.  For small floppy transfers,
                    // set a timer to go off in a second and flush the file.
                    //

                    if (!FlagOn( FileObject->Flags, FO_WRITE_THROUGH ) &&
                        FlagOn(Vcb->VcbState, VCB_STATE_FLAG_DEFERRED_FLUSH)) {

                        //
                        // Page-aligned, at-least-page-sized writes get
                        // write-through; anything smaller gets the deferred
                        // one-second flush timer instead.
                        //

                        if (((StartingByte.LowPart & (PAGE_SIZE-1)) == 0) &&
                            (ByteCount >= PAGE_SIZE)) {

                            SetFlag( FileObject->Flags, FO_WRITE_THROUGH );

                        } else {

                            LARGE_INTEGER OneSecondFromNow;
                            PDEFERRED_FLUSH_CONTEXT FlushContext;

                            //
                            // Get pool and initialize the timer and DPC.
                            // NOTE(review): no NULL check here — this relies
                            // on FsRtlAllocatePoolWithTag raising on
                            // allocation failure; confirm.
                            //

                            FlushContext = FsRtlAllocatePoolWithTag( NonPagedPoolNx,
                                                                     sizeof(DEFERRED_FLUSH_CONTEXT),
                                                                     TAG_DEFERRED_FLUSH_CONTEXT );

                            KeInitializeTimer( &FlushContext->Timer );

                            KeInitializeDpc( &FlushContext->Dpc,
                                             FatDeferredFlushDpc,
                                             FlushContext );

                            //
                            // We have to reference the file object here.
                            // (FatDeferredFlush dereferences it when the
                            // flush completes.)
                            //

                            ObReferenceObject( FileObject );

                            FlushContext->File = FileObject;

                            //
                            // Let'er rip!
                            //

                            //
                            // Negative value => relative due time, in 100ns
                            // units: 10,000,000 * 100ns = 1 second.
                            //

                            OneSecondFromNow.QuadPart = (LONG)-1*1000*1000*10;

                            KeSetTimer( &FlushContext->Timer,
                                        OneSecondFromNow,
                                        &FlushContext->Dpc );
                        }
                    }
                }

                //
                // If this write is beyond valid data length, then we
                // must zero the data in between.
                //

                if ( StartingVbo > ValidDataToCheck ) {

                    //
                    // Call the Cache Manager to zero the data.
                    //

                    if (!FatZeroData( IrpContext,
                                      Vcb,
                                      FileObject,
                                      ValidDataToCheck,
                                      StartingVbo - ValidDataToCheck )) {

                        //
                        // Zeroing would have blocked; post the request.
                        //

                        DebugTrace( 0, Dbg, "Cached Write could not wait to zero\n", 0 );

                        try_return( PostIrp = TRUE );
                    }
                }

                WriteFileSizeToDirent = BooleanFlagOn(IrpContext->Flags,
                                                      IRP_CONTEXT_FLAG_WRITE_THROUGH);

                //
                // DO A NORMAL CACHED WRITE, if the MDL bit is not set,
                //

                if (!FlagOn(IrpContext->MinorFunction, IRP_MN_MDL)) {

                    DebugTrace(0, Dbg, "Cached write.\n", 0);

                    //
                    // Get hold of the user's buffer.
                    //

                    SystemBuffer = FatMapUserBuffer( IrpContext, Irp );

                    //
                    // Do the write, possibly writing through
                    //

#if (NTDDI_VERSION >= NTDDI_WIN8)
                    if (!CcCopyWriteEx( FileObject,
                                        &StartingByte,
                                        ByteCount,
                                        Wait,
                                        SystemBuffer,
                                        Irp->Tail.Overlay.Thread )) {
#else
                    if (!CcCopyWrite( FileObject,
                                      &StartingByte,
                                      ByteCount,
                                      Wait,
                                      SystemBuffer )) {
#endif

                        //
                        // Could not do the copy without blocking; post it.
                        //

                        DebugTrace( 0, Dbg, "Cached Write could not wait\n", 0 );

                        try_return( PostIrp = TRUE );
                    }

                    Irp->IoStatus.Status = STATUS_SUCCESS;
                    Irp->IoStatus.Information = ByteCount;

                    try_return( Status = STATUS_SUCCESS );

                } else {

                    //
                    // DO AN MDL WRITE
                    //

                    DebugTrace(0, Dbg, "MDL write.\n", 0);

                    NT_ASSERT( Wait );

                    CcPrepareMdlWrite( FileObject,
                                       &StartingByte,
                                       ByteCount,
                                       &Irp->MdlAddress,
                                       &Irp->IoStatus );

                    Status = Irp->IoStatus.Status;

                    try_return( Status );
                }
            }
        }

        //
        // These two cases correspond to a system write directory file and
        // ea file.
        //

        if (( TypeOfOpen == DirectoryFile ) || ( TypeOfOpen == EaFile)
            ) {

            ULONG SectorSize;

#if FASTFATDBG
            if ( TypeOfOpen == DirectoryFile ) {
                DebugTrace(0, Dbg, "Type of write is directoryfile\n", 0);
            } else if ( TypeOfOpen == EaFile) {
                DebugTrace(0, Dbg, "Type of write is eafile\n", 0);
            }
#endif

            //
            // Make sure the FcbOrDcb is still good
            //

            FatVerifyFcb( IrpContext, FcbOrDcb );

            //
            // Synchronize here with people deleting directories and
            // mucking with the internals of the EA file.
            //

            if (!ExAcquireSharedStarveExclusive( FcbOrDcb->Header.PagingIoResource,
                                                 Wait )) {

                DebugTrace( 0, Dbg, "Cannot acquire FcbOrDcb = %p shared without waiting\n", FcbOrDcb );

                try_return( PostIrp = TRUE );
            }

            PagingIoResourceAcquired = TRUE;

            //
            // For async requests, record the resource so the completion
            // path can release it.
            //

            if (!Wait) {

                IrpContext->FatIoContext->Wait.Async.Resource =
                    FcbOrDcb->Header.PagingIoResource;
            }

            //
            // Check to see if we colided with a MoveFile call, and if
            // so block until it completes.
            //

            if (FcbOrDcb->MoveFileEvent) {

                (VOID)KeWaitForSingleObject( FcbOrDcb->MoveFileEvent,
                                             Executive,
                                             KernelMode,
                                             FALSE,
                                             NULL );
            }

            //
            // If we weren't called by the Lazy Writer, then this write
            // must be the result of a write-through or flush operation.
            // Setting the IrpContext flag, will cause DevIoSup.c to
            // write-through the data to the disk.
            //

            if (!FlagOn((ULONG_PTR)IoGetTopLevelIrp(), FSRTL_CACHE_TOP_LEVEL_IRP)) {

                SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_WRITE_THROUGH );
            }

            //
            // For the noncached case, assert that everything is sector
            // alligned.
            //

#ifdef _MSC_VER
#pragma prefast( suppress:28931, "needed for debug build" )
#endif
            SectorSize = (ULONG)Vcb->Bpb.BytesPerSector;

            //
            // We make several assumptions about these two types of files.
            // Make sure all of them are true.
            //

            NT_ASSERT( NonCachedIo && PagingIo );
            NT_ASSERT( ((StartingVbo | ByteCount) & (SectorSize - 1)) == 0 );

            //
            // These calls must always be within the allocation size, which is
            // convienently the same as filesize, which conveniently doesn't
            // get reset to a hint value when we verify the volume.
            //

            if (StartingVbo >= FcbOrDcb->Header.FileSize.LowPart) {

                //
                // Entirely beyond EOF: succeed trivially with zero bytes.
                //

                DebugTrace( 0, Dbg, "PagingIo dirent started beyond EOF.\n", 0 );

                Irp->IoStatus.Information = 0;

                try_return( Status = STATUS_SUCCESS );
            }

            if ( StartingVbo + ByteCount > FcbOrDcb->Header.FileSize.LowPart ) {

                //
                // Partially beyond EOF: truncate the transfer to file size.
                //

                DebugTrace( 0, Dbg, "PagingIo dirent extending beyond EOF.\n", 0 );
                ByteCount = FcbOrDcb->Header.FileSize.LowPart - StartingVbo;
            }

            //
            // Perform the actual IO
            //

            if (FatNonCachedIo( IrpContext,
                                Irp,
                                FcbOrDcb,
                                StartingVbo,
                                ByteCount,
                                ByteCount,
                                0 ) == STATUS_PENDING) {

                //
                // The async path now owns both the context and the Irp;
                // clear our pointers so cleanup below skips them.
                //

                IrpContext->FatIoContext = NULL;

                Irp = NULL;

                try_return( Status = STATUS_PENDING );
            }

            //
            // The transfer is either complete, or the Iosb contains the
            // appropriate status.
            //
            // Also, mark the volume as needing verification to automatically
            // clean up stuff.
            //

            if (!NT_SUCCESS( Status = Irp->IoStatus.Status )) {

                FatNormalizeAndRaiseStatus( IrpContext, Status );
            }

            try_return( Status );
        }

        //
        // This is the case of a user who openned a directory. No writing is
        // allowed.
        //

        if ( TypeOfOpen == UserDirectoryOpen ) {

            DebugTrace( 0, Dbg, "FatCommonWrite -> STATUS_INVALID_PARAMETER\n", 0);

            try_return( Status = STATUS_INVALID_PARAMETER );
        }

        //
        // If we get this far, something really serious is wrong.
        //

        DebugDump("Illegal TypeOfOpen\n", 0, FcbOrDcb );

#ifdef _MSC_VER
#pragma prefast( suppress:28159, "things are seriously wrong if we get here" )
#endif
        FatBugCheck( TypeOfOpen, (ULONG_PTR) FcbOrDcb, 0 );

        //
        // All in-line completion paths land here via try_return.
        //

    try_exit: NOTHING;

        //
        // If the request was not posted and there is still an Irp,
        // deal with it.
        //

        if (Irp) {

            if ( !PostIrp ) {

                ULONG ActualBytesWrote;

                DebugTrace( 0, Dbg, "Completing request with status = %08lx\n",
                            Status);

                DebugTrace( 0, Dbg, " Information = %08lx\n",
                            Irp->IoStatus.Information);

                //
                // Record the total number of bytes actually written
                //

                ActualBytesWrote = (ULONG)Irp->IoStatus.Information;

                //
                // If the file was opened for Synchronous IO, update the current
                // file position.
                //

                if (SynchronousIo && !PagingIo) {

                    FileObject->CurrentByteOffset.LowPart =
                        StartingVbo + (NT_ERROR( Status ) ? 0 : ActualBytesWrote);
                }

                //
                // The following are things we only do if we were successful
                //

                if ( NT_SUCCESS( Status ) ) {

                    //
                    // If this was not PagingIo, mark that the modify
                    // time on the dirent needs to be updated on close.
                    //

                    if ( !PagingIo ) {

                        SetFlag( FileObject->Flags, FO_FILE_MODIFIED );
                    }

                    //
                    // If we extended the file size and we are meant to
                    // immediately update the dirent, do so. (This flag is
                    // set for either Write Through or noncached, because
                    // in either case the data and any necessary zeros are
                    // actually written to the file.)
                    //

                    if ( ExtendingFile && WriteFileSizeToDirent ) {

                        NT_ASSERT( FileObject->DeleteAccess || FileObject->WriteAccess );

                        FatSetFileSizeInDirent( IrpContext, FcbOrDcb, NULL );

                        //
                        // Report that a file size has changed.
                        //

                        FatNotifyReportChange( IrpContext,
                                               Vcb,
                                               FcbOrDcb,
                                               FILE_NOTIFY_CHANGE_SIZE,
                                               FILE_ACTION_MODIFIED );
                    }

                    //
                    // Otherwise defer the dirent update to file close.
                    //

                    if ( ExtendingFile && !WriteFileSizeToDirent ) {

                        SetFlag( FileObject->Flags, FO_FILE_SIZE_CHANGED );
                    }

                    if ( ExtendingValidData ) {

                        ULONG EndingVboWritten = StartingVbo + ActualBytesWrote;

                        //
                        // Never set a ValidDataLength greater than FileSize.
                        //

                        if ( FileSize < EndingVboWritten ) {

                            FcbOrDcb->Header.ValidDataLength.LowPart = FileSize;

                        } else {

                            FcbOrDcb->Header.ValidDataLength.LowPart = EndingVboWritten;
                        }

                        //
                        // Now, if we are noncached and the file is cached, we must
                        // tell the cache manager about the VDL extension so that
                        // async cached IO will not be optimized into zero-page faults
                        // beyond where it believes VDL is.
                        //
                        // In the cached case, since Cc did the work, it has updated
                        // itself already.
                        //

                        if (NonCachedIo && CcIsFileCached(FileObject)) {
                            CcSetFileSizes( FileObject, (PCC_FILE_SIZES)&FcbOrDcb->Header.AllocationSize );
                        }
                    }

                }

                //
                // Note that we have to unpin repinned Bcbs here after the above
                // work, but if we are going to post the request, we must do this
                // before the post (below).
                //

                FatUnpinRepinnedBcbs( IrpContext );

            } else {

                //
                // Take action if the Oplock package is not going to post the Irp.
                //

                if (!OplockPostIrp) {

                    FatUnpinRepinnedBcbs( IrpContext );

                    if ( ExtendingFile ) {

                        //
                        // We need the PagingIo resource exclusive whenever we
                        // pull back either file size or valid data length.
                        //

                        NT_ASSERT( FcbOrDcb->Header.PagingIoResource != NULL );

                        (VOID)ExAcquireResourceExclusiveLite(FcbOrDcb->Header.PagingIoResource, TRUE);

                        FcbOrDcb->Header.FileSize.LowPart = InitialFileSize;

                        NT_ASSERT( FcbOrDcb->Header.FileSize.LowPart <= FcbOrDcb->Header.AllocationSize.LowPart );

                        //
                        // Pull back the cache map as well
                        //

                        if (FileObject->SectionObjectPointer->SharedCacheMap != NULL) {

                            *CcGetFileSizePointer(FileObject) = FcbOrDcb->Header.FileSize;
                        }

                        ExReleaseResourceLite( FcbOrDcb->Header.PagingIoResource );
                    }

                    DebugTrace( 0, Dbg, "Passing request to Fsp\n", 0 );

                    Status = FatFsdPostRequest(IrpContext, Irp);
                }
            }
        }

    } _SEH2_FINALLY {

        DebugUnwind( FatCommonWrite );

        if (_SEH2_AbnormalTermination()) {

            //
            // Restore initial file size and valid data length
            //

            if (ExtendingFile || ExtendingValidData) {

                //
                // We got an error, pull back the file size if we extended it.
                //

                FcbOrDcb->Header.FileSize.LowPart = InitialFileSize;
                FcbOrDcb->Header.ValidDataLength.LowPart = InitialValidDataLength;

                NT_ASSERT( FcbOrDcb->Header.FileSize.LowPart <= FcbOrDcb->Header.AllocationSize.LowPart );

                //
                // Pull back the cache map as well
                //

                if (FileObject->SectionObjectPointer->SharedCacheMap != NULL) {

                    *CcGetFileSizePointer(FileObject) = FcbOrDcb->Header.FileSize;
                }
            }
        }

        //
        // Check if this needs to be backed out.
        //
        // (0xffffffff == -1 as a ULONG: decrement the outstanding
        // async write count we bumped earlier.)
        //

        if (UnwindOutstandingAsync) {

            ExInterlockedAddUlong( &FcbOrDcb->NonPaged->OutstandingAsyncWrites,
                                   0xffffffff,
                                   &FatData.GeneralSpinLock );
        }

        //
        // If the FcbOrDcb has been acquired, release it.
        //

        if (FcbOrDcbAcquired && Irp) {

            FatReleaseFcb( NULL, FcbOrDcb );
        }

        if (PagingIoResourceAcquired && Irp) {

            ExReleaseResourceLite( FcbOrDcb->Header.PagingIoResource );
        }

        //
        // Complete the request if we didn't post it and no exception
        //
        // Note that FatCompleteRequest does the right thing if either
        // IrpContext or Irp are NULL
        //

        if ( !PostIrp && !_SEH2_AbnormalTermination() ) {

            FatCompleteRequest( IrpContext, Irp, Status );
        }

        DebugTrace(-1, Dbg, "FatCommonWrite -> %08lx\n", Status );
    } _SEH2_END;

    return Status;
}


//
// Local support routine
//

VOID
NTAPI
FatDeferredFlushDpc (
    _In_ PKDPC Dpc,
    _In_opt_ PVOID DeferredContext,
    _In_opt_ PVOID SystemArgument1,
    _In_opt_ PVOID SystemArgument2
    )

/*++

Routine Description:

    This routine is dispatched 1 second after a small write to a deferred
    write device that initialized the cache map.  It enqueues an executive
    worker thread item to perform the actual task of flushing the file.

Arguments:

    Dpc - Supplies the DPC object (unused).

    DeferredContext - Contains the deferred flush context (a
        DEFERRED_FLUSH_CONTEXT set up by the write path, which also
        referenced the file object it carries).

    SystemArgument1 - Unused.

    SystemArgument2 - Unused.

Return Value:

    None.
--*/

{
    PDEFERRED_FLUSH_CONTEXT FlushContext;

    UNREFERENCED_PARAMETER( SystemArgument1 );
    UNREFERENCED_PARAMETER( SystemArgument2 );
    UNREFERENCED_PARAMETER( Dpc );

    FlushContext = (PDEFERRED_FLUSH_CONTEXT)DeferredContext;

    //
    // Send it off.  The real work happens in FatDeferredFlush, which is
    // pageable (see PAGED_CODE there), so it cannot run here in the DPC;
    // hand it to an executive worker thread instead.
    //

    ExInitializeWorkItem( &FlushContext->Item,
                          FatDeferredFlush,
                          FlushContext );

#ifdef _MSC_VER
#pragma prefast( suppress:28159, "prefast indicates this API is obsolete, but it's ok for fastfat to keep using it" )
#endif
    ExQueueWorkItem( &FlushContext->Item, CriticalWorkQueue );
}


//
// Local support routine
//

VOID
NTAPI
FatDeferredFlush (
    _In_ PVOID Parameter
    )

/*++

Routine Description:

    This routine performs the actual task of flushing the file.  It runs
    in an executive worker thread, queued by FatDeferredFlushDpc.

Arguments:

    Parameter - Contains the deferred flush context (PDEFERRED_FLUSH_CONTEXT).
        The referenced file object it carries is dereferenced, and the
        context itself is freed, before this routine returns.

Return Value:

    None.

--*/

{

    PFILE_OBJECT File;
    PVCB Vcb;
    PFCB FcbOrDcb;
    PCCB Ccb;

    PAGED_CODE();

    File = ((PDEFERRED_FLUSH_CONTEXT)Parameter)->File;

    FatDecodeFileObject(File, &Vcb, &FcbOrDcb, &Ccb);
    NT_ASSERT( FcbOrDcb != NULL );

    //
    // Make us appear as a top level FSP request so that we will
    // receive any errors from the flush.
    //

    IoSetTopLevelIrp( (PIRP)FSRTL_FSP_TOP_LEVEL_IRP );

    //
    // Hold the Fcb's main resource exclusive and its paging I/O
    // resource shared across the flush.
    // NOTE(review): confirm this acquisition order matches the lock
    // ordering used by the rest of the module.
    //

    ExAcquireResourceExclusiveLite( FcbOrDcb->Header.Resource, TRUE );
    ExAcquireResourceSharedLite( FcbOrDcb->Header.PagingIoResource, TRUE );

    //
    // NULL offset / zero length => flush the entire cached file.
    //

    CcFlushCache( File->SectionObjectPointer, NULL, 0, NULL );

    ExReleaseResourceLite( FcbOrDcb->Header.PagingIoResource );
    ExReleaseResourceLite( FcbOrDcb->Header.Resource );

    IoSetTopLevelIrp( NULL );

    //
    // Drop the file object reference taken when the flush was queued,
    // and free the flush context allocated with it.
    //

    ObDereferenceObject( File );

    ExFreePool( Parameter );

}