1 /*++
2
3 Copyright (c) 1989-2000 Microsoft Corporation
4
5 Module Name:
6
7 DevIoSup.c
8
9 Abstract:
10
11 This module implements the low level disk read/write support for Fat.
12
13
14 --*/
15
16 #include "fatprocs.h"
17
18 //
19 // The Bug check file id for this module
20 //
21
22 #define BugCheckFileId (FAT_BUG_CHECK_DEVIOSUP)
23
24 //
25 // Local debug trace level
26 //
27
28 #define Dbg (DEBUG_TRACE_DEVIOSUP)
29
30 #define CollectDiskIoStats(VCB,FUNCTION,IS_USER_IO,COUNT) { \
31 PFILESYSTEM_STATISTICS Stats = &(VCB)->Statistics[KeGetCurrentProcessorNumber() % FatData.NumberProcessors].Common; \
32 if (IS_USER_IO) { \
33 if ((FUNCTION) == IRP_MJ_WRITE) { \
34 Stats->UserDiskWrites += (COUNT); \
35 } else { \
36 Stats->UserDiskReads += (COUNT); \
37 } \
38 } else { \
39 if ((FUNCTION) == IRP_MJ_WRITE) { \
40 Stats->MetaDataDiskWrites += (COUNT); \
41 } else { \
42 Stats->MetaDataDiskReads += (COUNT); \
43 } \
44 } \
45 }
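//
// Illustrative use only, mirroring the calls made later in this module:
// a single user-initiated paging read against a volume would be charged
// with
//
//     CollectDiskIoStats( Vcb, IRP_MJ_READ, TRUE, 1 );
//
// which bumps UserDiskReads in the per-processor statistics bucket.
//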
46
47 typedef struct _FAT_SYNC_CONTEXT {
48
49 //
50 // Io status block for the request
51 //
52
53 IO_STATUS_BLOCK Iosb;
54
55 //
56 // Event to be signaled when the request completes
57 //
58
59 KEVENT Event;
60
61 } FAT_SYNC_CONTEXT, *PFAT_SYNC_CONTEXT;
62
63
64 //
65 // Completion Routine declarations
66 //
67
68 IO_COMPLETION_ROUTINE FatMultiSyncCompletionRoutine;
69
70 NTSTATUS
71 NTAPI
72 FatMultiSyncCompletionRoutine (
73 _In_ PDEVICE_OBJECT DeviceObject,
74 _In_ PIRP Irp,
75 _In_reads_opt_(_Inexpressible_("varies")) PVOID Contxt
76 );
77
78 IO_COMPLETION_ROUTINE FatMultiAsyncCompletionRoutine;
79
80 NTSTATUS
81 NTAPI
82 FatMultiAsyncCompletionRoutine (
83 _In_ PDEVICE_OBJECT DeviceObject,
84 _In_ PIRP Irp,
85 _In_reads_opt_(_Inexpressible_("varies")) PVOID Contxt
86 );
87
88 IO_COMPLETION_ROUTINE FatSpecialSyncCompletionRoutine;
89
90 NTSTATUS
91 NTAPI
92 FatSpecialSyncCompletionRoutine (
93 _In_ PDEVICE_OBJECT DeviceObject,
94 _In_ PIRP Irp,
95 _In_reads_opt_(_Inexpressible_("varies")) PVOID Contxt
96 );
97
98 IO_COMPLETION_ROUTINE FatSingleSyncCompletionRoutine;
99
100 NTSTATUS
101 NTAPI
102 FatSingleSyncCompletionRoutine (
103 _In_ PDEVICE_OBJECT DeviceObject,
104 _In_ PIRP Irp,
105 _In_reads_opt_(_Inexpressible_("varies")) PVOID Contxt
106 );
107
108 IO_COMPLETION_ROUTINE FatSingleAsyncCompletionRoutine;
109
110 NTSTATUS
111 NTAPI
112 FatSingleAsyncCompletionRoutine (
113 _In_ PDEVICE_OBJECT DeviceObject,
114 _In_ PIRP Irp,
115 _In_reads_opt_(_Inexpressible_("varies")) PVOID Contxt
116 );
117
118 IO_COMPLETION_ROUTINE FatPagingFileCompletionRoutine;
119
120 NTSTATUS
121 NTAPI
122 FatPagingFileCompletionRoutine (
123 _In_ PDEVICE_OBJECT DeviceObject,
124 _In_ PIRP Irp,
125 _In_reads_opt_(_Inexpressible_("varies")) PVOID MasterIrp
126 );
127
128 IO_COMPLETION_ROUTINE FatPagingFileCompletionRoutineCatch;
129
130 NTSTATUS
131 NTAPI
132 FatPagingFileCompletionRoutineCatch (
133 _In_ PDEVICE_OBJECT DeviceObject,
134 _In_ PIRP Irp,
135 _In_reads_opt_(_Inexpressible_("varies")) PVOID Contxt
136 );
137
138 VOID
139 FatSingleNonAlignedSync (
140 IN PIRP_CONTEXT IrpContext,
141 IN PVCB Vcb,
142 IN PUCHAR Buffer,
143 IN LBO Lbo,
144 IN ULONG ByteCount,
145 IN PIRP Irp
146 );
147
148 //
149 // The following macro decides whether to send a request directly to
150 // the device driver, or to other routines. It was meant to
151 // replace IoCallDriver as transparently as possible. It must only be
152 // called with a read or write Irp.
153 //
154 // NTSTATUS
155 // FatLowLevelReadWrite (
156 // PIRP_CONTEXT IrpContext,
157 // PDEVICE_OBJECT DeviceObject,
158 // PIRP Irp,
159 // PVCB Vcb
160 // );
161 //
162
163 #define FatLowLevelReadWrite(IRPCONTEXT,DO,IRP,VCB) ( \
164 IoCallDriver((DO),(IRP)) \
165 )
166
167 //
168 // The following macro handles completion-time zeroing of buffers.
169 //
170
171 #define FatDoCompletionZero( I, C ) \
172 if ((C)->ZeroMdl) { \
173 NT_ASSERT( (C)->ZeroMdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA | \
174 MDL_SOURCE_IS_NONPAGED_POOL));\
175 if (NT_SUCCESS((I)->IoStatus.Status)) { \
176 RtlZeroMemory( (C)->ZeroMdl->MappedSystemVa, \
177 (C)->ZeroMdl->ByteCount ); \
178 } \
179 IoFreeMdl((C)->ZeroMdl); \
180 (C)->ZeroMdl = NULL; \
181 }
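//
// Illustrative use only: the completion routines below are expected to
// invoke this with the completed Irp and the Fat I/O context, e.g.
//
//     FatDoCompletionZero( Irp, Context );
//
// so the trailing bytes beyond the caller-visible range are zeroed once,
// and the ZeroMdl freed, before the request completes.
//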
182
183 #if (NTDDI_VERSION >= NTDDI_WIN8)
184 #define FatUpdateIOCountersPCW(IsAWrite,Count) \
185 FsRtlUpdateDiskCounters( ((IsAWrite) ? 0 : (Count) ), \
186 ((IsAWrite) ? (Count) : 0) )
187 #else
188 #define FatUpdateIOCountersPCW(IsAWrite,Count)
189 #endif
190
191 #ifdef ALLOC_PRAGMA
192 #pragma alloc_text(PAGE, FatMultipleAsync)
193 #pragma alloc_text(PAGE, FatSingleAsync)
194 #pragma alloc_text(PAGE, FatSingleNonAlignedSync)
195 #pragma alloc_text(PAGE, FatWaitSync)
196 #pragma alloc_text(PAGE, FatLockUserBuffer)
197 #pragma alloc_text(PAGE, FatBufferUserBuffer)
198 #pragma alloc_text(PAGE, FatMapUserBuffer)
199 #pragma alloc_text(PAGE, FatNonCachedIo)
200 #pragma alloc_text(PAGE, FatNonCachedNonAlignedRead)
201 #pragma alloc_text(PAGE, FatPerformDevIoCtrl)
202 #endif
203
204 typedef struct FAT_PAGING_FILE_CONTEXT {
205 KEVENT Event;
206 PMDL RestoreMdl;
207 } FAT_PAGING_FILE_CONTEXT, *PFAT_PAGING_FILE_CONTEXT;
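//
// Rough sketch (for orientation only) of how this context is used in
// FatPagingFileIo below when the reserve MDL path forces re-use of the
// master Irp:
//
//     KeInitializeEvent( &Context.Event, SynchronizationEvent, FALSE );
//     Context.RestoreMdl = Irp->MdlAddress;
//     IoSetCompletionRoutine( Irp, FatPagingFileCompletionRoutineCatch,
//                             &Context, TRUE, TRUE, TRUE );
//     (VOID)IoCallDriver( DeviceObject, Irp );
//     KeWaitForSingleObject( &Context.Event, Executive, KernelMode, FALSE, NULL );
//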
208
209
210 VOID
211 FatPagingFileIo (
212 IN PIRP Irp,
213 IN PFCB Fcb
214 )
215
216 /*++
217
218 Routine Description:
219
220 This routine performs the non-cached disk io described in its parameters.
221 This routine never blocks, and should only be used with the paging
222 file since no completion processing is performed.
223
224 Arguments:
225
226 Irp - Supplies the requesting Irp.
227
228 Fcb - Supplies the file to act on.
229
230 Return Value:
231
232 None.
233
234 --*/
235
236 {
237 //
238 // Declare some local variables for enumeration through the
239 // runs of the file.
240 //
241
242 VBO Vbo;
243 ULONG ByteCount;
244
245 PMDL Mdl;
246 LBO NextLbo;
247 VBO NextVbo = 0;
248 ULONG NextByteCount;
249 ULONG RemainingByteCount;
250 BOOLEAN MustSucceed;
251
252 ULONG FirstIndex;
253 ULONG CurrentIndex;
254 ULONG LastIndex;
255
256 LBO LastLbo;
257 ULONG LastByteCount;
258
259 BOOLEAN MdlIsReserve = FALSE;
260 BOOLEAN IrpIsMaster = FALSE;
261 FAT_PAGING_FILE_CONTEXT Context;
262 LONG IrpCount;
263
264 PIRP AssocIrp;
265 PIO_STACK_LOCATION IrpSp;
266 PIO_STACK_LOCATION NextIrpSp;
267 ULONG BufferOffset;
268 PDEVICE_OBJECT DeviceObject;
269
270 #ifndef __REACTOS__
271 BOOLEAN IsAWrite = FALSE;
272 #endif
273
274 DebugTrace(+1, Dbg, "FatPagingFileIo\n", 0);
275 DebugTrace( 0, Dbg, "Irp = %p\n", Irp );
276 DebugTrace( 0, Dbg, "Fcb = %p\n", Fcb );
277
278 NT_ASSERT( FlagOn( Fcb->FcbState, FCB_STATE_PAGING_FILE ));
279
280 //
281 // Initialize some locals.
282 //
283
284 BufferOffset = 0;
285 DeviceObject = Fcb->Vcb->TargetDeviceObject;
286 IrpSp = IoGetCurrentIrpStackLocation( Irp );
287
288 Vbo = IrpSp->Parameters.Read.ByteOffset.LowPart;
289 ByteCount = IrpSp->Parameters.Read.Length;
290 #ifndef __REACTOS__
291 IsAWrite = (IrpSp->MajorFunction == IRP_MJ_WRITE);
292 #endif
293
294 MustSucceed = FatLookupMcbEntry( Fcb->Vcb, &Fcb->Mcb,
295 Vbo,
296 &NextLbo,
297 &NextByteCount,
298 &FirstIndex);
299
300 //
301 // If this run isn't present, something is very wrong.
302 //
303
304 if (!MustSucceed) {
305
306 #ifdef _MSC_VER
307 #pragma prefast( suppress:28159, "things are seriously wrong if we get here" )
308 #endif
309 FatBugCheck( Vbo, ByteCount, 0 );
310 }
311
312 #if (NTDDI_VERSION >= NTDDI_WIN8)
313
314 //
315 // Charge the IO to the paging file to the current thread
316 //
317
318 if (FatDiskAccountingEnabled) {
319
320 PETHREAD ThreadIssuingIo = PsGetCurrentThread();
321 BOOLEAN IsWriteOperation = FALSE;
322
323 if (IrpSp->MajorFunction == IRP_MJ_WRITE) {
324 IsWriteOperation = TRUE;
325 }
326
327 PsUpdateDiskCounters( PsGetThreadProcess( ThreadIssuingIo ),
328 (IsWriteOperation ? 0 : ByteCount ), // bytes to read
329 (IsWriteOperation ? ByteCount : 0), // bytes to write
330 (IsWriteOperation ? 0 : 1), // # of reads
331 (IsWriteOperation ? 1 : 0), // # of writes
332 0 );
333 }
334 #endif
335
336 // See if the write covers a single valid run, and if so pass
337 // it on.
338 //
339
340 if ( NextByteCount >= ByteCount ) {
341
342 DebugTrace( 0, Dbg, "Passing Irp on to Disk Driver\n", 0 );
343
344 //
345 // Setup the next IRP stack location for the disk driver beneath us.
346 //
347
348 NextIrpSp = IoGetNextIrpStackLocation( Irp );
349
350 NextIrpSp->MajorFunction = IrpSp->MajorFunction;
351 NextIrpSp->Parameters.Read.Length = ByteCount;
352 NextIrpSp->Parameters.Read.ByteOffset.QuadPart = NextLbo;
353
354 //
355 // Since this is Paging file IO, we'll just ignore the verify bit.
356 //
357
358 SetFlag( NextIrpSp->Flags, SL_OVERRIDE_VERIFY_VOLUME );
359
360 //
361 // Set up the completion routine address in our stack frame.
362 // This is only invoked on error or cancel, and just copies
363 // the error Status into master irp's iosb.
364 //
365 // If the error implies a media problem, it also enqueues a
366 // worker item to write out the dirty bit so that the next
367 // time we run we will do an autochk /r
368 //
369
370 IoSetCompletionRoutine( Irp,
371 &FatPagingFileCompletionRoutine,
372 Irp,
373 FALSE,
374 TRUE,
375 TRUE );
376
377 //
378 // Issue the read/write request
379 //
380 // If IoCallDriver returns an error, it has completed the Irp
381 // and the error will be dealt with as a normal IO error.
382 //
383
384 (VOID)IoCallDriver( DeviceObject, Irp );
385
386 //
387 // We just issued an IO to the storage stack, update the counters indicating so.
388 //
389
390 if (FatDiskAccountingEnabled) {
391
392 FatUpdateIOCountersPCW( IsAWrite, ByteCount );
393 }
394
395 DebugTrace(-1, Dbg, "FatPagingFileIo -> VOID\n", 0);
396 return;
397 }
398
399 //
400 // Find out how many runs there are.
401 //
402
403 MustSucceed = FatLookupMcbEntry( Fcb->Vcb, &Fcb->Mcb,
404 Vbo + ByteCount - 1,
405 &LastLbo,
406 &LastByteCount,
407 &LastIndex);
408
409 //
410 // If this run isn't present, something is very wrong.
411 //
412
413 if (!MustSucceed) {
414
415 #ifdef _MSC_VER
416 #pragma prefast( suppress:28159, "things are seriously wrong if we get here" )
417 #endif
418 FatBugCheck( Vbo + ByteCount - 1, 1, 0 );
419 }
420
421 CurrentIndex = FirstIndex;
422
423 //
424 // Now set up the Irp->IoStatus. It will be modified by the
425 // multi-completion routine in case of error or verify required.
426 //
427
428 Irp->IoStatus.Status = STATUS_SUCCESS;
429 Irp->IoStatus.Information = ByteCount;
430
431 //
432 // Loop while there are still byte writes to satisfy. The way we'll work this
433 // is to hope for the best - one associated IRP per run, which will let us be
434 // completely async after launching all the IO.
435 //
436 // IrpCount will indicate the remaining number of associated Irps to launch.
437 //
438 // All we have to do is make sure IrpCount doesn't hit zero before we're building
439 // the very last Irp. If it is positive when we're done, it means we have to
440 // wait for the rest of the associated Irps to come back before we complete the
441 // master by hand.
442 //
443 // This will keep the master from completing early.
444 //
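//
// Worked example (illustrative): with three runs, IrpCount starts at 3.
// Launching true associated Irps for the first two runs drops it to 1,
// so the third and last piece may also go out as an associated Irp and
// the I/O system completes the master for us. If any piece instead had
// to re-use the master Irp (reserve MDL case), IrpCount stays positive
// and the wait/complete logic at the bottom finishes the master by hand.
//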
445
446 Irp->AssociatedIrp.IrpCount = IrpCount = LastIndex - FirstIndex + 1;
447
448 while (CurrentIndex <= LastIndex) {
449
450 //
451 // Reset this for unwinding purposes
452 //
453
454 AssocIrp = NULL;
455
456 //
457 // If next run is larger than we need, "ya get what ya need".
458 //
459
460 if (NextByteCount > ByteCount) {
461 NextByteCount = ByteCount;
462 }
463
464 RemainingByteCount = 0;
465
466 //
467 // Allocate and build a partial Mdl for the request.
468 //
469
470 Mdl = IoAllocateMdl( (PCHAR)Irp->UserBuffer + BufferOffset,
471 NextByteCount,
472 FALSE,
473 FALSE,
474 AssocIrp );
475
476 if (Mdl == NULL) {
477
478 //
479 // Pick up the reserve MDL
480 //
481
482 KeWaitForSingleObject( &FatReserveEvent, Executive, KernelMode, FALSE, NULL );
483
484 Mdl = FatReserveMdl;
485 MdlIsReserve = TRUE;
486
487 //
488 // Trim to fit the size of the reserve MDL.
489 //
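//
// For illustration, assuming FAT_RESERVE_MDL_SIZE is 16 and PAGE_SIZE is
// 0x1000, a 0x14000-byte piece would be issued as 0x10000 bytes now, with
// RemainingByteCount = 0x4000 picked up on the next pass through the loop.
//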
490
491 if (NextByteCount > FAT_RESERVE_MDL_SIZE * PAGE_SIZE) {
492
493 RemainingByteCount = NextByteCount - FAT_RESERVE_MDL_SIZE * PAGE_SIZE;
494 NextByteCount = FAT_RESERVE_MDL_SIZE * PAGE_SIZE;
495 }
496 }
497
498 IoBuildPartialMdl( Irp->MdlAddress,
499 Mdl,
500 (PCHAR)Irp->UserBuffer + BufferOffset,
501 NextByteCount );
502
503 //
504 // Now that we have properly bounded this piece of the transfer, it is
505 // time to read/write it. We can simplify life slightly by always
506 // re-using the master IRP for cases where we use the reserve MDL,
507 // since we'll always be synchronous for those and can use a single
508 // completion context on our local stack.
509 //
510 // We also must prevent ourselves from issuing an associated IRP that would
511 // complete the master UNLESS this is the very last IRP we'll issue.
512 //
513 // This logic looks a bit complicated, but is hopefully understandable.
514 //
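//
// Restated: a true associated Irp is used only when we are not on the
// reserve MDL and either more than one Irp remains to be launched
// (IrpCount != 1) or this piece is the very last one; in every other
// case, including failure to allocate the associated Irp, we fall back
// to re-using the master Irp synchronously.
//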
515
516 if (!MdlIsReserve &&
517 (IrpCount != 1 ||
518 (CurrentIndex == LastIndex &&
519 RemainingByteCount == 0))) {
520
521 AssocIrp = IoMakeAssociatedIrp( Irp, (CCHAR)(DeviceObject->StackSize + 1) );
522 }
523
524 if (AssocIrp == NULL) {
525
526 AssocIrp = Irp;
527 IrpIsMaster = TRUE;
528
529 //
530 // We need to drain the associated Irps so we can reliably figure out if
531 // the master Irp is showing a failed status, in which case we bail out
532 // immediately - as opposed to putting the value in the status field in
533 // jeopardy due to our re-use of the master Irp.
534 //
535
536 while (Irp->AssociatedIrp.IrpCount != IrpCount) {
537
538 KeDelayExecutionThread (KernelMode, FALSE, &Fat30Milliseconds);
539 }
540
541 //
542 // Note that since we failed to launch this associated Irp, that the completion
543 // code at the bottom will take care of completing the master Irp.
544 //
545
546 if (!NT_SUCCESS(Irp->IoStatus.Status)) {
547
548 NT_ASSERT( IrpCount );
549 break;
550 }
551
552 } else {
553
554 //
555 // Indicate we used an associated Irp.
556 //
557
558 IrpCount -= 1;
559 }
560
561 //
562 // With an associated IRP, we must take over the first stack location so
563 // we can have one to put the completion routine on. When re-using the
564 // master IRP, it's already there.
565 //
566
567 if (!IrpIsMaster) {
568
569 //
570 // Get the first IRP stack location in the associated Irp
571 //
572
573 IoSetNextIrpStackLocation( AssocIrp );
574 NextIrpSp = IoGetCurrentIrpStackLocation( AssocIrp );
575
576 //
577 // Setup the Stack location to describe our read.
578 //
579
580 NextIrpSp->MajorFunction = IrpSp->MajorFunction;
581 NextIrpSp->Parameters.Read.Length = NextByteCount;
582 NextIrpSp->Parameters.Read.ByteOffset.QuadPart = Vbo;
583
584 //
585 // We also need the VolumeDeviceObject in the Irp stack in case
586 // we take the failure path.
587 //
588
589 NextIrpSp->DeviceObject = IrpSp->DeviceObject;
590
591 } else {
592
593 //
594 // Save the MDL in the IRP and prepare the stack
595 // context for the completion routine.
596 //
597
598 KeInitializeEvent( &Context.Event, SynchronizationEvent, FALSE );
599 Context.RestoreMdl = Irp->MdlAddress;
600 }
601
602 //
603 // And drop our Mdl into the Irp.
604 //
605
606 AssocIrp->MdlAddress = Mdl;
607
608 //
609 // Set up the completion routine address in our stack frame.
610 // For true associated IRPs, this is only invoked on error or
611 // cancel, and just copies the error Status into master irp's
612 // iosb.
613 //
614 // If the error implies a media problem, it also enqueues a
615 // worker item to write out the dirty bit so that the next
616 // time we run we will do an autochk /r
617 //
618
619 if (IrpIsMaster) {
620
621 IoSetCompletionRoutine( AssocIrp,
622 FatPagingFileCompletionRoutineCatch,
623 &Context,
624 TRUE,
625 TRUE,
626 TRUE );
627
628 } else {
629
630 IoSetCompletionRoutine( AssocIrp,
631 FatPagingFileCompletionRoutine,
632 Irp,
633 FALSE,
634 TRUE,
635 TRUE );
636 }
637
638 //
639 // Setup the next IRP stack location for the disk driver beneath us.
640 //
641
642 NextIrpSp = IoGetNextIrpStackLocation( AssocIrp );
643
644 //
645 // Since this is paging file IO, we'll just ignore the verify bit.
646 //
647
648 SetFlag( NextIrpSp->Flags, SL_OVERRIDE_VERIFY_VOLUME );
649
650 //
651 // Setup the Stack location to do a read from the disk driver.
652 //
653
654 NextIrpSp->MajorFunction = IrpSp->MajorFunction;
655 NextIrpSp->Parameters.Read.Length = NextByteCount;
656 NextIrpSp->Parameters.Read.ByteOffset.QuadPart = NextLbo;
657
658 (VOID)IoCallDriver( DeviceObject, AssocIrp );
659
660 //
661 // We just issued an IO to the storage stack, update the counters indicating so.
662 //
663
664 if (FatDiskAccountingEnabled) {
665
666 FatUpdateIOCountersPCW( IsAWrite, (ULONG64)NextByteCount );
667 }
668
669 //
670 // Wait for the Irp in the catch case and drop the flags.
671 //
672
673 if (IrpIsMaster) {
674
675 KeWaitForSingleObject( &Context.Event, Executive, KernelMode, FALSE, NULL );
676 IrpIsMaster = MdlIsReserve = FALSE;
677
678 //
679 // If the Irp is showing a failed status, there is no point in continuing.
680 // In doing so, we get to avoid squirreling away the failed status in case
681 // we were to re-use the master irp again.
682 //
683 // Note that since we re-used the master, we must not have issued the "last"
684 // associated Irp, and thus the completion code at the bottom will take care
685 // of that for us.
686 //
687
688 if (!NT_SUCCESS(Irp->IoStatus.Status)) {
689
690 NT_ASSERT( IrpCount );
691 break;
692 }
693 }
694
695 //
696 // Now adjust everything for the next pass through the loop.
697 //
698
699 Vbo += NextByteCount;
700 BufferOffset += NextByteCount;
701 ByteCount -= NextByteCount;
702
703 //
704 // Try to lookup the next run, if we are not done and we got
705 // all the way through the current run.
706 //
707
708 if (RemainingByteCount) {
709
710 //
711 // Advance the Lbo/Vbo if we have more to do in the current run.
712 //
713
714 NextLbo += NextByteCount;
715 NextVbo += NextByteCount;
716
717 NextByteCount = RemainingByteCount;
718
719 } else {
720
721 CurrentIndex += 1;
722
723 if ( CurrentIndex <= LastIndex ) {
724
725 NT_ASSERT( ByteCount != 0 );
726
727 FatGetNextMcbEntry( Fcb->Vcb, &Fcb->Mcb,
728 CurrentIndex,
729 &NextVbo,
730 &NextLbo,
731 &NextByteCount );
732
733 NT_ASSERT( NextVbo == Vbo );
734 }
735 }
736 } // while ( CurrentIndex <= LastIndex )
737
738 //
739 // If we didn't get enough associated Irps going to make this asynchronous, we
740 // twiddle our thumbs and wait for those we did launch to complete.
741 //
742
743 if (IrpCount) {
744
745 while (Irp->AssociatedIrp.IrpCount != IrpCount) {
746
747 KeDelayExecutionThread (KernelMode, FALSE, &Fat30Milliseconds);
748 }
749
750 IoCompleteRequest( Irp, IO_DISK_INCREMENT );
751 }
752
753 DebugTrace(-1, Dbg, "FatPagingFileIo -> VOID\n", 0);
754 return;
755 }
756
757 #if (NTDDI_VERSION >= NTDDI_WIN8)
758
759 VOID
760 FatUpdateDiskStats (
761 IN PIRP_CONTEXT IrpContext,
762 IN PIRP Irp,
763 IN ULONG ByteCount
764 )
765 /*++
766
767 Routine Description:
768
769 Charge the appropriate process for the IO this IRP will cause.
770
771 Arguments:
772
773 IrpContext- The Irp Context
774
775 Irp - Supplies the requesting Irp.
776
777 ByteCount - The length of the operation.
778
779 Return Value:
780
781 None.
782
783 --*/
784
785 {
786 PETHREAD OriginatingThread = NULL;
787 ULONG NumReads = 0;
788 ULONG NumWrites = 0;
789 ULONGLONG BytesToRead = 0;
790 ULONGLONG BytesToWrite = 0;
791
792 //
793 // Here we attempt to charge the IO back to the originating process.
794 // - These checks are intended to cover the following cases:
795 // o Buffered sync reads
796 // o Unbuffered sync read
797 // o Inline metadata reads
798 // o memory mapped reads (in-line faulting of data)
799 //
800
801 if (IrpContext->MajorFunction == IRP_MJ_READ) {
802
803 NumReads++;
804 BytesToRead = ByteCount;
805
806 if ((Irp->Tail.Overlay.Thread != NULL) &&
807 !IoIsSystemThread( Irp->Tail.Overlay.Thread )) {
808
809 OriginatingThread = Irp->Tail.Overlay.Thread;
810
811 } else if (!IoIsSystemThread( PsGetCurrentThread() )) {
812
813 OriginatingThread = PsGetCurrentThread();
814
815 //
816 // We couldn't find a non-system entity, so this should be charged to system.
817 // Do so only if we are top level.
818 // If we are not top-level then the read was initiated by someone like Cc (read ahead)
819 // who should have already accounted for this IO.
820 //
821
822 } else if (IoIsSystemThread( PsGetCurrentThread() ) &&
823 (IoGetTopLevelIrp() == Irp)) {
824
825 OriginatingThread = PsGetCurrentThread();
826 }
827
828 //
829 // Charge the write to Originating process.
830 // Intended to cover the following writes:
831 // - Unbuffered sync write
832 // - unbuffered async write
833 //
834 // If we are not top-level, then it should already have been accounted for
835 // somewhere else (Cc).
836 //
837
838 } else if (IrpContext->MajorFunction == IRP_MJ_WRITE) {
839
840 NumWrites++;
841 BytesToWrite = ByteCount;
842
843 if (IoGetTopLevelIrp() == Irp) {
844
845 if ((Irp->Tail.Overlay.Thread != NULL) &&
846 !IoIsSystemThread( Irp->Tail.Overlay.Thread )) {
847
848 OriginatingThread = Irp->Tail.Overlay.Thread;
849
850 } else {
851
852 OriginatingThread = PsGetCurrentThread();
853 }
854
855 //
856 // For mapped page writes
857 //
858
859 } else if (IoGetTopLevelIrp() == (PIRP)FSRTL_MOD_WRITE_TOP_LEVEL_IRP) {
860
861 OriginatingThread = PsGetCurrentThread();
862 }
863 }
864
865 if (OriginatingThread != NULL) {
866
867 PsUpdateDiskCounters( PsGetThreadProcess( OriginatingThread ),
868 BytesToRead,
869 BytesToWrite,
870 NumReads,
871 NumWrites,
872 0 );
873 }
874 }
875
876 #endif
877
878
879
880 _Requires_lock_held_(_Global_critical_region_)
881 NTSTATUS
882 FatNonCachedIo (
883 IN PIRP_CONTEXT IrpContext,
884 IN PIRP Irp,
885 IN PFCB FcbOrDcb,
886 IN ULONG StartingVbo,
887 IN ULONG ByteCount,
888 IN ULONG UserByteCount,
889 IN ULONG StreamFlags
890 )
891 /*++
892
893 Routine Description:
894
895 This routine performs the non-cached disk io described in its parameters.
896 The choice of a single run is made if possible, otherwise multiple runs
897 are executed.
898
899 Arguments:
900
901 IrpContext->MajorFunction - Supplies either IRP_MJ_READ or IRP_MJ_WRITE.
902
903 Irp - Supplies the requesting Irp.
904
905 FcbOrDcb - Supplies the file to act on.
906
907 StartingVbo - The starting point for the operation.
908
909 ByteCount - The length of the operation.
910
911 UserByteCount - The last byte the user can see; the rest is to be zeroed.
912
913 StreamFlags - flag to indicate special attributes for a NonCachedIo.
914
915 Return Value:
916
917 The status of the transfer, or STATUS_PENDING if the request will complete asynchronously.
918
919 --*/
920
921 {
922
923 //
924 // Declare some local variables for enumeration through the
925 // runs of the file, and an array to store parameters for
926 // parallel I/Os
927 //
928
929 BOOLEAN Wait;
930
931 LBO NextLbo;
932 VBO NextVbo;
933 ULONG NextByteCount;
934 BOOLEAN NextIsAllocated;
935
936 LBO LastLbo;
937 ULONG LastByteCount;
938 BOOLEAN LastIsAllocated;
939
940 BOOLEAN EndOnMax;
941
942 ULONG FirstIndex;
943 ULONG CurrentIndex;
944 ULONG LastIndex;
945
946 ULONG NextRun;
947 ULONG BufferOffset;
948 ULONG OriginalByteCount;
949
950
951
952 IO_RUN StackIoRuns[FAT_MAX_IO_RUNS_ON_STACK];
953 PIO_RUN IoRuns;
954
955
956 PAGED_CODE();
957
958 UNREFERENCED_PARAMETER( StreamFlags );
959
960 DebugTrace(+1, Dbg, "FatNonCachedIo\n", 0);
961 DebugTrace( 0, Dbg, "Irp = %p\n", Irp );
962 DebugTrace( 0, Dbg, "MajorFunction = %08lx\n", IrpContext->MajorFunction );
963 DebugTrace( 0, Dbg, "FcbOrDcb = %p\n", FcbOrDcb );
964 DebugTrace( 0, Dbg, "StartingVbo = %08lx\n", StartingVbo );
965 DebugTrace( 0, Dbg, "ByteCount = %08lx\n", ByteCount );
966
967 if (!FlagOn(Irp->Flags, IRP_PAGING_IO)) {
968
969 PFILE_SYSTEM_STATISTICS Stats =
970 &FcbOrDcb->Vcb->Statistics[KeGetCurrentProcessorNumber() % FatData.NumberProcessors];
971
972 if (IrpContext->MajorFunction == IRP_MJ_READ) {
973 Stats->Fat.NonCachedReads += 1;
974 Stats->Fat.NonCachedReadBytes += ByteCount;
975 } else {
976 Stats->Fat.NonCachedWrites += 1;
977 Stats->Fat.NonCachedWriteBytes += ByteCount;
978 }
979 }
980
981 //
982 // Initialize some locals.
983 //
984
985 NextRun = 0;
986 BufferOffset = 0;
987 OriginalByteCount = ByteCount;
988
989 Wait = BooleanFlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
990
991 #if (NTDDI_VERSION >= NTDDI_WIN8)
992
993 //
994 // Disk IO accounting
995 //
996
997 if (FatDiskAccountingEnabled) {
998
999 FatUpdateDiskStats( IrpContext,
1000 Irp,
1001 ByteCount );
1002 }
1003 #endif
1004
1005 //
1006 // For nonbuffered I/O, we need the buffer locked in all
1007 // cases.
1008 //
1009 // This call may raise. If this call succeeds and a subsequent
1010 // condition is raised, the buffers are unlocked automatically
1011 // by the I/O system when the request is completed, via the
1012 // Irp->MdlAddress field.
1013 //
1014
1015 FatLockUserBuffer( IrpContext,
1016 Irp,
1017 (IrpContext->MajorFunction == IRP_MJ_READ) ?
1018 IoWriteAccess : IoReadAccess,
1019 ByteCount );
1020
1021
1022
1023 //
1024 // No zeroing for trailing sectors if requested.
1025 // Otherwise setup the required zeroing for read requests.
1026 //
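//
// For example (illustrative): a 200-byte user read rounded up to one
// 512-byte sector arrives here with UserByteCount == 200 and
// ByteCount == 512; the partial Mdl built below covers the trailing
// 312 bytes, which are zeroed at completion time via FatDoCompletionZero.
//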
1027
1028
1029 if (UserByteCount != ByteCount) {
1030
1031
1032 PMDL Mdl;
1033
1034 NT_ASSERT( ByteCount > UserByteCount );
1035 _Analysis_assume_(ByteCount > UserByteCount);
1036
1037 Mdl = IoAllocateMdl( (PUCHAR) Irp->UserBuffer + UserByteCount,
1038 ByteCount - UserByteCount,
1039 FALSE,
1040 FALSE,
1041 NULL );
1042
1043 if (Mdl == NULL) {
1044
1045 FatRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES );
1046 }
1047
1048 IoBuildPartialMdl( Irp->MdlAddress,
1049 Mdl,
1050 (PUCHAR) Irp->UserBuffer + UserByteCount,
1051 ByteCount - UserByteCount );
1052
1053 IrpContext->FatIoContext->ZeroMdl = Mdl;
1054
1055 //
1056 // Map the MDL now so we can't fail at IO completion time. Note
1057 // that this will be only a single page.
1058 //
1059
1060 if (MmGetSystemAddressForMdlSafe( Mdl, NormalPagePriority | MdlMappingNoExecute ) == NULL) {
1061
1062 FatRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES );
1063 }
1064 }
1065
1066
1067 //
1068 // Try to lookup the first run. If there is just a single run,
1069 // we may just be able to pass it on.
1070 //
1071
1072 FatLookupFileAllocation( IrpContext,
1073 FcbOrDcb,
1074 StartingVbo,
1075 &NextLbo,
1076 &NextByteCount,
1077 &NextIsAllocated,
1078 &EndOnMax,
1079 &FirstIndex );
1080
1081 //
1082 // We just added the allocation, thus there must be at least
1083 // one entry in the mcb corresponding to our write, ie.
1084 // NextIsAllocated must be true. If not, the pre-existing file
1085 // must have an allocation error.
1086 //
1087
1088 if ( !NextIsAllocated ) {
1089
1090 FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
1091
1092 FatRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR );
1093 }
1094
1095 NT_ASSERT( NextByteCount != 0 );
1096
1097 //
1098 // If the request was not aligned correctly, read in the first
1099 // part first.
1100 //
1101
1102
1103 //
1104 // See if the write covers a single valid run, and if so pass
1105 // it on. We must bias this by the byte that is lost at the
1106 // end of the maximal file.
1107 //
1108
1109 if ( NextByteCount >= ByteCount - (EndOnMax ? 1 : 0)) {
1110
1111 if (FlagOn(Irp->Flags, IRP_PAGING_IO)) {
1112 CollectDiskIoStats(FcbOrDcb->Vcb, IrpContext->MajorFunction,
1113 FlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_USER_IO), 1);
1114 } else {
1115
1116 PFILE_SYSTEM_STATISTICS Stats =
1117 &FcbOrDcb->Vcb->Statistics[KeGetCurrentProcessorNumber() % FatData.NumberProcessors];
1118
1119 if (IrpContext->MajorFunction == IRP_MJ_READ) {
1120 Stats->Fat.NonCachedDiskReads += 1;
1121 } else {
1122 Stats->Fat.NonCachedDiskWrites += 1;
1123 }
1124 }
1125
1126 DebugTrace( 0, Dbg, "Passing 1 Irp on to Disk Driver\n", 0 );
1127
1128 FatSingleAsync( IrpContext,
1129 FcbOrDcb->Vcb,
1130 NextLbo,
1131 ByteCount,
1132 Irp );
1133
1134 } else {
1135
1136 //
1137 // If we can't wait, and there are more runs than we can handle,
1138 // we will have to post this request.
1139 //
1140
1141 FatLookupFileAllocation( IrpContext,
1142 FcbOrDcb,
1143 StartingVbo + ByteCount - 1,
1144 &LastLbo,
1145 &LastByteCount,
1146 &LastIsAllocated,
1147 &EndOnMax,
1148 &LastIndex );
1149
1150 //
1151 // Since we already added the allocation for the whole
1152 // write, assert that we find runs until ByteCount == 0
1153 // Otherwise this file is corrupt.
1154 //
1155
1156 if ( !LastIsAllocated ) {
1157
1158 FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
1159
1160 FatRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR );
1161 }
1162
1163 if (LastIndex - FirstIndex + 1 > FAT_MAX_IO_RUNS_ON_STACK) {
1164
1165 IoRuns = FsRtlAllocatePoolWithTag( PagedPool,
1166 (LastIndex - FirstIndex + 1) * sizeof(IO_RUN),
1167 TAG_IO_RUNS );
1168
1169 } else {
1170
1171 IoRuns = StackIoRuns;
1172 }
1173
1174 NT_ASSERT( LastIndex != FirstIndex );
1175
1176 CurrentIndex = FirstIndex;
1177
1178 //
1179 // Loop while there are still byte writes to satisfy.
1180 //
1181
1182 while (CurrentIndex <= LastIndex) {
1183
1184
1185 NT_ASSERT( NextByteCount != 0);
1186 NT_ASSERT( ByteCount != 0);
1187
1188 //
1189 // If next run is larger than we need, "ya get what you need".
1190 //
1191
1192 if (NextByteCount > ByteCount) {
1193 NextByteCount = ByteCount;
1194 }
1195
1196 //
1197 // Now that we have properly bounded this piece of the
1198 // transfer, it is time to write it.
1199 //
1200 // We remember each piece of a parallel run by saving the
1201 // essential information in the IoRuns array. The transfers
1202 // are started up in parallel below.
1203 //
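//
// Illustratively, a transfer split across two runs is recorded as
//
//     IoRuns[0]: Vbo = V,      Lbo = L0, Offset = 0,  ByteCount = N0
//     IoRuns[1]: Vbo = V + N0, Lbo = L1, Offset = N0, ByteCount = N1
//
// and FatMultipleAsync below builds one associated Irp, with a partial
// Mdl over the matching slice of the user buffer, for each entry.
//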
1204
1205 IoRuns[NextRun].Vbo = StartingVbo;
1206 IoRuns[NextRun].Lbo = NextLbo;
1207 IoRuns[NextRun].Offset = BufferOffset;
1208 IoRuns[NextRun].ByteCount = NextByteCount;
1209 NextRun += 1;
1210
1211 //
1212 // Now adjust everything for the next pass through the loop.
1213 //
1214
1215 StartingVbo += NextByteCount;
1216 BufferOffset += NextByteCount;
1217 ByteCount -= NextByteCount;
1218
1219 //
1220 // Try to lookup the next run (if we are not done).
1221 //
1222
1223 CurrentIndex += 1;
1224
1225 if ( CurrentIndex <= LastIndex ) {
1226
1227 NT_ASSERT( ByteCount != 0 );
1228
1229 FatGetNextMcbEntry( FcbOrDcb->Vcb, &FcbOrDcb->Mcb,
1230 CurrentIndex,
1231 &NextVbo,
1232 &NextLbo,
1233 &NextByteCount );
1234
1235
1236 NT_ASSERT(NextVbo == StartingVbo);
1237
1238
1239 }
1240
1241 } // while ( CurrentIndex <= LastIndex )
1242
1243 //
1244 // Now set up the Irp->IoStatus. It will be modified by the
1245 // multi-completion routine in case of error or verify required.
1246 //
1247
1248 Irp->IoStatus.Status = STATUS_SUCCESS;
1249 Irp->IoStatus.Information = OriginalByteCount;
1250
1251 if (FlagOn(Irp->Flags, IRP_PAGING_IO)) {
1252 CollectDiskIoStats(FcbOrDcb->Vcb, IrpContext->MajorFunction,
1253 FlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_USER_IO), NextRun);
1254 }
1255
1256 //
1257 // OK, now do the I/O.
1258 //
1259
1260 _SEH2_TRY {
1261
1262 DebugTrace( 0, Dbg, "Passing Multiple Irps on to Disk Driver\n", 0 );
1263
1264 FatMultipleAsync( IrpContext,
1265 FcbOrDcb->Vcb,
1266 Irp,
1267 NextRun,
1268 IoRuns );
1269
1270 } _SEH2_FINALLY {
1271
1272 if (IoRuns != StackIoRuns) {
1273
1274 ExFreePool( IoRuns );
1275 }
1276 } _SEH2_END;
1277 }
1278
1279 if (!Wait) {
1280
1281 DebugTrace(-1, Dbg, "FatNonCachedIo -> STATUS_PENDING\n", 0);
1282 return STATUS_PENDING;
1283 }
1284
1285 FatWaitSync( IrpContext );
1286
1287
1288 DebugTrace(-1, Dbg, "FatNonCachedIo -> 0x%08lx\n", Irp->IoStatus.Status);
1289 return Irp->IoStatus.Status;
1290 }
1291
1292
1293 _Requires_lock_held_(_Global_critical_region_)
1294 VOID
1295 FatNonCachedNonAlignedRead (
1296 IN PIRP_CONTEXT IrpContext,
1297 IN PIRP Irp,
1298 IN PFCB FcbOrDcb,
1299 IN ULONG StartingVbo,
1300 IN ULONG ByteCount
1301 )
1302
1303 /*++
1304
1305 Routine Description:
1306
1307 This routine performs the non-cached disk io described in its parameters.
1308 This routine differs from the above in that the range does not have to be
1309 sector aligned. This is accomplished with the use of intermediate buffers.
1310
1311 Arguments:
1312
1313 IrpContext->MajorFunction - Supplies either IRP_MJ_READ or IRP_MJ_WRITE.
1314
1315 Irp - Supplies the requesting Irp.
1316
1317 FcbOrDcb - Supplies the file to act on.
1318
1319 StartingVbo - The starting point for the operation.
1320
1321 ByteCount - The length of the operation.
1322
1323 Return Value:
1324
1325 None.
1326
1327 --*/
1328
1329 {
1330 //
1331 // Declare some local variables for enumeration through the
1332 // runs of the file, and an array to store parameters for
1333 // parallel I/Os
1334 //
1335
1336 LBO NextLbo;
1337 ULONG NextByteCount;
1338 BOOLEAN NextIsAllocated;
1339
1340 ULONG SectorSize;
1341 ULONG BytesToCopy;
1342 ULONG OriginalByteCount;
1343 ULONG OriginalStartingVbo;
1344
1345 BOOLEAN EndOnMax;
1346
1347 PUCHAR UserBuffer;
1348 PUCHAR DiskBuffer = NULL;
1349
1350 PMDL Mdl;
1351 PMDL SavedMdl;
1352 PVOID SavedUserBuffer;
1353
1354 PAGED_CODE();
1355
1356 DebugTrace(+1, Dbg, "FatNonCachedNonAlignedRead\n", 0);
1357 DebugTrace( 0, Dbg, "Irp = %p\n", Irp );
1358 DebugTrace( 0, Dbg, "MajorFunction = %08lx\n", IrpContext->MajorFunction );
1359 DebugTrace( 0, Dbg, "FcbOrDcb = %p\n", FcbOrDcb );
1360 DebugTrace( 0, Dbg, "StartingVbo = %08lx\n", StartingVbo );
1361 DebugTrace( 0, Dbg, "ByteCount = %08lx\n", ByteCount );
1362
1363 //
1364 // Initialize some locals.
1365 //
1366
1367 OriginalByteCount = ByteCount;
1368 OriginalStartingVbo = StartingVbo;
1369 SectorSize = FcbOrDcb->Vcb->Bpb.BytesPerSector;
1370
1371 NT_ASSERT( FlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT) );
1372
1373 //
1374 // For nonbuffered I/O, we need the buffer locked in all
1375 // cases.
1376 //
1377 // This call may raise. If this call succeeds and a subsequent
1378 // condition is raised, the buffers are unlocked automatically
1379 // by the I/O system when the request is completed, via the
1380 // Irp->MdlAddress field.
1381 //
1382
1383 FatLockUserBuffer( IrpContext,
1384 Irp,
1385 IoWriteAccess,
1386 ByteCount );
1387
1388 UserBuffer = FatMapUserBuffer( IrpContext, Irp );
1389
1390 //
1391 // Allocate the local buffer
1392 //
1393
1394 DiskBuffer = FsRtlAllocatePoolWithTag( NonPagedPoolNxCacheAligned,
1395 (ULONG) ROUND_TO_PAGES( SectorSize ),
1396 TAG_IO_BUFFER );
1397
1398 //
1399 // We use a try block here to ensure the buffer is freed, and to
1400 // fill in the correct byte count in the Iosb.Information field.
1401 //
1402
1403 _SEH2_TRY {
1404
1405 //
1406 // If the beginning of the request was not aligned correctly, read in
1407 // the first part first.
1408 //
1409
1410 if ( StartingVbo & (SectorSize - 1) ) {
1411
1412 VBO Hole;
1413
1414 //
1415 // Try to lookup the first run.
1416 //
1417
1418 FatLookupFileAllocation( IrpContext,
1419 FcbOrDcb,
1420 StartingVbo,
1421 &NextLbo,
1422 &NextByteCount,
1423 &NextIsAllocated,
1424 &EndOnMax,
1425 NULL );
1426
1427 //
1428 // We just added the allocation, thus there must be at least
1429 // one entry in the mcb corresponding to our write, ie.
1430 // NextIsAllocated must be true. If not, the pre-existing file
1431 // must have an allocation error.
1432 //
1433
1434 if ( !NextIsAllocated ) {
1435
1436 FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
1437
1438 FatRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR );
1439 }
1440
1441 FatSingleNonAlignedSync( IrpContext,
1442 FcbOrDcb->Vcb,
1443 DiskBuffer,
1444 NextLbo & ~((LONG)SectorSize - 1),
1445 SectorSize,
1446 Irp );
1447
1448 if (!NT_SUCCESS( Irp->IoStatus.Status )) {
1449
1450 try_return( NOTHING );
1451 }
1452
1453 //
1454 // Now copy the part of the first sector that we want to the user
1455 // buffer.
1456 //
1457
1458 Hole = StartingVbo & (SectorSize - 1);
1459
1460 BytesToCopy = ByteCount >= SectorSize - Hole ?
1461 SectorSize - Hole : ByteCount;
1462
1463 RtlCopyMemory( UserBuffer, DiskBuffer + Hole, BytesToCopy );
1464
1465 StartingVbo += BytesToCopy;
1466 ByteCount -= BytesToCopy;
1467
1468 if ( ByteCount == 0 ) {
1469
1470 try_return( NOTHING );
1471 }
1472 }
1473
1474 NT_ASSERT( (StartingVbo & (SectorSize - 1)) == 0 );
1475
1476 //
1477 // If there is a tail part that is not sector aligned, read it.
1478 //
1479
1480 if ( ByteCount & (SectorSize - 1) ) {
1481
1482 VBO LastSectorVbo;
1483
1484 LastSectorVbo = StartingVbo + (ByteCount & ~(SectorSize - 1));
1485
1486 //
1487 // Try to lookup the last part of the requested range.
1488 //
1489
1490 FatLookupFileAllocation( IrpContext,
1491 FcbOrDcb,
1492 LastSectorVbo,
1493 &NextLbo,
1494 &NextByteCount,
1495 &NextIsAllocated,
1496 &EndOnMax,
1497 NULL );
1498
1499 //
1500 // We just added the allocation, thus there must be at least
1501 // one entry in the mcb corresponding to our write, ie.
1502 // NextIsAllocated must be true. If not, the pre-existing file
1503 // must have an allocation error.
1504 //
1505
1506 if ( !NextIsAllocated ) {
1507
1508 FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
1509
1510 FatRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR );
1511 }
1512
1513 FatSingleNonAlignedSync( IrpContext,
1514 FcbOrDcb->Vcb,
1515 DiskBuffer,
1516 NextLbo,
1517 SectorSize,
1518 Irp );
1519
1520 if (!NT_SUCCESS( Irp->IoStatus.Status )) {
1521
1522 try_return( NOTHING );
1523 }
1524
1525 //
1526 // Now copy over the part of this last sector that we need.
1527 //
1528
1529 BytesToCopy = ByteCount & (SectorSize - 1);
1530
1531 UserBuffer += LastSectorVbo - OriginalStartingVbo;
1532
1533 RtlCopyMemory( UserBuffer, DiskBuffer, BytesToCopy );
1534
1535 ByteCount -= BytesToCopy;
1536
1537 if ( ByteCount == 0 ) {
1538
1539 try_return( NOTHING );
1540 }
1541 }
1542
1543 NT_ASSERT( ((StartingVbo | ByteCount) & (SectorSize - 1)) == 0 );
1544
1545 //
1546 // Now build a Mdl describing the sector aligned balance of the transfer,
1547 // and put it in the Irp, and read that part.
1548 //
1549
1550 SavedMdl = Irp->MdlAddress;
1551 Irp->MdlAddress = NULL;
1552
1553 SavedUserBuffer = Irp->UserBuffer;
1554
1555 Irp->UserBuffer = (PUCHAR)MmGetMdlVirtualAddress( SavedMdl ) +
1556 (StartingVbo - OriginalStartingVbo);
1557
1558 Mdl = IoAllocateMdl( Irp->UserBuffer,
1559 ByteCount,
1560 FALSE,
1561 FALSE,
1562 Irp );
1563
1564 if (Mdl == NULL) {
1565
1566 Irp->MdlAddress = SavedMdl;
1567 Irp->UserBuffer = SavedUserBuffer;
1568 FatRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES );
1569 }
1570
1571 IoBuildPartialMdl( SavedMdl,
1572 Mdl,
1573 Irp->UserBuffer,
1574 ByteCount );
1575
1576 //
1577 // Try to read in the pages.
1578 //
1579
1580 _SEH2_TRY {
1581
1582 FatNonCachedIo( IrpContext,
1583 Irp,
1584 FcbOrDcb,
1585 StartingVbo,
1586 ByteCount,
1587 ByteCount,
1588 0 );
1589
1590 } _SEH2_FINALLY {
1591
1592 IoFreeMdl( Irp->MdlAddress );
1593
1594 Irp->MdlAddress = SavedMdl;
1595 Irp->UserBuffer = SavedUserBuffer;
1596 } _SEH2_END;
1597
1598 try_exit: NOTHING;
1599
1600 } _SEH2_FINALLY {
1601
1602 ExFreePool( DiskBuffer );
1603
1604 if ( !_SEH2_AbnormalTermination() && NT_SUCCESS(Irp->IoStatus.Status) ) {
1605
1606 Irp->IoStatus.Information = OriginalByteCount;
1607
1608 //
1609 // We now flush the user's buffer to memory.
1610 //
1611
1612 KeFlushIoBuffers( Irp->MdlAddress, TRUE, FALSE );
1613 }
1614 } _SEH2_END;
1615
1616 DebugTrace(-1, Dbg, "FatNonCachedNonAlignedRead -> VOID\n", 0);
1617 return;
1618 }
1619
1620
1621 VOID
1622 FatMultipleAsync (
1623 IN PIRP_CONTEXT IrpContext,
1624 IN PVCB Vcb,
1625 IN PIRP MasterIrp,
1626 IN ULONG MultipleIrpCount,
1627 IN PIO_RUN IoRuns
1628 )
1629
1630 /*++
1631
1632 Routine Description:
1633
1634 This routine first does the initial setup required of a Master IRP that is
1635 going to be completed using associated IRPs. This routine should not
1636 be used if only one async request is needed; instead, the single read/write
1637 async routines should be called.
1638
1639 A context parameter is initialized, to serve as a communications area
1640 between here and the common completion routine. This initialization
1641 includes allocation of a spinlock. The spinlock is deallocated in the
1642 FatWaitSync routine, so it is essential that the caller ensure that
1643 FatWaitSync is always called, under all circumstances, following a call
1644 to this routine.
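
    For example (illustrative), a waiting caller such as FatNonCachedIo
    above pairs the two roughly as:

        FatMultipleAsync( IrpContext, Vcb, Irp, NextRun, IoRuns );
        ...
        FatWaitSync( IrpContext );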
1645
1646 Next this routine reads or writes one or more contiguous sectors from
1647 a device asynchronously, and is used if there are multiple reads for a
1648 master IRP. A completion routine is used to synchronize with the
1649 completion of all of the I/O requests started by calls to this routine.
1650
1651 Also, prior to calling this routine the caller must initialize the
1652 IoStatus field in the Context, with the correct success status and byte
1653 count which are expected if all of the parallel transfers complete
1654 successfully. After return this status will be unchanged if all requests
1655 were, in fact, successful. However, if one or more errors occur, the
1656 IoStatus will be modified to reflect the error status and byte count
1657 from the first run (by Vbo) which encountered an error. I/O status
1658 from all subsequent runs will not be indicated.
1659
1660 Arguments:
1661
1662 IrpContext->MajorFunction - Supplies either IRP_MJ_READ or IRP_MJ_WRITE.
1663
1664 Vcb - Supplies the device to be read
1665
1666 MasterIrp - Supplies the master Irp.
1667
1668 MultipleIrpCount - Supplies the number of multiple async requests
1669 that will be issued against the master irp.
1670
1671 IoRuns - Supplies an array containing the Vbo, Lbo, BufferOffset, and
1672 ByteCount for all the runs to be executed in parallel.
1673
1674 Return Value:
1675
1676 None.
1677
1678 --*/
1679
1680 {
1681 PIRP Irp;
1682 PIO_STACK_LOCATION IrpSp;
1683 PMDL Mdl;
1684 BOOLEAN Wait;
1685 PFAT_IO_CONTEXT Context;
1686 #ifndef __REACTOS__
1687 BOOLEAN IsAWrite = FALSE;
1688 ULONG Length = 0;
1689 #endif
1690
1691 ULONG UnwindRunCount = 0;
1692
1693 BOOLEAN ExceptionExpected = TRUE;
1694
1695 PAGED_CODE();
1696
1697 DebugTrace(+1, Dbg, "FatMultipleAsync\n", 0);
1698 DebugTrace( 0, Dbg, "MajorFunction = %08lx\n", IrpContext->MajorFunction );
1699 DebugTrace( 0, Dbg, "Vcb = %p\n", Vcb );
1700 DebugTrace( 0, Dbg, "MasterIrp = %p\n", MasterIrp );
1701 DebugTrace( 0, Dbg, "MultipleIrpCount = %08lx\n", MultipleIrpCount );
1702 DebugTrace( 0, Dbg, "IoRuns = %08lx\n", IoRuns );
1703
1704 //
1705 // If this I/O originated during FatVerifyVolume, bypass the
1706 // verify logic.
1707 //
1708
1709 if (Vcb->VerifyThread == KeGetCurrentThread()) {
1710
1711 SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_OVERRIDE_VERIFY );
1712 }
1713
1714 //
1715 // Set up things according to whether this is truly async.
1716 //
1717
1718 Wait = BooleanFlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT );
1719
1720 Context = IrpContext->FatIoContext;
1721
1722 //
1723 // Finish initializing Context, for use in Read/Write Multiple Asynch.
1724 //
1725
1726 Context->MasterIrp = MasterIrp;
1727
1728 IrpSp = IoGetCurrentIrpStackLocation( MasterIrp );
1729 #ifndef __REACTOS__
1730 IsAWrite = (IrpSp->MajorFunction == IRP_MJ_WRITE);
1731 Length = IrpSp->Parameters.Read.Length;
1732 #endif
1733
1734 _SEH2_TRY {
1735
1736 //
1737 // Iterate through the runs, doing everything that can fail
1738 //
1739
1740 for ( UnwindRunCount = 0;
1741 UnwindRunCount < MultipleIrpCount;
1742 UnwindRunCount++ ) {
1743
1744 //
1745 // Create an associated IRP, making sure there is one stack entry for
1746 // us, as well.
1747 //
1748
1749 IoRuns[UnwindRunCount].SavedIrp = 0;
1750
1751 Irp = IoMakeAssociatedIrp( MasterIrp,
1752 (CCHAR)(Vcb->TargetDeviceObject->StackSize + 1) );
1753
1754 if (Irp == NULL) {
1755
1756 FatRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES );
1757 }
1758
1759 IoRuns[UnwindRunCount].SavedIrp = Irp;
1760
1761 //
1762 // Allocate and build a partial Mdl for the request.
1763 //
1764
1765 Mdl = IoAllocateMdl( (PCHAR)MasterIrp->UserBuffer +
1766 IoRuns[UnwindRunCount].Offset,
1767 IoRuns[UnwindRunCount].ByteCount,
1768 FALSE,
1769 FALSE,
1770 Irp );
1771
1772 if (Mdl == NULL) {
1773
1774 FatRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES );
1775 }
1776
1777 //
1778 // Sanity Check
1779 //
1780
1781 NT_ASSERT( Mdl == Irp->MdlAddress );
1782
1783 IoBuildPartialMdl( MasterIrp->MdlAddress,
1784 Mdl,
1785 (PCHAR)MasterIrp->UserBuffer +
1786 IoRuns[UnwindRunCount].Offset,
1787 IoRuns[UnwindRunCount].ByteCount );
1788
1789 //
1790 // Get the first IRP stack location in the associated Irp
1791 //
1792
1793 IoSetNextIrpStackLocation( Irp );
1794 IrpSp = IoGetCurrentIrpStackLocation( Irp );
1795
1796 //
1797 // Setup the Stack location to describe our read.
1798 //
1799
1800 IrpSp->MajorFunction = IrpContext->MajorFunction;
1801 IrpSp->Parameters.Read.Length = IoRuns[UnwindRunCount].ByteCount;
1802 IrpSp->Parameters.Read.ByteOffset.QuadPart = IoRuns[UnwindRunCount].Vbo;
1803
1804 //
1805 // Set up the completion routine address in our stack frame.
1806 //
1807
1808 IoSetCompletionRoutine( Irp,
1809 Wait ?
1810 &FatMultiSyncCompletionRoutine :
1811 &FatMultiAsyncCompletionRoutine,
1812 Context,
1813 TRUE,
1814 TRUE,
1815 TRUE );
1816
1817 //
1818 // Setup the next IRP stack location in the associated Irp for the disk
1819 // driver beneath us.
1820 //
1821
1822 IrpSp = IoGetNextIrpStackLocation( Irp );
1823
1824 //
1825 // Setup the Stack location to do a read from the disk driver.
1826 //
1827
1828 IrpSp->MajorFunction = IrpContext->MajorFunction;
1829 IrpSp->Parameters.Read.Length = IoRuns[UnwindRunCount].ByteCount;
1830 IrpSp->Parameters.Read.ByteOffset.QuadPart = IoRuns[UnwindRunCount].Lbo;
1831
1832 //
1833 // If this Irp is the result of a WriteThough operation,
1834 // tell the device to write it through.
1835 //
1836
1837 if (FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_WRITE_THROUGH )) {
1838
1839 SetFlag( IrpSp->Flags, SL_WRITE_THROUGH );
1840 }
1841
1842 //
1843 // If this I/O requires override verify, bypass the verify logic.
1844 //
1845
1846 if (FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_OVERRIDE_VERIFY )) {
1847
1848 SetFlag( IrpSp->Flags, SL_OVERRIDE_VERIFY_VOLUME );
1849 }
1850 }
1851
1852 //
1853 // Now we no longer expect an exception. If the driver raises, we
1854 // must bugcheck, because we do not know how to recover from that
1855 // case.
1856 //
1857
1858 ExceptionExpected = FALSE;
1859
1860 //
1861 // We only need to set the associated IRP count in the master irp to
1862 // make it a master IRP. But we set the count to one more than our
1863 // caller requested, because we do not want the I/O system to complete
1864 // the I/O. We also set our own count.
1865 //
1866
1867 Context->IrpCount = MultipleIrpCount;
1868 MasterIrp->AssociatedIrp.IrpCount = MultipleIrpCount;
1869
1870 if (Wait) {
1871
1872 MasterIrp->AssociatedIrp.IrpCount += 1;
1873 }
1874 else if (FlagOn( Context->Wait.Async.ResourceThreadId, 3 )) {
1875
1876 //
1877 // For async requests if we acquired locks, transition the lock owners to an
1878 // object, since when we return this thread could go away before request
1879 // completion, and the resource package may try to boost priority.
1880 //
1881
1882 if (Context->Wait.Async.Resource != NULL) {
1883
1884 ExSetResourceOwnerPointer( Context->Wait.Async.Resource,
1885 (PVOID)Context->Wait.Async.ResourceThreadId );
1886 }
1887
1888 if (Context->Wait.Async.Resource2 != NULL) {
1889
1890 ExSetResourceOwnerPointer( Context->Wait.Async.Resource2,
1891 (PVOID)Context->Wait.Async.ResourceThreadId );
1892 }
1893 }
1894
1895 //
1896 // Back up a copy of the IrpContext flags for later use in async completion.
1897 //
1898
1899 Context->IrpContextFlags = IrpContext->Flags;
1900
1901 //
1902 // Now that all the dangerous work is done, issue the read requests
1903 //
1904
1905 for (UnwindRunCount = 0;
1906 UnwindRunCount < MultipleIrpCount;
1907 UnwindRunCount++) {
1908
1909 Irp = IoRuns[UnwindRunCount].SavedIrp;
1910
1911 DebugDoit( FatIoCallDriverCount += 1);
1912
1913 //
1914 // If IoCallDriver returns an error, it has completed the Irp
1915 // and the error will be caught by our completion routines
1916 // and dealt with as a normal IO error.
1917 //
1918
1919 (VOID)FatLowLevelReadWrite( IrpContext,
1920 Vcb->TargetDeviceObject,
1921 Irp,
1922 Vcb );
1923 }
1924
1925 //
1926 // We just issued an IO to the storage stack, update the counters indicating so.
1927 //
1928
1929 if (FatDiskAccountingEnabled) {
1930
1931 FatUpdateIOCountersPCW( IsAWrite, Length );
1932 }
1933
1934 } _SEH2_FINALLY {
1935
1936 ULONG i;
1937
1938 DebugUnwind( FatMultipleAsync );
1939
1940 //
1941 // Only allocating the spinlock, making the associated Irps
1942 // and allocating the Mdls can fail.
1943 //
1944
1945 if ( _SEH2_AbnormalTermination() ) {
1946
1947 //
1948 // If the driver raised, we are hosed. He is not supposed to raise,
1949 // and it is impossible for us to figure out how to clean up.
1950 //
1951
1952 if (!ExceptionExpected) {
1953 NT_ASSERT( ExceptionExpected );
1954 #ifdef _MSC_VER
1955 #pragma prefast( suppress:28159, "things are seriously wrong if we get here" )
1956 #endif
1957 FatBugCheck( 0, 0, 0 );
1958 }
1959
1960 //
1961 // Unwind
1962 //
1963
1964 for (i = 0; i <= UnwindRunCount; i++) {
1965
1966 if ( (Irp = IoRuns[i].SavedIrp) != NULL ) {
1967
1968 if ( Irp->MdlAddress != NULL ) {
1969
1970 IoFreeMdl( Irp->MdlAddress );
1971 }
1972
1973 IoFreeIrp( Irp );
1974 }
1975 }
1976 }
1977
1978 //
1979 // And return to our caller
1980 //
1981
1982 DebugTrace(-1, Dbg, "FatMultipleAsync -> VOID\n", 0);
1983 } _SEH2_END;
1984
1985 return;
1986 }
1987
1988
1989 VOID
1990 FatSingleAsync (
1991 IN PIRP_CONTEXT IrpContext,
1992 IN PVCB Vcb,
1993 IN LBO Lbo,
1994 IN ULONG ByteCount,
1995 IN PIRP Irp
1996 )
1997
1998 /*++
1999
2000 Routine Description:
2001
2002 This routine reads or writes one or more contiguous sectors from a device
2003 asynchronously, and is used if there is only one read necessary to
2004 complete the IRP. It implements the read by simply filling
2005 in the next stack frame in the Irp, and passing it on. The transfer
2006 occurs to the single buffer originally specified in the user request.
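
    For example (illustrative, mirroring the single-run path taken by
    FatNonCachedIo above):

        FatSingleAsync( IrpContext, FcbOrDcb->Vcb, NextLbo, ByteCount, Irp );
        ...
        if (Wait) { FatWaitSync( IrpContext ); }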
2007
2008 Arguments:
2009
2010 IrpContext->MajorFunction - Supplies either IRP_MJ_READ or IRP_MJ_WRITE.
2011
2012 Vcb - Supplies the device to read
2013
2014 Lbo - Supplies the starting Logical Byte Offset to begin reading from
2015
2016 ByteCount - Supplies the number of bytes to read from the device
2017
2018 Irp - Supplies the master Irp to be associated with the async
2019 request.
2020
2021 Return Value:
2022
2023 None.
2024
2025 --*/
2026
2027 {
2028 PIO_STACK_LOCATION IrpSp;
2029 PFAT_IO_CONTEXT Context;
2030 #ifndef __REACTOS__
2031 BOOLEAN IsAWrite = FALSE;
2032 #endif
2033
2034 PAGED_CODE();
2035
2036 DebugTrace(+1, Dbg, "FatSingleAsync\n", 0);
2037 DebugTrace( 0, Dbg, "MajorFunction = %08lx\n", IrpContext->MajorFunction );
2038 DebugTrace( 0, Dbg, "Vcb = %p\n", Vcb );
2039 DebugTrace( 0, Dbg, "Lbo = %08lx\n", Lbo);
2040 DebugTrace( 0, Dbg, "ByteCount = %08lx\n", ByteCount);
2041 DebugTrace( 0, Dbg, "Irp = %p\n", Irp );
2042
2043 //
2044 // If this I/O originated during FatVerifyVolume, bypass the
2045 // verify logic.
2046 //
2047
2048 if (Vcb->VerifyThread == KeGetCurrentThread()) {
2049
2050 SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_OVERRIDE_VERIFY );
2051 }
2052
2053 //
2054 // Set up the completion routine address in our stack frame.
2055 //
2056
2057 IoSetCompletionRoutine( Irp,
2058 FlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT) ?
2059 &FatSingleSyncCompletionRoutine :
2060 &FatSingleAsyncCompletionRoutine,
2061 IrpContext->FatIoContext,
2062 TRUE,
2063 TRUE,
2064 TRUE );
2065
2066 //
2067 // Setup the next IRP stack location in the associated Irp for the disk
2068 // driver beneath us.
2069 //
2070
2071 IrpSp = IoGetNextIrpStackLocation( Irp );
2072
2073 //
2074 // Setup the Stack location to do a read from the disk driver.
2075 //
2076
2077 IrpSp->MajorFunction = IrpContext->MajorFunction;
2078 IrpSp->Parameters.Read.Length = ByteCount;
2079 IrpSp->Parameters.Read.ByteOffset.QuadPart = Lbo;
2080
2081 #ifndef __REACTOS__
2082 IsAWrite = (IrpSp->MajorFunction == IRP_MJ_WRITE);
2083 #endif
2084
2085 //
2086 // If this Irp is the result of a WriteThough operation,
2087 // tell the device to write it through.
2088 //
2089
2090 if (FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_WRITE_THROUGH )) {
2091
2092 SetFlag( IrpSp->Flags, SL_WRITE_THROUGH );
2093 }
2094
2095 //
2096 // If this I/O requires override verify, bypass the verify logic.
2097 //
2098
2099 if (FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_OVERRIDE_VERIFY )) {
2100
2101 SetFlag( IrpSp->Flags, SL_OVERRIDE_VERIFY_VOLUME );
2102 }
2103
2104 //
2105 // For async requests if we acquired locks, transition the lock owners to an
2106 // object, since when we return this thread could go away before request
2107 // completion, and the resource package may try to boost priority.
2108 //
2109
2110 if (!FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT ) &&
2111 FlagOn( IrpContext->FatIoContext->Wait.Async.ResourceThreadId, 3 )) {
2112
2113 Context = IrpContext->FatIoContext;
2114
2115 if (Context->Wait.Async.Resource != NULL) {
2116
2117 ExSetResourceOwnerPointer( Context->Wait.Async.Resource,
2118 (PVOID)Context->Wait.Async.ResourceThreadId );
2119 }
2120
2121 if (Context->Wait.Async.Resource2 != NULL) {
2122
2123 ExSetResourceOwnerPointer( Context->Wait.Async.Resource2,
2124 (PVOID)Context->Wait.Async.ResourceThreadId );
2125 }
2126 }
2127
2128 //
2129 // Back up a copy of the IrpContext flags for later use in async completion.
2130 //
2131
2132 IrpContext->FatIoContext->IrpContextFlags = IrpContext->Flags;
2133
2134 //
2135 // Issue the read request
2136 //
2137
2138 DebugDoit( FatIoCallDriverCount += 1);
2139
2140 //
2141 // If IoCallDriver returns an error, it has completed the Irp
2142 // and the error will be caught by our completion routines
2143 // and dealt with as a normal IO error.
2144 //
2145
2146 (VOID)FatLowLevelReadWrite( IrpContext,
2147 Vcb->TargetDeviceObject,
2148 Irp,
2149 Vcb );
2150
2151 //
2152 // We just issued an IO to the storage stack, update the counters indicating so.
2153 //
2154
2155 if (FatDiskAccountingEnabled) {
2156
2157 FatUpdateIOCountersPCW( IsAWrite, ByteCount );
2158 }
2159
2160 //
2161 // And return to our caller
2162 //
2163
2164 DebugTrace(-1, Dbg, "FatSingleAsync -> VOID\n", 0);
2165
2166 return;
2167 }
2168
2169
2170 VOID
2171 FatSingleNonAlignedSync (
2172 IN PIRP_CONTEXT IrpContext,
2173 IN PVCB Vcb,
2174 IN PUCHAR Buffer,
2175 IN LBO Lbo,
2176 IN ULONG ByteCount,
2177 IN PIRP Irp
2178 )
2179
2180 /*++
2181
2182 Routine Description:
2183
2184 This routine reads or writes one or more contiguous sectors from a device
2185 synchronously, and does so to a buffer that must come from non-paged
2186 pool. It saves a pointer to the Irp's original Mdl, and creates a new
2187 one describing the given buffer. It implements the read by simply filling
2188 in the next stack frame in the Irp, and passing it on. The transfer
2189 occurs to the intermediate buffer supplied by the caller.
2190
2191 Arguments:
2192
2193 IrpContext->MajorFunction - Supplies either IRP_MJ_READ or IRP_MJ_WRITE.
2194
2195 Vcb - Supplies the device to read
2196
2197 Buffer - Supplies a buffer from non-paged pool.
2198
2199 Lbo - Supplies the starting Logical Byte Offset to begin reading from
2200
2201 ByteCount - Supplies the number of bytes to read from the device
2202
2203 Irp - Supplies the master Irp to be associated with the async
2204 request.
2205
2206 Return Value:
2207
2208 None.
2209
2210 --*/
2211
2212 {
2213 PIO_STACK_LOCATION IrpSp;
2214
2215 PMDL Mdl;
2216 PMDL SavedMdl;
2217 #ifndef __REACTOS__
2218 BOOLEAN IsAWrite = FALSE;
2219 #endif
2220
2221 PAGED_CODE();
2222
2223 DebugTrace(+1, Dbg, "FatSingleNonAlignedSync\n", 0);
2224 DebugTrace( 0, Dbg, "MajorFunction = %08lx\n", IrpContext->MajorFunction );
2225 DebugTrace( 0, Dbg, "Vcb = %p\n", Vcb );
2226 DebugTrace( 0, Dbg, "Buffer = %p\n", Buffer );
2227 DebugTrace( 0, Dbg, "Lbo = %08lx\n", Lbo);
2228 DebugTrace( 0, Dbg, "ByteCount = %08lx\n", ByteCount);
2229 DebugTrace( 0, Dbg, "Irp = %p\n", Irp );
2230
2231 //
2232 // Create a new Mdl describing the buffer, saving the current one in the
2233 // Irp
2234 //
2235
2236 SavedMdl = Irp->MdlAddress;
2237
2238 Irp->MdlAddress = 0;
2239
2240 Mdl = IoAllocateMdl( Buffer,
2241 ByteCount,
2242 FALSE,
2243 FALSE,
2244 Irp );
2245
2246 if (Mdl == NULL) {
2247
2248 Irp->MdlAddress = SavedMdl;
2249
2250 FatRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES );
2251 }
2252
2253 //
2254 // Lock the new Mdl in memory.
2255 //
2256
2257 _SEH2_TRY {
2258
2259 MmProbeAndLockPages( Mdl, KernelMode, IoWriteAccess );
2260
2261 } _SEH2_FINALLY {
2262
2263 if ( _SEH2_AbnormalTermination() ) {
2264
2265 IoFreeMdl( Mdl );
2266 Irp->MdlAddress = SavedMdl;
2267 }
2268 } _SEH2_END;
2269
2270 //
2271 // Set up the completion routine address in our stack frame.
2272 //
2273
2274 IoSetCompletionRoutine( Irp,
2275 &FatSingleSyncCompletionRoutine,
2276 IrpContext->FatIoContext,
2277 TRUE,
2278 TRUE,
2279 TRUE );
2280
2281 //
2282 // Setup the next IRP stack location in the associated Irp for the disk
2283 // driver beneath us.
2284 //
2285
2286 IrpSp = IoGetNextIrpStackLocation( Irp );
2287
2288 //
2289 // Setup the Stack location to do a read from the disk driver.
2290 //
2291
2292 IrpSp->MajorFunction = IrpContext->MajorFunction;
2293 IrpSp->Parameters.Read.Length = ByteCount;
2294 IrpSp->Parameters.Read.ByteOffset.QuadPart = Lbo;
2295
2296 #ifndef __REACTOS__
2297 IsAWrite = (IrpSp->MajorFunction == IRP_MJ_WRITE);
2298 #endif
2299
2300 //
2301 // If this I/O originated during FatVerifyVolume, bypass the
2302 // verify logic.
2303 //
2304
2305 if (Vcb->VerifyThread == KeGetCurrentThread()) {
2306
2307 SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_OVERRIDE_VERIFY );
2308 }
2309
2310 //
2311 // If this I/O requires override verify, bypass the verify logic.
2312 //
2313
2314 if (FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_OVERRIDE_VERIFY )) {
2315
2316 SetFlag( IrpSp->Flags, SL_OVERRIDE_VERIFY_VOLUME );
2317 }
2318
2319 //
2320 // Issue the read request
2321 //
2322
2323 DebugDoit( FatIoCallDriverCount += 1);
2324
2325 //
2326 // If IoCallDriver returns an error, it has completed the Irp
2327 // and the error will be caught by our completion routines
2328 // and dealt with as a normal IO error.
2329 //
2330
2331 _SEH2_TRY {
2332
2333 (VOID)FatLowLevelReadWrite( IrpContext,
2334 Vcb->TargetDeviceObject,
2335 Irp,
2336 Vcb );
2337
2338 FatWaitSync( IrpContext );
2339
2340 } _SEH2_FINALLY {
2341
2342 MmUnlockPages( Mdl );
2343 IoFreeMdl( Mdl );
2344 Irp->MdlAddress = SavedMdl;
2345 } _SEH2_END;
2346
2347 //
2348 // We just issued an I/O to the storage stack, so update the counters to reflect it.
2349 //
2350
2351 if (FatDiskAccountingEnabled) {
2352
2353 FatUpdateIOCountersPCW( IsAWrite, ByteCount );
2354 }
2355
2356 //
2357 // And return to our caller
2358 //
2359
2360 DebugTrace(-1, Dbg, "FatSingleNonAlignedSync -> VOID\n", 0);
2361
2362 return;
2363 }
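
//
// Illustrative sketch (not part of this driver): the Mdl swap performed by
// FatSingleNonAlignedSync, in isolation.  A temporary Mdl describing the
// private non-paged buffer replaces the caller's Mdl for the duration of
// the transfer and is then torn down again.  MyTransferViaPrivateBuffer is
// a hypothetical name, and error handling is omitted.
//
// VOID
// MyTransferViaPrivateBuffer (IN PIRP Irp, IN PUCHAR Buffer, IN ULONG ByteCount)
// {
//     PMDL SavedMdl = Irp->MdlAddress;
//     PMDL Mdl;
//
//     Irp->MdlAddress = NULL;
//
//     //  Describe and lock the private buffer for the transfer.
//
//     Mdl = IoAllocateMdl( Buffer, ByteCount, FALSE, FALSE, Irp );
//     MmProbeAndLockPages( Mdl, KernelMode, IoWriteAccess );
//
//     //  ... fill in the next stack location, send the Irp down, and
//     //  wait for the completion routine to signal ...
//
//     //  Tear down the temporary Mdl and restore the original one.
//
//     MmUnlockPages( Mdl );
//     IoFreeMdl( Mdl );
//     Irp->MdlAddress = SavedMdl;
// }
//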
2364
2365
2366 VOID
2367 FatWaitSync (
2368 IN PIRP_CONTEXT IrpContext
2369 )
2370
2371 /*++
2372
2373 Routine Description:
2374
2375 This routine waits for one or more previously started I/O requests
2376 from the above routines, by simply waiting on the event.
2377
2378 Arguments:
2379
2380 Return Value:
2381
2382 None
2383
2384 --*/
2385
2386 {
2387 PAGED_CODE();
2388
2389 DebugTrace(+1, Dbg, "FatWaitSync, Context = %p\n", IrpContext->FatIoContext );
2390
2391 KeWaitForSingleObject( &IrpContext->FatIoContext->Wait.SyncEvent,
2392 Executive, KernelMode, FALSE, NULL );
2393
2394 KeClearEvent( &IrpContext->FatIoContext->Wait.SyncEvent );
2395
2396 DebugTrace(-1, Dbg, "FatWaitSync -> VOID\n", 0 );
2397 }
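
//
// Illustrative sketch (not part of this driver): the usual pairing of an
// async issue routine with FatWaitSync.  The completion routine signals
// Wait.SyncEvent in the FatIoContext; FatWaitSync waits for it and then
// clears it so the same context can be reused for the next transfer.  The
// argument list of the issue routine is elided here.
//
//     FatSingleAsync( IrpContext, ... );
//
//     FatWaitSync( IrpContext );
//
//     //  Irp->IoStatus now holds the result of the transfer.
//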
2398
2399
2400 //
2401 // Internal Support Routine
2402 //
2403
2404 NTSTATUS
2405 NTAPI
2406 FatMultiSyncCompletionRoutine (
2407 _In_ PDEVICE_OBJECT DeviceObject,
2408 _In_ PIRP Irp,
2409 _In_reads_opt_(_Inexpressible_("varies")) PVOID Contxt
2410 )
2411
2412 /*++
2413
2414 Routine Description:
2415
2416 This is the completion routine for all reads and writes started via
2417 FatRead/WriteMultipleAsynch.  It synchronizes its operation across
2418 processors with the other instances of itself via the interlocked
2419 IrpCount found in the Context parameter.
2420 
2421 The completion routine has the following responsibilities:
2422 
2423 If the individual request was completed with an error, then
2424 this completion routine must remember that error status in the
2425 master Irp, so the caller sees the failure once all of the
2426 associated requests have completed.
2427 
2428 When the IrpCount reaches zero, it sets the event in the Context
2429 parameter to signal the caller that all of the asynch requests
2430 are done.
2431
2432 Arguments:
2433
2434 DeviceObject - Pointer to the file system device object.
2435
2436 Irp - Pointer to the associated Irp which is being completed. (This
2437 Irp will no longer be accessible after this routine returns.)
2438
2439 Contxt - The context parameter which was specified for all of
2440 the multiple asynch I/O requests for this MasterIrp.
2441
2442 Return Value:
2443
2444 The routine returns STATUS_MORE_PROCESSING_REQUIRED so that we can
2445 immediately complete the Master Irp without being in a race condition
2446 with the IoCompleteRequest thread trying to decrement the IrpCount in
2447 the Master Irp.
2448
2449 --*/
2450
2451 {
2452
2453 PFAT_IO_CONTEXT Context = Contxt;
2454 PIRP MasterIrp = Context->MasterIrp;
2455
2456 DebugTrace(+1, Dbg, "FatMultiSyncCompletionRoutine, Context = %p\n", Context );
2457
2458 //
2459 // If we got an error (or verify required), remember it in the Irp
2460 //
2461
2462 if (!NT_SUCCESS( Irp->IoStatus.Status )) {
2463
2464 #if DBG
2465 if(!( NT_SUCCESS( FatBreakOnInterestingIoCompletion ) || Irp->IoStatus.Status != FatBreakOnInterestingIoCompletion )) {
2466 DbgBreakPoint();
2467 }
2468 #endif
2469
2470 #ifdef SYSCACHE_COMPILE
2471 DbgPrint( "FAT SYSCACHE: MultiSync (IRP %08x for Master %08x) -> %08x\n", Irp, MasterIrp, Irp->IoStatus );
2472 #endif
2473
2474 MasterIrp->IoStatus = Irp->IoStatus;
2475 }
2476
2477 NT_ASSERT( !(NT_SUCCESS( Irp->IoStatus.Status ) && Irp->IoStatus.Information == 0 ));
2478
2479 //
2480 // We must do this here since IoCompleteRequest won't get a chance
2481 // on this associated Irp.
2482 //
2483
2484 IoFreeMdl( Irp->MdlAddress );
2485 IoFreeIrp( Irp );
2486
2487 if (InterlockedDecrement(&Context->IrpCount) == 0) {
2488
2489 FatDoCompletionZero( MasterIrp, Context );
2490 KeSetEvent( &Context->Wait.SyncEvent, 0, FALSE );
2491 }
2492
2493 DebugTrace(-1, Dbg, "FatMultiSyncCompletionRoutine -> SUCCESS\n", 0 );
2494
2495 UNREFERENCED_PARAMETER( DeviceObject );
2496
2497 return STATUS_MORE_PROCESSING_REQUIRED;
2498 }
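
//
// Illustrative sketch (not part of this driver): the "last associated Irp
// signals the waiter" pattern used by the completion routine above.  Every
// completion decrements the shared IrpCount; only the one that drives it
// to zero wakes the thread blocked in FatWaitSync.
//
//     if (InterlockedDecrement( &Context->IrpCount ) == 0) {
//
//         //  All associated requests have completed; any error has already
//         //  been copied into the master Irp, so just wake the waiter.
//
//         KeSetEvent( &Context->Wait.SyncEvent, 0, FALSE );
//     }
//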
2499
2500
2501 //
2502 // Internal Support Routine
2503 //
2504
2505 NTSTATUS
2506 NTAPI
2507 FatMultiAsyncCompletionRoutine (
2508 _In_ PDEVICE_OBJECT DeviceObject,
2509 _In_ PIRP Irp,
2510 _In_reads_opt_(_Inexpressible_("varies")) PVOID Contxt
2511 )
2512
2513 /*++
2514
2515 Routine Description:
2516
2517 This is the completion routine for all reads and writes started via
2518 FatRead/WriteMultipleAsynch.  It synchronizes its operation across
2519 processors with the other instances of itself via the interlocked
2520 IrpCount found in the Context parameter.
2521 
2522 The completion routine has the following responsibilities:
2523 
2524 If the individual request was completed with an error, then
2525 this completion routine must remember that error status in the
2526 master Irp, so the caller sees the failure once all of the
2527 associated requests have completed.
2528 
2529 When the IrpCount reaches zero, it finishes the master Irp by
2530 filling in its final status and information, releasing the held
2531 resources and freeing the context.
2532
2533 Arguments:
2534
2535 DeviceObject - Pointer to the file system device object.
2536
2537 Irp - Pointer to the associated Irp which is being completed. (This
2538 Irp will no longer be accessible after this routine returns.)
2539
2540 Contxt - The context parameter which was specified for all of
2541 the multiple asynch I/O requests for this MasterIrp.
2542
2543 Return Value:
2544
2545 The routine normally returns STATUS_SUCCESS, which lets completion
2546 of the master Irp proceed, or STATUS_MORE_PROCESSING_REQUIRED when
2547 the master Irp had to be posted to the Fsp for volume
2548 verification.
2549
2550 --*/
2551
2552 {
2553 NTSTATUS Status = STATUS_SUCCESS;
2554 PFAT_IO_CONTEXT Context = Contxt;
2555 PIRP MasterIrp = Context->MasterIrp;
2556 BOOLEAN PostRequest = FALSE;
2557
2558 DebugTrace(+1, Dbg, "FatMultiAsyncCompletionRoutine, Context = %p\n", Context );
2559
2560 //
2561 // If we got an error (or verify required), remember it in the Irp
2562 //
2563
2564 if (!NT_SUCCESS( Irp->IoStatus.Status )) {
2565
2566 #if DBG
2567 if (!( NT_SUCCESS( FatBreakOnInterestingIoCompletion ) || Irp->IoStatus.Status != FatBreakOnInterestingIoCompletion )) {
2568 DbgBreakPoint();
2569 }
2570 #endif
2571
2572 #ifdef SYSCACHE_COMPILE
2573 DbgPrint( "FAT SYSCACHE: MultiAsync (IRP %08x for Master %08x) -> %08x\n", Irp, MasterIrp, Irp->IoStatus );
2574 #endif
2575
2576 MasterIrp->IoStatus = Irp->IoStatus;
2577
2578 }
2579
2580 NT_ASSERT( !(NT_SUCCESS( Irp->IoStatus.Status ) && Irp->IoStatus.Information == 0 ));
2581
2582 if (InterlockedDecrement(&Context->IrpCount) == 0) {
2583
2584 FatDoCompletionZero( MasterIrp, Context );
2585
2586 if (NT_SUCCESS(MasterIrp->IoStatus.Status)) {
2587
2588 MasterIrp->IoStatus.Information =
2589 Context->Wait.Async.RequestedByteCount;
2590
2591 NT_ASSERT(MasterIrp->IoStatus.Information != 0);
2592
2593 //
2594 // Now if this wasn't PagingIo, set either the read or write bit.
2595 //
2596
2597 if (!FlagOn(MasterIrp->Flags, IRP_PAGING_IO)) {
2598
2599 SetFlag( Context->Wait.Async.FileObject->Flags,
2600 IoGetCurrentIrpStackLocation(MasterIrp)->MajorFunction == IRP_MJ_READ ?
2601 FO_FILE_FAST_IO_READ : FO_FILE_MODIFIED );
2602 }
2603
2604 } else {
2605
2606 //
2607 // Post STATUS_VERIFY_REQUIRED failures. Only post top level IRPs, because recursive I/Os
2608 // cannot process volume verification.
2609 //
2610
2611 if (!FlagOn(Context->IrpContextFlags, IRP_CONTEXT_FLAG_RECURSIVE_CALL) &&
2612 (MasterIrp->IoStatus.Status == STATUS_VERIFY_REQUIRED)) {
2613 PostRequest = TRUE;
2614 }
2615
2616 }
2617
2618 //
2619 // If this was a special async write, decrement the count. Set the
2620 // event if this was the final outstanding I/O for the file. We will
2621 // also want to queue an APC to deal with any error conditions.
2622 //
2623 _Analysis_assume_(!(Context->Wait.Async.NonPagedFcb) &&
2624 (ExInterlockedAddUlong( &Context->Wait.Async.NonPagedFcb->OutstandingAsyncWrites,
2625 0xffffffff,
2626 &FatData.GeneralSpinLock ) != 1));
2627 if ((Context->Wait.Async.NonPagedFcb) &&
2628 (ExInterlockedAddUlong( &Context->Wait.Async.NonPagedFcb->OutstandingAsyncWrites,
2629 0xffffffff,
2630 &FatData.GeneralSpinLock ) == 1)) {
2631
2632 KeSetEvent( Context->Wait.Async.NonPagedFcb->OutstandingAsyncEvent, 0, FALSE );
2633 }
2634
2635 //
2636 // Now release the resources.
2637 //
2638
2639 if (Context->Wait.Async.Resource != NULL) {
2640
2641 ExReleaseResourceForThreadLite( Context->Wait.Async.Resource,
2642 Context->Wait.Async.ResourceThreadId );
2643 }
2644
2645 if (Context->Wait.Async.Resource2 != NULL) {
2646
2647 ExReleaseResourceForThreadLite( Context->Wait.Async.Resource2,
2648 Context->Wait.Async.ResourceThreadId );
2649 }
2650
2651 //
2652 // Mark the master Irp pending
2653 //
2654
2655 IoMarkIrpPending( MasterIrp );
2656
2657 //
2658 // and finally, free the context record.
2659 //
2660
2661 ExFreePool( Context );
2662
2663 if (PostRequest) {
2664
2665 PIRP_CONTEXT IrpContext = NULL;
2666
2667 _SEH2_TRY {
2668
2669 IrpContext = FatCreateIrpContext(Irp, TRUE );
2670 ClearFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_RECURSIVE_CALL);
2671 FatFsdPostRequest( IrpContext, Irp );
2672 Status = STATUS_MORE_PROCESSING_REQUIRED;
2673
2674 } _SEH2_EXCEPT( FatExceptionFilter(NULL, _SEH2_GetExceptionInformation()) ) {
2675
2676 //
2677 // If we failed to post the IRP, we just have to return the failure
2678 // to the user. :(
2679 //
2680
2681 NOTHING;
2682 } _SEH2_END;
2683 }
2684 }
2685
2686 DebugTrace(-1, Dbg, "FatMultiAsyncCompletionRoutine -> SUCCESS\n", 0 );
2687
2688 UNREFERENCED_PARAMETER( DeviceObject );
2689
2690 return Status;
2691 }
2692
2693
2694 NTSTATUS
2695 FatPagingFileErrorHandler (
2696 IN PIRP Irp,
2697 IN PKEVENT Event OPTIONAL
2698 )
2699
2700 /*++
2701
2702 Routine Description:
2703
2704 This routine attempts to guarantee that the media is marked dirty
2705 with the surface test bit if a paging file IO fails.
2706
2707 The work done here has several basic problems
2708
2709 1) when paging file writes start failing, this is a good sign
2710 that the rest of the system is about to fall down around us
2711
2712 2) it has no forward progress guarantee
2713
2714 With Whistler, it is actually quite intentional that we're rejiggering
2715 the paging file write path to make forward progress at all times. This
2716 means that the cases where it *does* fail, we're truly seeing media errors
2717 and this is probably going to mean the paging file is going to stop working
2718 very soon.
2719
2720 It'd be nice to make this guarantee progress. It would need
2721
2722 1) a guaranteed worker thread which can only be used by items which
2723 will make forward progress (i.e., not block out this one)
2724
2725 2) the virtual volume file's pages containing the boot sector and
2726 1st FAT entry would have to be pinned resident and have a guaranteed
2727 mapping address
2728
2729 3) mark volume would have to have a stashed irp/mdl and roll the write
2730 irp, or use a generalized mechanism to guarantee issue of the irp
2731
2732 4) the lower stack would have to guarantee progress
2733
2734 Of these, 1 and 4 may actually exist shortly.
2735
2736 Arguments:
2737
2738 Irp - Pointer to the associated Irp which is being failed.
2739
2740 Event - Pointer to optional event to be signalled instead of completing
2741 the IRP
2742
2743 Return Value:
2744
2745 Returns STATUS_MORE_PROCESSING_REQUIRED if we managed to queue off the workitem,
2746 STATUS_SUCCESS otherwise.
2747
2748 --*/
2749
2750 {
2751 NTSTATUS Status;
2752
2753 //
2754 // If this was a media error, we want to chkdsk /r the next time we boot.
2755 //
2756
2757 if (FsRtlIsTotalDeviceFailure(Irp->IoStatus.Status)) {
2758
2759 Status = STATUS_SUCCESS;
2760
2761 } else {
2762
2763 PCLEAN_AND_DIRTY_VOLUME_PACKET Packet;
2764
2765 //
2766 // We are going to try to mark the volume needing recovery.
2767 // If we can't get pool, oh well....
2768 //
2769
2770 Packet = ExAllocatePoolWithTag(NonPagedPoolNx, sizeof(CLEAN_AND_DIRTY_VOLUME_PACKET), ' taF');
2771
2772 if ( Packet ) {
2773
2774 Packet->Vcb = &((PVOLUME_DEVICE_OBJECT)IoGetCurrentIrpStackLocation(Irp)->DeviceObject)->Vcb;
2775 Packet->Irp = Irp;
2776 Packet->Event = Event;
2777
2778 ExInitializeWorkItem( &Packet->Item,
2779 &FatFspMarkVolumeDirtyWithRecover,
2780 Packet );
2781
2782 #ifdef _MSC_VER
2783 #pragma prefast( suppress:28159, "prefast indicates this is obsolete, but it is ok for fastfat to use it" )
2784 #endif
2785 ExQueueWorkItem( &Packet->Item, CriticalWorkQueue );
2786
2787 Status = STATUS_MORE_PROCESSING_REQUIRED;
2788
2789 } else {
2790
2791 Status = STATUS_SUCCESS;
2792 }
2793 }
2794
2795 return Status;
2796 }
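
//
// Illustrative sketch (not part of this driver): the deferred work item
// pattern used above.  The completion path cannot block, so it packages
// the state it needs, queues a work item, and lets a system worker thread
// finish the job.  MyWorkerRoutine is a hypothetical stand-in for
// FatFspMarkVolumeDirtyWithRecover.
//
//     Packet = ExAllocatePoolWithTag( NonPagedPoolNx, sizeof(*Packet), ' taF' );
//
//     if (Packet != NULL) {
//
//         Packet->Irp = Irp;
//
//         ExInitializeWorkItem( &Packet->Item, &MyWorkerRoutine, Packet );
//         ExQueueWorkItem( &Packet->Item, CriticalWorkQueue );
//     }
//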
2797
2798
2799 //
2800 // Internal Support Routine
2801 //
2802
2803 NTSTATUS
2804 NTAPI
2805 FatPagingFileCompletionRoutineCatch (
2806 _In_ PDEVICE_OBJECT DeviceObject,
2807 _In_ PIRP Irp,
2808 _In_reads_opt_(_Inexpressible_("varies")) PVOID Contxt
2809 )
2810
2811 /*++
2812
2813 Routine Description:
2814
2815 This is the completion routine for all reads and writes started via
2816 FatPagingFileIo that reuse the master Irp (which we have to catch
2817 on the way back).  It is always invoked.
2818
2819 The completion routine has the following responsibility:
2820
2821 If the error implies a media problem, it enqueues a
2822 worker item to write out the dirty bit so that the next
2823 time we run we will do an autochk /r.  This is not forward
2824 progress guaranteed at the moment.
2825
2826 Clean up the Mdl used for this partial request.
2827
2828 Note that if the Irp is failing, the error code is already where
2829 we want it.
2830
2831 Arguments:
2832
2833 DeviceObject - Pointer to the file system device object.
2834
2835 Irp - Pointer to the associated Irp which is being completed. (This
2836 Irp will no longer be accessible after this routine returns.)
2837
2838 MasterIrp - Pointer to the master Irp.
2839
2840 Return Value:
2841
2842 Always returns STATUS_MORE_PROCESSING_REQUIRED.
2843
2844 --*/
2845
2846 {
2847 PFAT_PAGING_FILE_CONTEXT Context = (PFAT_PAGING_FILE_CONTEXT) Contxt;
2848
2849 UNREFERENCED_PARAMETER( DeviceObject );
2850
2851 DebugTrace(+1, Dbg, "FatPagingFileCompletionRoutineCatch, Context = %p\n", Context );
2852
2853 //
2854 // Cleanup the existing Mdl, perhaps by returning the reserve.
2855 //
2856
2857 if (Irp->MdlAddress == FatReserveMdl) {
2858
2859 MmPrepareMdlForReuse( Irp->MdlAddress );
2860 KeSetEvent( &FatReserveEvent, 0, FALSE );
2861
2862 } else {
2863
2864 IoFreeMdl( Irp->MdlAddress );
2865 }
2866
2867 //
2868 // Restore the original Mdl.
2869 //
2870
2871 Irp->MdlAddress = Context->RestoreMdl;
2872
2873 DebugTrace(-1, Dbg, "FatPagingFileCompletionRoutine => (done)\n", 0 );
2874
2875 //
2876 // If the IRP is succeeding or the failure handler did not post off the
2877 // completion, we're done and should set the event to let the master
2878 // know the IRP is his again.
2879 //
2880
2881 if (NT_SUCCESS( Irp->IoStatus.Status ) ||
2882 FatPagingFileErrorHandler( Irp, &Context->Event ) == STATUS_SUCCESS) {
2883
2884 KeSetEvent( &Context->Event, 0, FALSE );
2885 }
2886
2887 return STATUS_MORE_PROCESSING_REQUIRED;
2888
2889 }
2890
2891
2892 //
2893 // Internal Support Routine
2894 //
2895
2896 NTSTATUS
2897 NTAPI
2898 FatPagingFileCompletionRoutine (
2899 _In_ PDEVICE_OBJECT DeviceObject,
2900 _In_ PIRP Irp,
2901 _In_reads_opt_(_Inexpressible_("varies")) PVOID MasterIrp
2902 )
2903
2904 /*++
2905
2906 Routine Description:
2907
2908 This is the completion routine for all reads and writes started via
2909 FatPagingFileIo. It should only be invoked on error or cancel.
2910
2911 The completion routine has the following responsibility:
2912
2913 Since the individual request was completed with an error,
2914 this completion routine must stuff it into the master irp.
2915
2916 If the error implies a media problem, it also enqueues a
2917 worker item to write out the dirty bit so that the next
2918 time we run we will do an autochk /r.
2919
2920 Arguments:
2921
2922 DeviceObject - Pointer to the file system device object.
2923
2924 Irp - Pointer to the associated Irp which is being completed. (This
2925 Irp will no longer be accessible after this routine returns.)
2926
2927 MasterIrp - Pointer to the master Irp.
2928
2929 Return Value:
2930
2931 Always returns STATUS_SUCCESS.
2932
2933 --*/
2934
2935 {
2936 DebugTrace(+1, Dbg, "FatPagingFileCompletionRoutine, MasterIrp = %p\n", MasterIrp );
2937
2938 //
2939 // If we got an error (or verify required), remember it in the Irp
2940 //
2941
2942 NT_ASSERT( !NT_SUCCESS( Irp->IoStatus.Status ));
2943
2944 //
2945 // If we were invoked with an associated Irp, copy the error over.
2946 //
2947
2948 if (Irp != MasterIrp) {
2949
2950 ((PIRP)MasterIrp)->IoStatus = Irp->IoStatus;
2951 }
2952
2953 DebugTrace(-1, Dbg, "FatPagingFileCompletionRoutine => (done)\n", 0 );
2954
2955 UNREFERENCED_PARAMETER( DeviceObject );
2956
2957 return FatPagingFileErrorHandler( Irp, NULL );
2958 }
2959
2960
2961 //
2962 // Internal Support Routine
2963 //
2964
2965 NTSTATUS
2966 NTAPI
2967 FatSpecialSyncCompletionRoutine (
2968 _In_ PDEVICE_OBJECT DeviceObject,
2969 _In_ PIRP Irp,
2970 _In_reads_opt_(_Inexpressible_("varies")) PVOID Contxt
2971 )
2972
2973 /*++
2974
2975 Routine Description:
2976
2977 This is the completion routine for a special set of sub irps
2978 that have to work at APC level.
2979
2980 The completion routine has the following responsibilities:
2981
2982 It sets the event passed as the context to signal that the
2983 request is done.
2984
2985 By doing this, the caller will be released before final APC
2986 completion with knowledge that the IRP is finished. Final
2987 completion will occur at an indeterminate time after this
2988 occurs, and by using this completion routine the caller expects
2989 to not have any output or status returned. A junk user Iosb
2990 should be used to capture the status without forcing Io to take
2991 an exception on NULL.
2992
2993 Arguments:
2994
2995 DeviceObject - Pointer to the file system device object.
2996
2997 Irp - Pointer to the Irp for this request. (This Irp will no longer
2998 be accessible after this routine returns.)
2999
3000 Contxt - The context parameter which was specified in the call to
3001 FatRead/WriteSingleAsynch.
3002
3003 Return Value:
3004
3005 Currently always returns STATUS_SUCCESS.
3006
3007 --*/
3008
3009 {
3010 PFAT_SYNC_CONTEXT SyncContext = (PFAT_SYNC_CONTEXT)Contxt;
3011
3012 UNREFERENCED_PARAMETER( Irp );
3013
3014 DebugTrace(+1, Dbg, "FatSpecialSyncCompletionRoutine, Context = %p\n", Contxt );
3015
3016 SyncContext->Iosb = Irp->IoStatus;
3017
3018 KeSetEvent( &SyncContext->Event, 0, FALSE );
3019
3020 DebugTrace(-1, Dbg, "FatSpecialSyncCompletionRoutine -> STATUS_SUCCESS\n", 0 );
3021
3022 UNREFERENCED_PARAMETER( DeviceObject );
3023
3024 return STATUS_SUCCESS;
3025 }
3026
3027
3028 //
3029 // Internal Support Routine
3030 //
3031
3032 NTSTATUS
3033 NTAPI
3034 FatSingleSyncCompletionRoutine (
3035 _In_ PDEVICE_OBJECT DeviceObject,
3036 _In_ PIRP Irp,
3037 _In_reads_opt_(_Inexpressible_("varies")) PVOID Contxt
3038 )
3039
3040 /*++
3041
3042 Routine Description:
3043
3044 This is the completion routine for all reads and writes started via
3045 FatRead/WriteSingleAsynch.
3046
3047 The completion routine has the following responsibilities:
3048
3049 Stop completion processing of the Irp by returning
3050 STATUS_MORE_PROCESSING_REQUIRED, leaving the Irp and its I/O
3051 status for the waiting caller to examine.
3052 
3053 It sets the event in the Context parameter to wake that caller.
3054
3055 Arguments:
3056
3057 DeviceObject - Pointer to the file system device object.
3058
3059 Irp - Pointer to the Irp for this request. (This Irp will no longer
3060 be accessible after this routine returns.)
3061
3062 Contxt - The context parameter which was specified in the call to
3063 FatRead/WriteSingleAsynch.
3064
3065 Return Value:
3066
3067 Always returns STATUS_MORE_PROCESSING_REQUIRED.
3068
3069 --*/
3070
3071 {
3072 PFAT_IO_CONTEXT Context = Contxt;
3073
3074 DebugTrace(+1, Dbg, "FatSingleSyncCompletionRoutine, Context = %p\n", Context );
3075
3076 FatDoCompletionZero( Irp, Context );
3077
3078 if (!NT_SUCCESS( Irp->IoStatus.Status )) {
3079
3080 #if DBG
3081 if(!( NT_SUCCESS( FatBreakOnInterestingIoCompletion ) || Irp->IoStatus.Status != FatBreakOnInterestingIoCompletion )) {
3082 DbgBreakPoint();
3083 }
3084 #endif
3085
3086 }
3087
3088 NT_ASSERT( !(NT_SUCCESS( Irp->IoStatus.Status ) && Irp->IoStatus.Information == 0 ));
3089
3090 KeSetEvent( &Context->Wait.SyncEvent, 0, FALSE );
3091
3092 DebugTrace(-1, Dbg, "FatSingleSyncCompletionRoutine -> STATUS_MORE_PROCESSING_REQUIRED\n", 0 );
3093
3094 UNREFERENCED_PARAMETER( DeviceObject );
3095
3096 return STATUS_MORE_PROCESSING_REQUIRED;
3097 }
3098
3099
3100 //
3101 // Internal Support Routine
3102 //
3103
3104 NTSTATUS
3105 NTAPI
3106 FatSingleAsyncCompletionRoutine (
3107 _In_ PDEVICE_OBJECT DeviceObject,
3108 _In_ PIRP Irp,
3109 _In_reads_opt_(_Inexpressible_("varies")) PVOID Contxt
3110 )
3111
3112 /*++
3113
3114 Routine Description:
3115
3116 This is the completion routine for all reads and writes started via
3117 FatRead/WriteSingleAsynch.
3118
3119 The completion routine has the following responsibilities:
3120
3121 Fill in the final Information count and the file object flags if
3122 the transfer succeeded, since the Irp will complete from here.
3123 
3124 Release any resources held across the async I/O, free the context,
3125 and post the request to the Fsp if volume verification is needed.
3126
3127 Arguments:
3128
3129 DeviceObject - Pointer to the file system device object.
3130
3131 Irp - Pointer to the Irp for this request. (This Irp will no longer
3132 be accessible after this routine returns.)
3133
3134 Contxt - The context parameter which was specified in the call to
3135 FatRead/WriteSingleAsynch.
3136
3137 Return Value:
3138
3139 Returns STATUS_SUCCESS, or STATUS_MORE_PROCESSING_REQUIRED if the request was posted for volume verification.
3140
3141 --*/
3142
3143 {
3144 NTSTATUS Status = STATUS_SUCCESS;
3145
3146 PFAT_IO_CONTEXT Context = Contxt;
3147 BOOLEAN PostRequest = FALSE;
3148
3149 DebugTrace(+1, Dbg, "FatSingleAsyncCompletionRoutine, Context = %p\n", Context );
3150
3151 //
3152 // Fill in the information field correctly if this worked.
3153 //
3154
3155 FatDoCompletionZero( Irp, Context );
3156
3157 if (NT_SUCCESS(Irp->IoStatus.Status)) {
3158
3159 NT_ASSERT( Irp->IoStatus.Information != 0 );
3160 Irp->IoStatus.Information = Context->Wait.Async.RequestedByteCount;
3161 NT_ASSERT( Irp->IoStatus.Information != 0 );
3162
3163 //
3164 // Now if this wasn't PagingIo, set either the read or write bit.
3165 //
3166
3167 if (!FlagOn(Irp->Flags, IRP_PAGING_IO)) {
3168
3169 SetFlag( Context->Wait.Async.FileObject->Flags,
3170 IoGetCurrentIrpStackLocation(Irp)->MajorFunction == IRP_MJ_READ ?
3171 FO_FILE_FAST_IO_READ : FO_FILE_MODIFIED );
3172 }
3173
3174 } else {
3175
3176 #if DBG
3177 if(!( NT_SUCCESS( FatBreakOnInterestingIoCompletion ) || Irp->IoStatus.Status != FatBreakOnInterestingIoCompletion )) {
3178 DbgBreakPoint();
3179 }
3180 #endif
3181
3182 #ifdef SYSCACHE_COMPILE
3183 DbgPrint( "FAT SYSCACHE: SingleAsync (IRP %08x) -> %08x\n", Irp, Irp->IoStatus );
3184 #endif
3185
3186 //
3187 // Post STATUS_VERIFY_REQUIRED failures. Only post top level IRPs, because recursive I/Os
3188 // cannot process volume verification.
3189 //
3190
3191 if (!FlagOn(Context->IrpContextFlags, IRP_CONTEXT_FLAG_RECURSIVE_CALL) &&
3192 (Irp->IoStatus.Status == STATUS_VERIFY_REQUIRED)) {
3193 PostRequest = TRUE;
3194 }
3195
3196 }
3197
3198 //
3199 // If this was a special async write, decrement the count. Set the
3200 // event if this was the final outstanding I/O for the file. We will
3201 // also want to queue an APC to deal with any error conditions.
3202 //
3203 _Analysis_assume_(!(Context->Wait.Async.NonPagedFcb) &&
3204 (ExInterlockedAddUlong( &Context->Wait.Async.NonPagedFcb->OutstandingAsyncWrites,
3205 0xffffffff,
3206 &FatData.GeneralSpinLock ) != 1));
3207
3208 if ((Context->Wait.Async.NonPagedFcb) &&
3209 (ExInterlockedAddUlong( &Context->Wait.Async.NonPagedFcb->OutstandingAsyncWrites,
3210 0xffffffff,
3211 &FatData.GeneralSpinLock ) == 1)) {
3212
3213 KeSetEvent( Context->Wait.Async.NonPagedFcb->OutstandingAsyncEvent, 0, FALSE );
3214 }
3215
3216 //
3217 // Now release the resources
3218 //
3219
3220 if (Context->Wait.Async.Resource != NULL) {
3221
3222 ExReleaseResourceForThreadLite( Context->Wait.Async.Resource,
3223 Context->Wait.Async.ResourceThreadId );
3224 }
3225
3226 if (Context->Wait.Async.Resource2 != NULL) {
3227
3228 ExReleaseResourceForThreadLite( Context->Wait.Async.Resource2,
3229 Context->Wait.Async.ResourceThreadId );
3230 }
3231
3232 //
3233 // Mark the Irp pending
3234 //
3235
3236 IoMarkIrpPending( Irp );
3237
3238 //
3239 // and finally, free the context record.
3240 //
3241
3242 ExFreePool( Context );
3243
3244 if (PostRequest) {
3245
3246 PIRP_CONTEXT IrpContext = NULL;
3247
3248 _SEH2_TRY {
3249
3250 IrpContext = FatCreateIrpContext(Irp, TRUE );
3251 ClearFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_RECURSIVE_CALL);
3252 FatFsdPostRequest( IrpContext, Irp );
3253 Status = STATUS_MORE_PROCESSING_REQUIRED;
3254
3255 } _SEH2_EXCEPT( FatExceptionFilter(NULL, _SEH2_GetExceptionInformation()) ) {
3256
3257 //
3258 // If we failed to post the IRP, we just have to return the failure
3259 // to the user. :(
3260 //
3261
3262 NOTHING;
3263 } _SEH2_END;
3264 }
3265
3266
3267 DebugTrace(-1, Dbg, "FatSingleAsyncCompletionRoutine -> STATUS_MORE_PROCESSING_REQUIRED\n", 0 );
3268
3269 UNREFERENCED_PARAMETER( DeviceObject );
3270
3271 return Status;
3272 }
3273
3274
3275 VOID
3276 FatLockUserBuffer (
3277 IN PIRP_CONTEXT IrpContext,
3278 IN OUT PIRP Irp,
3279 IN LOCK_OPERATION Operation,
3280 IN ULONG BufferLength
3281 )
3282
3283 /*++
3284
3285 Routine Description:
3286
3287 This routine locks the specified buffer for the specified type of
3288 access. The file system requires this routine since it does not
3289 ask the I/O system to lock its buffers for direct I/O. This routine
3290 may only be called from the Fsd while still in the user context.
3291
3292 Note that this is the *input/output* buffer.
3293
3294 Arguments:
3295
3296 Irp - Pointer to the Irp for which the buffer is to be locked.
3297
3298 Operation - IoWriteAccess for read operations, or IoReadAccess for
3299 write operations.
3300
3301 BufferLength - Length of user buffer.
3302
3303 Return Value:
3304
3305 None
3306
3307 --*/
3308
3309 {
3310 PMDL Mdl = NULL;
3311
3312 PAGED_CODE();
3313
3314 if (Irp->MdlAddress == NULL) {
3315
3316 //
3317 // Allocate the Mdl, and Raise if we fail.
3318 //
3319
3320 Mdl = IoAllocateMdl( Irp->UserBuffer, BufferLength, FALSE, FALSE, Irp );
3321
3322 if (Mdl == NULL) {
3323
3324 FatRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES );
3325 }
3326
3327 //
3328 // Now probe the buffer described by the Irp. If we get an exception,
3329 // deallocate the Mdl and return the appropriate "expected" status.
3330 //
3331
3332 _SEH2_TRY {
3333
3334 MmProbeAndLockPages( Mdl,
3335 Irp->RequestorMode,
3336 Operation );
3337
3338 } _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER) {
3339
3340 NTSTATUS Status;
3341
3342 Status = _SEH2_GetExceptionCode();
3343
3344 IoFreeMdl( Mdl );
3345 Irp->MdlAddress = NULL;
3346
3347 FatRaiseStatus( IrpContext,
3348 FsRtlIsNtstatusExpected(Status) ? Status : STATUS_INVALID_USER_BUFFER );
3349 } _SEH2_END;
3350 }
3351
3352 UNREFERENCED_PARAMETER( IrpContext );
3353 }
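
//
// Illustrative sketch (not part of this driver): the usual sequence a read
// path follows with the buffer helpers in this module.  The user buffer is
// locked while still in the requestor's context and can then be mapped
// into system space whenever a system address is actually needed.
//
//     //  A disk read stores into the caller's buffer, so lock it for
//     //  IoWriteAccess (see the Operation argument description above).
//
//     FatLockUserBuffer( IrpContext, Irp, IoWriteAccess, ByteCount );
//
//     SystemBuffer = FatMapUserBuffer( IrpContext, Irp );
//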
3354
3355
3356 PVOID
3357 FatMapUserBuffer (
3358 IN PIRP_CONTEXT IrpContext,
3359 IN OUT PIRP Irp
3360 )
3361
3362 /*++
3363
3364 Routine Description:
3365
3366 This routine conditionally maps the user buffer for the current I/O
3367 request.  If the buffer is already described by an Mdl, it returns a
3368 system-space address for it; otherwise it returns the Irp's UserBuffer.
3369
3370 Note that this is the *input/output* buffer.
3371
3372 Arguments:
3373
3374 Irp - Pointer to the Irp for the request.
3375
3376 Return Value:
3377
3378 Mapped address
3379
3380 --*/
3381
3382 {
3383 UNREFERENCED_PARAMETER( IrpContext );
3384
3385 PAGED_CODE();
3386
3387 //
3388 // If there is no Mdl, then we must be in the Fsd, and we can simply
3389 // return the UserBuffer field from the Irp.
3390 //
3391
3392 if (Irp->MdlAddress == NULL) {
3393
3394 return Irp->UserBuffer;
3395
3396 } else {
3397
3398 PVOID Address = MmGetSystemAddressForMdlSafe( Irp->MdlAddress, NormalPagePriority | MdlMappingNoExecute );
3399
3400 if (Address == NULL) {
3401
3402 ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES );
3403 }
3404
3405 return Address;
3406 }
3407 }
3408
3409
3410 PVOID
3411 FatBufferUserBuffer (
3412 IN PIRP_CONTEXT IrpContext,
3413 IN OUT PIRP Irp,
3414 IN ULONG BufferLength
3415 )
3416
3417 /*++
3418
3419 Routine Description:
3420
3421 This routine conditionally buffers the user buffer for the current I/O
3422 request. If the buffer is already buffered, it just returns its address.
3423
3424 Note that this is the *input* buffer.
3425
3426 Arguments:
3427
3428 Irp - Pointer to the Irp for the request.
3429
3430 BufferLength - Length of user buffer.
3431
3432 Return Value:
3433
3434 Buffered address.
3435
3436 --*/
3437
3438 {
3439 PUCHAR UserBuffer;
3440
3441 UNREFERENCED_PARAMETER( IrpContext );
3442
3443 PAGED_CODE();
3444
3445 //
3446 // Handle the no buffer case.
3447 //
3448
3449 if (BufferLength == 0) {
3450
3451 return NULL;
3452 }
3453
3454 //
3455 // If there is no system buffer we must have been supplied an Mdl
3456 // describing the user's input buffer, which we will now snapshot.
3457 //
3458
3459 if (Irp->AssociatedIrp.SystemBuffer == NULL) {
3460
3461 UserBuffer = FatMapUserBuffer( IrpContext, Irp );
3462
3463 Irp->AssociatedIrp.SystemBuffer = FsRtlAllocatePoolWithQuotaTag( NonPagedPoolNx,
3464 BufferLength,
3465 TAG_IO_USER_BUFFER );
3466
3467 //
3468 // Set the flags so that the completion code knows to deallocate the
3469 // buffer.
3470 //
3471
3472 Irp->Flags |= (IRP_BUFFERED_IO | IRP_DEALLOCATE_BUFFER);
3473
3474 _SEH2_TRY {
3475
3476 RtlCopyMemory( Irp->AssociatedIrp.SystemBuffer,
3477 UserBuffer,
3478 BufferLength );
3479
3480 } _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER) {
3481
3482 NTSTATUS Status;
3483
3484 Status = _SEH2_GetExceptionCode();
3485 FatRaiseStatus( IrpContext,
3486 FsRtlIsNtstatusExpected(Status) ? Status : STATUS_INVALID_USER_BUFFER );
3487 } _SEH2_END;
3488 }
3489
3490 return Irp->AssociatedIrp.SystemBuffer;
3491 }
3492
3493
3494 NTSTATUS
3495 FatToggleMediaEjectDisable (
3496 IN PIRP_CONTEXT IrpContext,
3497 IN PVCB Vcb,
3498 IN BOOLEAN PreventRemoval
3499 )
3500
3501 /*++
3502
3503 Routine Description:
3504
3505 The routine either enables or disables the eject button on removable
3506 media.
3507
3508 Arguments:
3509
3510 Vcb - Describes the volume to operate on
3511
3512 PreventRemoval - TRUE if we should disable the media eject button. FALSE
3513 if we want to enable it.
3514
3515 Return Value:
3516
3517 Status of the operation.
3518
3519 --*/
3520
3521 {
3522 PIRP Irp;
3523 KIRQL SavedIrql;
3524 NTSTATUS Status;
3525 FAT_SYNC_CONTEXT SyncContext;
3526 PREVENT_MEDIA_REMOVAL Prevent;
3527
3528 UNREFERENCED_PARAMETER( IrpContext );
3529
3530 //
3531 // If PreventRemoval is the same as VCB_STATE_FLAG_REMOVAL_PREVENTED,
3532 // no-op this call, otherwise toggle the state of the flag.
3533 //
3534
3535 KeAcquireSpinLock( &FatData.GeneralSpinLock, &SavedIrql );
3536
3537 if ((PreventRemoval ^
3538 BooleanFlagOn(Vcb->VcbState, VCB_STATE_FLAG_REMOVAL_PREVENTED)) == 0) {
3539
3540 KeReleaseSpinLock( &FatData.GeneralSpinLock, SavedIrql );
3541
3542 return STATUS_SUCCESS;
3543
3544 } else {
3545
3546 Vcb->VcbState ^= VCB_STATE_FLAG_REMOVAL_PREVENTED;
3547
3548 KeReleaseSpinLock( &FatData.GeneralSpinLock, SavedIrql );
3549 }
3550
3551 Prevent.PreventMediaRemoval = PreventRemoval;
3552
3553 KeInitializeEvent( &SyncContext.Event, NotificationEvent, FALSE );
3554
3555 //
3556 // We build this IRP using a junk Iosb that will receive the final
3557 // completion status since we won't be around for it.
3558 //
3559 // We fill in the UserIosb manually below,
3560 // so passing NULL for the final parameter is ok in this special case.
3561 //
3562 #ifdef _MSC_VER
3563 #pragma warning(suppress: 6387)
3564 #endif
3565 Irp = IoBuildDeviceIoControlRequest( IOCTL_DISK_MEDIA_REMOVAL,
3566 Vcb->TargetDeviceObject,
3567 &Prevent,
3568 sizeof(PREVENT_MEDIA_REMOVAL),
3569 NULL,
3570 0,
3571 FALSE,
3572 NULL,
3573 NULL );
3574
3575 if ( Irp != NULL ) {
3576
3577 //
3578 // Use our special completion routine which will remove the requirement that
3579 // the caller must be below APC level. All it tells us is that the Irp got
3580 // back, but will not tell us if it was successful or not.  We don't care,
3581 // and there is of course no fallback if the attempt to prevent removal
3582 // doesn't work for some mysterious reason.
3583 //
3584 // Normally, all IO is done at passive level. However, MM needs to be able
3585 // to issue IO with fast mutexes locked down, which raises us to APC. The
3586 // overlying IRP is set up to complete in yet another magical fashion even
3587 // though APCs are disabled, and any IRPs we issue in these cases have to do
3588 // the same.  Marking media dirty (and toggling eject state) is one such case.
3589 //
3590
3591 Irp->UserIosb = &Irp->IoStatus;
3592
3593 IoSetCompletionRoutine( Irp,
3594 FatSpecialSyncCompletionRoutine,
3595 &SyncContext,
3596 TRUE,
3597 TRUE,
3598 TRUE );
3599
3600 Status = IoCallDriver( Vcb->TargetDeviceObject, Irp );
3601
3602 if (Status == STATUS_PENDING) {
3603
3604 (VOID) KeWaitForSingleObject( &SyncContext.Event,
3605 Executive,
3606 KernelMode,
3607 FALSE,
3608 NULL );
3609
3610 Status = SyncContext.Iosb.Status;
3611 }
3612
3613 return Status;
3614 }
3615
3616 return STATUS_INSUFFICIENT_RESOURCES;
3617 }
3618
3619
3620 NTSTATUS
3621 FatPerformDevIoCtrl (
3622 IN PIRP_CONTEXT IrpContext,
3623 IN ULONG IoControlCode,
3624 IN PDEVICE_OBJECT Device,
3625 IN PVOID InputBuffer OPTIONAL,
3626 IN ULONG InputBufferLength,
3627 OUT PVOID OutputBuffer OPTIONAL,
3628 IN ULONG OutputBufferLength,
3629 IN BOOLEAN InternalDeviceIoControl,
3630 IN BOOLEAN OverrideVerify,
3631 OUT PIO_STATUS_BLOCK Iosb OPTIONAL
3632 )
3633
3634 /*++
3635
3636 Routine Description:
3637
3638 This routine is called to perform DevIoCtrl functions internally within
3639 the filesystem. We take the status from the driver and return it to our
3640 caller.
3641
3642 Arguments:
3643
3644 IoControlCode - Code to send to driver.
3645
3646 Device - This is the device to send the request to.
3647
3648 OutputBuffer - Pointer to output buffer.
3649
3650 OutputBufferLength - Length of output buffer above.
3651
3652 InternalDeviceIoControl - Indicates if this is an internal or external
3653 Io control code.
3654
3655 OverrideVerify - Indicates if we should tell the driver not to return
3656 STATUS_VERIFY_REQUIRED for mount and verify.
3657
3658 Iosb - If specified, we return the results of the operation here.
3659
3660 Return Value:
3661
3662 NTSTATUS - Status returned by next lower driver.
3663
3664 --*/
3665
3666 {
3667 NTSTATUS Status;
3668 PIRP Irp;
3669 KEVENT Event;
3670 IO_STATUS_BLOCK LocalIosb;
3671 PIO_STATUS_BLOCK IosbToUse = &LocalIosb;
3672
3673 PAGED_CODE();
3674
3675 UNREFERENCED_PARAMETER( IrpContext );
3676
3677 //
3678 // Check if the user gave us an Iosb.
3679 //
3680
3681 if (ARGUMENT_PRESENT( Iosb )) {
3682
3683 IosbToUse = Iosb;
3684 }
3685
3686 IosbToUse->Status = 0;
3687 IosbToUse->Information = 0;
3688
3689 KeInitializeEvent( &Event, NotificationEvent, FALSE );
3690
3691 Irp = IoBuildDeviceIoControlRequest( IoControlCode,
3692 Device,
3693 InputBuffer,
3694 InputBufferLength,
3695 OutputBuffer,
3696 OutputBufferLength,
3697 InternalDeviceIoControl,
3698 &Event,
3699 IosbToUse );
3700
3701 if (Irp == NULL) {
3702
3703 return STATUS_INSUFFICIENT_RESOURCES;
3704 }
3705
3706 if (OverrideVerify) {
3707
3708 SetFlag( IoGetNextIrpStackLocation( Irp )->Flags, SL_OVERRIDE_VERIFY_VOLUME );
3709 }
3710
3711 Status = IoCallDriver( Device, Irp );
3712
3713 //
3714 // We take the final status from Status directly, unless the driver
3715 // returned STATUS_PENDING, in which case we wait on the event and
3716 // then use the Iosb status value.
3717 //
3718
3719 if (Status == STATUS_PENDING) {
3720
3721 (VOID) KeWaitForSingleObject( &Event,
3722 Executive,
3723 KernelMode,
3724 FALSE,
3725 (PLARGE_INTEGER)NULL );
3726
3727 Status = IosbToUse->Status;
3728 }
3729
3730 return Status;
3731 }
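
//
// Illustrative sketch (not part of this driver): a typical internal caller
// of FatPerformDevIoCtrl.  The control code shown is only an example of
// the kind of query the file system issues to the storage stack.
//
//     DISK_GEOMETRY Geometry;
//
//     Status = FatPerformDevIoCtrl( IrpContext,
//                                   IOCTL_DISK_GET_DRIVE_GEOMETRY,
//                                   Vcb->TargetDeviceObject,
//                                   NULL,
//                                   0,
//                                   &Geometry,
//                                   sizeof(DISK_GEOMETRY),
//                                   FALSE,
//                                   TRUE,
//                                   NULL );
//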
3732
3733 PMDL
3734 FatBuildZeroMdl (
3735 __in PIRP_CONTEXT IrpContext,
3736 __in ULONG Length
3737 )
3738 /*++
3739
3740 Routine Description:
3741
3742 Create an efficient Mdl that describes a given length of zeros.  We only
3743 use a one-page buffer and build an Mdl that maps all the pages back to that
3744 single physical page.  We fall back to a smaller buffer, down to one page,
3745 if memory is tight.  The caller should check Mdl->ByteCount for the true size.
3746
3747 Arguments:
3748
3749 Length - The desired length of the zero buffer.  We may return less than this.
3750
3751 Return Value:
3752
3753 a MDL if successful / NULL if not
3754
3755 --*/
3756
3757 {
3758 PMDL ZeroMdl;
3759 ULONG SavedByteCount;
3760 PPFN_NUMBER Page;
3761 ULONG i;
3762
3763 UNREFERENCED_PARAMETER( IrpContext );
3764
3765 //
3766 // Spin down trying to get an MDL which can describe our operation.
3767 //
3768
3769 while (TRUE) {
3770
3771 ZeroMdl = IoAllocateMdl( FatData.ZeroPage, Length, FALSE, FALSE, NULL );
3772
3773 //
3774 // Throttle ourselves to what we've physically allocated. Note that
3775 // we could have started with an odd multiple of this number. If we
3776 // tried for exactly that size and failed, we're toast.
3777 //
3778
3779 if (ZeroMdl || (Length <= PAGE_SIZE)) {
3780
3781 break;
3782 }
3783
3784 //
3785 // Fallback by half and round down to a page multiple.
3786 //
3787
3788 ASSERT( IrpContext->Vcb->Bpb.BytesPerSector <= PAGE_SIZE );
3789 Length = BlockAlignTruncate( Length / 2, PAGE_SIZE );
3790 if (Length < PAGE_SIZE) {
3791 Length = PAGE_SIZE;
3792 }
3793 }
3794
3795 if (ZeroMdl == NULL) {
3796 return NULL;
3797 }
3798
3799 //
3800 // If we have throttled all the way down, stop and just build a
3801 // simple MDL describing our previous allocation.
3802 //
3803
3804 if (Length == PAGE_SIZE) {
3805
3806 MmBuildMdlForNonPagedPool( ZeroMdl );
3807 return ZeroMdl;
3808 }
3809
3810 //
3811 // Now we will temporarily lock the allocated pages
3812 // only, and then replicate the page frame numbers through
3813 // the entire Mdl to keep writing the same pages of zeros.
3814 //
3815 // It would be nice if Mm exported a way for us to not have
3816 // to pull the Mdl apart and rebuild it ourselves, but this
3817 // is so bizarre a purpose as to be tolerable.
3818 //
3819
3820 SavedByteCount = ZeroMdl->ByteCount;
3821 ZeroMdl->ByteCount = PAGE_SIZE;
3822 MmBuildMdlForNonPagedPool( ZeroMdl );
3823
3824 ZeroMdl->MdlFlags &= ~MDL_SOURCE_IS_NONPAGED_POOL;
3825 ZeroMdl->MdlFlags |= MDL_PAGES_LOCKED;
3826 ZeroMdl->MappedSystemVa = NULL;
3827 ZeroMdl->StartVa = NULL;
3828 ZeroMdl->ByteCount = SavedByteCount;
3829 Page = MmGetMdlPfnArray( ZeroMdl );
3830 for (i = 1; i < (ADDRESS_AND_SIZE_TO_SPAN_PAGES( 0, SavedByteCount )); i++) {
3831 *(Page + i) = *(Page);
3832 }
3833
3834
3835 return ZeroMdl;
3836 }
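
//
// Illustrative sketch (not part of this driver): how a caller is expected
// to use FatBuildZeroMdl.  The returned Mdl may describe less than the
// requested length, so the writer must work from the actual Mdl->ByteCount
// and must free the Mdl itself when the zeroing I/O is finished.
//
//     ZeroMdl = FatBuildZeroMdl( IrpContext, ByteCount );
//
//     if (ZeroMdl != NULL) {
//
//         //  Issue the zeroing write ZeroMdl->ByteCount bytes at a time,
//         //  advancing the target offset until ByteCount is consumed.
//
//         //  ...
//
//         IoFreeMdl( ZeroMdl );
//     }
//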
3837
3838
3839