/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/copy.c
 * PURPOSE:         Implements the cache manager's copy interface
 *
 * PROGRAMMERS:     Some people?
 *                  Pierre Schweitzer (pierre@reactos.org)
 */

/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

/* GLOBALS *******************************************************************/

static PFN_NUMBER CcZeroPage = 0;

#define MAX_ZERO_LENGTH    (256 * 1024)

typedef enum _CC_COPY_OPERATION
{
    CcOperationRead,
    CcOperationWrite,
    CcOperationZero
} CC_COPY_OPERATION;

ULONG CcRosTraceLevel = 0;
ULONG CcFastMdlReadWait;
ULONG CcFastMdlReadNotPossible;
ULONG CcFastReadNotPossible;
ULONG CcFastReadWait;
ULONG CcFastReadNoWait;
ULONG CcFastReadResourceMiss;

extern KEVENT iLazyWriterNotify;

/* FUNCTIONS *****************************************************************/

VOID
NTAPI
MiZeroPhysicalPage (
    IN PFN_NUMBER PageFrameIndex
);

VOID
NTAPI
CcInitCacheZeroPage (
    VOID)
{
    NTSTATUS Status;

    MI_SET_USAGE(MI_USAGE_CACHE);
    //MI_SET_PROCESS2(PsGetCurrentProcess()->ImageFileName);
    Status = MmRequestPageMemoryConsumer(MC_SYSTEM, TRUE, &CcZeroPage);
    if (!NT_SUCCESS(Status))
    {
        DbgPrint("Can't allocate CcZeroPage.\n");
        KeBugCheck(CACHE_MANAGER);
    }
    MiZeroPhysicalPage(CcZeroPage);
}

/* Read the backing file data for the given VACB into its mapped view */
NTSTATUS
NTAPI
CcReadVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size, Pages;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;

    Size = (ULONG)(Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
    if (Size > VACB_MAPPING_GRANULARITY)
    {
        Size = VACB_MAPPING_GRANULARITY;
    }

    Pages = BYTES_TO_PAGES(Size);
    ASSERT(Pages * PAGE_SIZE <= VACB_MAPPING_GRANULARITY);

    Mdl = IoAllocateMdl(Vacb->BaseAddress, Pages * PAGE_SIZE, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        MmProbeAndLockPages(Mdl, KernelMode, IoWriteAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        Status = _SEH2_GetExceptionCode();
        KeBugCheck(CACHE_MANAGER);
    } _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        Mdl->MdlFlags |= MDL_IO_PAGE_READ;
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoPageRead(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }

    IoFreeMdl(Mdl);

    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoPageRead failed, Status %x\n", Status);
        return Status;
    }

    if (Size < VACB_MAPPING_GRANULARITY)
    {
        RtlZeroMemory((char*)Vacb->BaseAddress + Size,
                      VACB_MAPPING_GRANULARITY - Size);
    }

    return STATUS_SUCCESS;
}

/* Write the data mapped by the given VACB back to the backing file */
NTSTATUS
NTAPI
CcWriteVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;

    Size = (ULONG)(Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
    if (Size > VACB_MAPPING_GRANULARITY)
    {
        Size = VACB_MAPPING_GRANULARITY;
    }
    //
    // Nonpaged pool PDEs in ReactOS must actually be synchronized between the
    // MmGlobalPageDirectory and the real system PDE directory. What a mess...
    //
    {
        ULONG i = 0;
        do
        {
            MmGetPfnForProcess(NULL, (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i << PAGE_SHIFT)));
        } while (++i < (Size >> PAGE_SHIFT));
    }

    Mdl = IoAllocateMdl(Vacb->BaseAddress, Size, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        MmProbeAndLockPages(Mdl, KernelMode, IoReadAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        Status = _SEH2_GetExceptionCode();
        KeBugCheck(CACHE_MANAGER);
    } _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoSynchronousPageWrite(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }
    IoFreeMdl(Mdl);
    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoSynchronousPageWrite failed, Status %x\n", Status);
        return Status;
    }

    return STATUS_SUCCESS;
}

/* Copy into or out of the cache view, or zero it, guarding the copy with SEH */
NTSTATUS
ReadWriteOrZero(
    _Inout_ PVOID BaseAddress,
    _Inout_opt_ PVOID Buffer,
    _In_ ULONG Length,
    _In_ CC_COPY_OPERATION Operation)
{
    NTSTATUS Status = STATUS_SUCCESS;

    if (Operation == CcOperationZero)
    {
        /* Zero */
        RtlZeroMemory(BaseAddress, Length);
    }
    else
    {
        _SEH2_TRY
        {
            if (Operation == CcOperationWrite)
                RtlCopyMemory(BaseAddress, Buffer, Length);
            else
                RtlCopyMemory(Buffer, BaseAddress, Length);
        }
        _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
        {
            Status = _SEH2_GetExceptionCode();
        }
        _SEH2_END;
    }
    return Status;
}

/* Common worker for CcCopyRead, CcCopyWrite and CcZeroData: walks the requested
 * file range one VACB at a time */
BOOLEAN
CcCopyData (
    _In_ PFILE_OBJECT FileObject,
    _In_ LONGLONG FileOffset,
    _Inout_ PVOID Buffer,
    _In_ LONGLONG Length,
    _In_ CC_COPY_OPERATION Operation,
    _In_ BOOLEAN Wait,
    _Out_ PIO_STATUS_BLOCK IoStatus)
{
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    ULONG BytesCopied;
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PLIST_ENTRY ListEntry;
    PROS_VACB Vacb;
    ULONG PartialLength;
    PVOID BaseAddress;
    BOOLEAN Valid;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    CurrentOffset = FileOffset;
    BytesCopied = 0;

    if (!Wait)
    {
        /* test if the requested data is available */
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
        /* FIXME: this loop doesn't take into account areas that don't have
         * a VACB in the list yet */
        ListEntry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (ListEntry != &SharedCacheMap->CacheMapVacbListHead)
        {
            Vacb = CONTAINING_RECORD(ListEntry,
                                     ROS_VACB,
                                     CacheMapVacbListEntry);
            ListEntry = ListEntry->Flink;
            if (!Vacb->Valid &&
                DoRangesIntersect(Vacb->FileOffset.QuadPart,
                                  VACB_MAPPING_GRANULARITY,
                                  CurrentOffset, Length))
            {
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
                /* data not available */
                return FALSE;
            }
            if (Vacb->FileOffset.QuadPart >= CurrentOffset + Length)
                break;
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
    }

    PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
    if (PartialLength != 0)
    {
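        /* The request does not start on a VACB boundary: copy the leading
         * partial block up to the next boundary before the aligned loop below */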
        PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  ROUND_DOWN(CurrentOffset,
                                             VACB_MAPPING_GRANULARITY),
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        if (!Valid)
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero((PUCHAR)BaseAddress + CurrentOffset % VACB_MAPPING_GRANULARITY,
                                 Buffer,
                                 PartialLength,
                                 Operation);

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }

    while (Length > 0)
    {
        ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
        PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  CurrentOffset,
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        if (!Valid &&
            (Operation == CcOperationRead ||
             PartialLength < VACB_MAPPING_GRANULARITY))
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero(BaseAddress, Buffer, PartialLength, Operation);

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }
    IoStatus->Status = STATUS_SUCCESS;
    IoStatus->Information = BytesCopied;
    return TRUE;
}

/*
 * @unimplemented
 */
BOOLEAN
NTAPI
CcCanIWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG BytesToWrite,
    IN BOOLEAN Wait,
    IN BOOLEAN Retrying)
{
    PFSRTL_COMMON_FCB_HEADER Fcb;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p BytesToWrite=%lu Wait=%d Retrying=%d\n",
            FileObject, BytesToWrite, Wait, Retrying);

    /* We cannot write if the dirty page count is above the threshold */
    if (CcTotalDirtyPages > CcDirtyPageThreshold)
    {
        return FALSE;
    }

    /* We cannot write if this write would take the dirty page count above the threshold
     * XXX: Might not be accurate
     */
    if (CcTotalDirtyPages + (BytesToWrite / PAGE_SIZE) > CcDirtyPageThreshold)
    {
        return FALSE;
    }

    /* Is there a limit per file object? */
    Fcb = FileObject->FsContext;
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    if (!BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES) ||
        SharedCacheMap->DirtyPageThreshold == 0)
    {
        /* Nope, so that's fine, allow write operation */
        return TRUE;
    }

    /* Is dirty page count above local threshold? */
    if (SharedCacheMap->DirtyPages > SharedCacheMap->DirtyPageThreshold)
    {
        return FALSE;
    }

    /* We cannot write if this write would take the dirty page count above the local threshold
     * XXX: Might not be accurate
     */
    if (SharedCacheMap->DirtyPages + (BytesToWrite / PAGE_SIZE) > SharedCacheMap->DirtyPageThreshold)
    {
        return FALSE;
    }

    return TRUE;
}

/*
 * @implemented
 */
BOOLEAN
NTAPI
CcCopyRead (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d\n",
            FileObject, FileOffset->QuadPart, Length, Wait);

    DPRINT("CcCopyRead(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p, IoStatus 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait,
           Buffer, IoStatus);

    return CcCopyData(FileObject,
                      FileOffset->QuadPart,
                      Buffer,
                      Length,
                      CcOperationRead,
                      Wait,
                      IoStatus);
}

/*
 * @implemented
 */
BOOLEAN
NTAPI
CcCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    IN PVOID Buffer)
{
    IO_STATUS_BLOCK IoStatus;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d Buffer=%p\n",
            FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    DPRINT("CcCopyWrite(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    return CcCopyData(FileObject,
                      FileOffset->QuadPart,
                      Buffer,
                      Length,
                      CcOperationWrite,
                      Wait,
                      &IoStatus);
}

/*
 * @implemented
 */
VOID
NTAPI
CcDeferWrite (
    IN PFILE_OBJECT FileObject,
    IN PCC_POST_DEFERRED_WRITE PostRoutine,
    IN PVOID Context1,
    IN PVOID Context2,
    IN ULONG BytesToWrite,
    IN BOOLEAN Retrying)
{
    PDEFERRED_WRITE Context;

    CCTRACE(CC_API_DEBUG, "FileObject=%p PostRoutine=%p Context1=%p Context2=%p BytesToWrite=%lu Retrying=%d\n",
            FileObject, PostRoutine, Context1, Context2, BytesToWrite, Retrying);

    /* Try to allocate a context for queueing the write operation */
    Context = ExAllocatePoolWithTag(NonPagedPool, sizeof(DEFERRED_WRITE), 'CcDw');
    /* If it failed, immediately execute the operation! */
    if (Context == NULL)
    {
        PostRoutine(Context1, Context2);
        return;
    }

    /* Otherwise, initialize the context */
    RtlZeroMemory(Context, sizeof(DEFERRED_WRITE));
    Context->NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
    Context->NodeByteSize = sizeof(DEFERRED_WRITE);
    Context->FileObject = FileObject;
    Context->PostRoutine = PostRoutine;
    Context->Context1 = Context1;
    Context->Context2 = Context2;
    Context->BytesToWrite = BytesToWrite;

    /* And queue it */
    if (Retrying)
    {
        /* To the top, if that's a retry */
        ExInterlockedInsertHeadList(&CcDeferredWrites,
                                    &Context->DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }
    else
    {
        /* To the bottom, if that's a first time */
        ExInterlockedInsertTailList(&CcDeferredWrites,
                                    &Context->DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }
}

/*
 * @unimplemented
 */
VOID
NTAPI
CcFastCopyRead (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN ULONG PageCount,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    LARGE_INTEGER LargeFileOffset;
    BOOLEAN Success;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu PageCount=%lu Buffer=%p\n",
            FileObject, FileOffset, Length, PageCount, Buffer);

    DBG_UNREFERENCED_PARAMETER(PageCount);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyRead(FileObject,
                         &LargeFileOffset,
                         Length,
                         TRUE,
                         Buffer,
                         IoStatus);
    ASSERT(Success == TRUE);
}

/*
 * @unimplemented
 */
VOID
NTAPI
CcFastCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN PVOID Buffer)
{
    LARGE_INTEGER LargeFileOffset;
    BOOLEAN Success;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu Buffer=%p\n",
            FileObject, FileOffset, Length, Buffer);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyWrite(FileObject,
                          &LargeFileOffset,
                          Length,
                          TRUE,
                          Buffer);
    ASSERT(Success == TRUE);
}

/*
 * @implemented
 */
NTSTATUS
NTAPI
CcWaitForCurrentLazyWriterActivity (
    VOID)
{
    NTSTATUS Status;

    /* Lazy writer is done when its event is set */
    Status = KeWaitForSingleObject(&iLazyWriterNotify,
                                   Executive,
                                   KernelMode,
                                   FALSE,
                                   NULL);
    if (!NT_SUCCESS(Status))
    {
        return Status;
    }

    return STATUS_SUCCESS;
}

/*
 * @implemented
 */
BOOLEAN
NTAPI
CcZeroData (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER StartOffset,
    IN PLARGE_INTEGER EndOffset,
    IN BOOLEAN Wait)
{
    NTSTATUS Status;
    LARGE_INTEGER WriteOffset;
    LONGLONG Length;
    ULONG CurrentLength;
    PMDL Mdl;
    ULONG i;
    IO_STATUS_BLOCK Iosb;
    KEVENT Event;

    CCTRACE(CC_API_DEBUG, "FileObject=%p StartOffset=%I64u EndOffset=%I64u Wait=%d\n",
            FileObject, StartOffset->QuadPart, EndOffset->QuadPart, Wait);

    DPRINT("CcZeroData(FileObject 0x%p, StartOffset %I64x, EndOffset %I64x, "
           "Wait %u)\n", FileObject, StartOffset->QuadPart, EndOffset->QuadPart,
           Wait);

    Length = EndOffset->QuadPart - StartOffset->QuadPart;
    WriteOffset.QuadPart = StartOffset->QuadPart;

    if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
    {
        /* File is not cached */

        Mdl = _alloca(MmSizeOfMdl(NULL, MAX_ZERO_LENGTH));

        while (Length > 0)
        {
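            /* Write at most MAX_ZERO_LENGTH bytes per iteration, backing the
             * MDL with the shared zero page */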
            if (Length + WriteOffset.QuadPart % PAGE_SIZE > MAX_ZERO_LENGTH)
            {
                CurrentLength = MAX_ZERO_LENGTH - WriteOffset.QuadPart % PAGE_SIZE;
            }
            else
            {
                CurrentLength = Length;
            }
            MmInitializeMdl(Mdl, (PVOID)(ULONG_PTR)WriteOffset.QuadPart, CurrentLength);
            Mdl->MdlFlags |= (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
            for (i = 0; i < ((Mdl->Size - sizeof(MDL)) / sizeof(ULONG)); i++)
            {
                ((PPFN_NUMBER)(Mdl + 1))[i] = CcZeroPage;
            }
            KeInitializeEvent(&Event, NotificationEvent, FALSE);
            Status = IoSynchronousPageWrite(FileObject, Mdl, &WriteOffset, &Event, &Iosb);
            if (Status == STATUS_PENDING)
            {
                KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
                Status = Iosb.Status;
            }
            if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
            {
                MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
            }
            if (!NT_SUCCESS(Status))
            {
                return FALSE;
            }
            WriteOffset.QuadPart += CurrentLength;
            Length -= CurrentLength;
        }
    }
    else
    {
        IO_STATUS_BLOCK IoStatus;

        return CcCopyData(FileObject,
                          WriteOffset.QuadPart,
                          NULL,
                          Length,
                          CcOperationZero,
                          Wait,
                          &IoStatus);
    }

    return TRUE;
}