/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/copy.c
 * PURPOSE:         Implements the cache manager's copy interface
 *
 * PROGRAMMERS:     Some people?
 *                  Pierre Schweitzer (pierre@reactos.org)
 */

/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

/* GLOBALS *******************************************************************/

static PFN_NUMBER CcZeroPage = 0;

#define MAX_ZERO_LENGTH (256 * 1024)

typedef enum _CC_CAN_WRITE_RETRY
{
    FirstTry = 0,
    RetryAllowRemote = 253,
    RetryForceCheckPerFile = 254,
    RetryMasterLocked = 255,
} CC_CAN_WRITE_RETRY;

ULONG CcRosTraceLevel = CC_API_DEBUG;
ULONG CcFastMdlReadWait;
ULONG CcFastMdlReadNotPossible;
ULONG CcFastReadNotPossible;
ULONG CcFastReadWait;
ULONG CcFastReadNoWait;
ULONG CcFastReadResourceMiss;

/* Counters:
 * - Number of pages flushed to disk
 * - Number of flush operations
 */
ULONG CcDataPages = 0;
ULONG CcDataFlushes = 0;

/* FUNCTIONS *****************************************************************/

VOID
NTAPI
MiZeroPhysicalPage (
    IN PFN_NUMBER PageFrameIndex
);

VOID
NTAPI
CcInitCacheZeroPage (
    VOID)
{
    NTSTATUS Status;

    MI_SET_USAGE(MI_USAGE_CACHE);
    //MI_SET_PROCESS2(PsGetCurrentProcess()->ImageFileName);
    Status = MmRequestPageMemoryConsumer(MC_SYSTEM, TRUE, &CcZeroPage);
    if (!NT_SUCCESS(Status))
    {
        DbgPrint("Can't allocate CcZeroPage.\n");
        KeBugCheck(CACHE_MANAGER);
    }
    MiZeroPhysicalPage(CcZeroPage);
}

VOID
CcPostDeferredWrites(VOID)
{
    LIST_ENTRY ToInsertBack;

    InitializeListHead(&ToInsertBack);

    /* We'll try to write as much as we can */
    while (TRUE)
    {
        PDEFERRED_WRITE DeferredWrite;
        PLIST_ENTRY ListEntry;

        DeferredWrite = NULL;

        ListEntry = ExInterlockedRemoveHeadList(&CcDeferredWrites, &CcDeferredWriteSpinLock);

        if (!ListEntry)
            break;

        DeferredWrite = CONTAINING_RECORD(ListEntry, DEFERRED_WRITE, DeferredWriteLinks);

        /* Check if we can write */
        if (CcCanIWrite(DeferredWrite->FileObject, DeferredWrite->BytesToWrite, FALSE, RetryForceCheckPerFile))
        {
            /* If we have an event, set it and move along */
            if (DeferredWrite->Event)
            {
                KeSetEvent(DeferredWrite->Event, IO_NO_INCREMENT, FALSE);
            }
            /* Otherwise, call the write routine and free the context */
            else
            {
                DeferredWrite->PostRoutine(DeferredWrite->Context1, DeferredWrite->Context2);
                ExFreePoolWithTag(DeferredWrite, 'CcDw');
            }
            continue;
        }

        /* Keep it for later */
        InsertHeadList(&ToInsertBack, &DeferredWrite->DeferredWriteLinks);

        /* If we don't accept modified pages, stop here */
        if (!DeferredWrite->LimitModifiedPages)
        {
            break;
        }
    }

    /* Re-insert what we couldn't write back into the list */
    while (!IsListEmpty(&ToInsertBack))
    {
        PLIST_ENTRY ListEntry = RemoveTailList(&ToInsertBack);
        ExInterlockedInsertHeadList(&CcDeferredWrites, ListEntry, &CcDeferredWriteSpinLock);
    }
}

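/* Worker routine behind scheduled read-ahead: it brings the range recorded in
 * the private cache map into the cache, one VACB-sized chunk at a time. The
 * file object carries an extra reference taken by CcScheduleReadAhead, which
 * is dropped on every exit path below.
 */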
VOID
CcPerformReadAhead(
    IN PFILE_OBJECT FileObject)
{
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PROS_VACB Vacb;
    ULONG PartialLength;
    ULONG Length;
    PPRIVATE_CACHE_MAP PrivateCacheMap;
    BOOLEAN Locked;
    BOOLEAN Success;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    /* Critical:
     * The private cache map might disappear under us if the handle to the
     * file is closed (it is attached to the handle, not to the file), so we
     * must hold the master lock while we deal with it. It cannot go away
     * while we hold that lock.
     */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    PrivateCacheMap = FileObject->PrivateCacheMap;
    /* If the handle was closed since the read-ahead was scheduled, just quit */
    if (PrivateCacheMap == NULL)
    {
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
        ObDereferenceObject(FileObject);
        return;
    }
    /* Otherwise, extract the read offset and length and release the private map */
    else
    {
        KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
        CurrentOffset = PrivateCacheMap->ReadAheadOffset[1].QuadPart;
        Length = PrivateCacheMap->ReadAheadLength[1];
        KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* Time to go! */
    DPRINT("Doing ReadAhead for %p\n", FileObject);
    /* Lock the file, first */
    if (!SharedCacheMap->Callbacks->AcquireForReadAhead(SharedCacheMap->LazyWriteContext, FALSE))
    {
        Locked = FALSE;
        goto Clear;
    }

    /* Remember it's locked */
    Locked = TRUE;

    /* Don't read past the end of the file */
    if (CurrentOffset >= SharedCacheMap->FileSize.QuadPart)
    {
        goto Clear;
    }
    if (CurrentOffset + Length > SharedCacheMap->FileSize.QuadPart)
    {
        Length = SharedCacheMap->FileSize.QuadPart - CurrentOffset;
    }

    /* The rest of the algorithm looks like CcCopyData, with the slight
     * difference that we don't copy data back to a user-backed buffer:
     * we just bring the data into Cc.
     */
    PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
    if (PartialLength != 0)
    {
        PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  ROUND_DOWN(CurrentOffset, VACB_MAPPING_GRANULARITY),
                                  &Vacb);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Failed to request VACB: %lx!\n", Status);
            goto Clear;
        }

        _SEH2_TRY
        {
            Success = CcRosEnsureVacbResident(Vacb, TRUE, FALSE,
                                              CurrentOffset % VACB_MAPPING_GRANULARITY, PartialLength);
        }
        _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
        {
            Success = FALSE;
        }
        _SEH2_END

        if (!Success)
        {
            CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);
            DPRINT1("Failed to read data!\n");
            goto Clear;
        }

        CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
    }

    while (Length > 0)
    {
        ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
        PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  CurrentOffset,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Failed to request VACB: %lx!\n", Status);
            goto Clear;
        }

        _SEH2_TRY
        {
            Success = CcRosEnsureVacbResident(Vacb, TRUE, FALSE, 0, PartialLength);
        }
        _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
        {
            Success = FALSE;
        }
        _SEH2_END

        if (!Success)
        {
            CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);
            DPRINT1("Failed to read data!\n");
            goto Clear;
        }

        /* Reading succeeded: release the VACB without marking it dirty */
        CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
    }

Clear:
    /* See the comment above about the private cache map */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    PrivateCacheMap = FileObject->PrivateCacheMap;
    if (PrivateCacheMap != NULL)
    {
        /* Mark read-ahead as inactive */
        KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
        InterlockedAnd((volatile long *)&PrivateCacheMap->UlongFlags, ~PRIVATE_CACHE_MAP_READ_AHEAD_ACTIVE);
        KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* If the file was locked, release it */
    if (Locked)
    {
        SharedCacheMap->Callbacks->ReleaseFromReadAhead(SharedCacheMap->LazyWriteContext);
    }

    /* And drop our extra reference (see CcScheduleReadAhead) */
    ObDereferenceObject(FileObject);

    return;
}

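/* CcCanIWrite() below and CcDeferWrite() further down implement write
 * throttling as a pair: a file system's write path first asks whether the
 * write may proceed and, when it may not, queues it so that
 * CcPostDeferredWrites() posts it later. A minimal usage sketch (illustrative
 * only; MyPostWrite, DeviceExt and Irp are hypothetical caller names):
 *
 *     if (!CcCanIWrite(FileObject, Length, CanWait, FALSE))
 *     {
 *         CcDeferWrite(FileObject, MyPostWrite, DeviceExt, Irp, Length, FALSE);
 *         return STATUS_PENDING;
 *     }
 */
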
/*
 * @unimplemented
 */
BOOLEAN
NTAPI
CcCanIWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG BytesToWrite,
    IN BOOLEAN Wait,
    IN BOOLEAN Retrying)
{
    KIRQL OldIrql;
    KEVENT WaitEvent;
    ULONG Length, Pages;
    BOOLEAN PerFileDefer;
    DEFERRED_WRITE Context;
    PFSRTL_COMMON_FCB_HEADER Fcb;
    CC_CAN_WRITE_RETRY TryContext;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p BytesToWrite=%lu Wait=%d Retrying=%d\n",
        FileObject, BytesToWrite, Wait, Retrying);

    /* Write-through is always OK */
    if (BooleanFlagOn(FileObject->Flags, FO_WRITE_THROUGH))
    {
        return TRUE;
    }

    TryContext = Retrying;
    /* Always allow remote files, unless the request comes from a posted write */
    if (IoIsFileOriginRemote(FileObject) && TryContext < RetryAllowRemote)
    {
        return TRUE;
    }

    /* Don't exceed the maximum tolerated size */
    Length = MAX_ZERO_LENGTH;
    if (BytesToWrite < MAX_ZERO_LENGTH)
    {
        Length = BytesToWrite;
    }

    Pages = BYTES_TO_PAGES(Length);

    /* By default, assume the per-file limits won't be hit */
    PerFileDefer = FALSE;
    Fcb = FileObject->FsContext;
    /* Do we have to check the per-file limits? */
    if (TryContext >= RetryForceCheckPerFile ||
        BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES))
    {
        /* If the master lock is not held yet, acquire it now */
        if (TryContext != RetryMasterLocked)
        {
            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        }

        /* Let's not assume the file is cached... */
        if (FileObject->SectionObjectPointer != NULL &&
            FileObject->SectionObjectPointer->SharedCacheMap != NULL)
        {
            SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
            /* Is a per-file limit set? */
            if (SharedCacheMap->DirtyPageThreshold != 0 &&
                SharedCacheMap->DirtyPages != 0)
            {
                /* Yes, check whether it would be exceeded */
                if (Pages + SharedCacheMap->DirtyPages > SharedCacheMap->DirtyPageThreshold)
                {
                    PerFileDefer = TRUE;
                }
            }
        }

        /* And don't forget to release the master lock */
        if (TryContext != RetryMasterLocked)
        {
            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
        }
    }

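    /* Note: the per-file threshold checked above is the one a file system
     * opts into via CcSetDirtyPageThreshold(), which is also expected to set
     * FSRTL_FLAG_LIMIT_MODIFIED_PAGES in the common FCB header.
     */
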
    /* So, now allow the write if:
     * - This is not the first try, or nothing is queued for deferral yet
     * AND:
     * - We don't exceed the global dirty page threshold
     * - We don't exceed what Mm allows us to use:
     *   + If we're above the top throttle mark, that's fine
     *   + If we're above the bottom mark with few modified pages, that's fine
     *   + Otherwise, throttle!
     * - We don't exceed the per-file threshold
     */
    if ((TryContext != FirstTry || IsListEmpty(&CcDeferredWrites)) &&
        CcTotalDirtyPages + Pages < CcDirtyPageThreshold &&
        (MmAvailablePages > MmThrottleTop ||
         (MmModifiedPageListHead.Total < 1000 && MmAvailablePages > MmThrottleBottom)) &&
        !PerFileDefer)
    {
        return TRUE;
    }

    /* If the caller cannot wait, we won't enter the wait loop below, so the
     * write is simply refused for now
     */
    if (!Wait)
    {
        return FALSE;
    }

    /* Otherwise, if there are no deferred writes yet, start the lazy writer */
    if (IsListEmpty(&CcDeferredWrites))
    {
        KIRQL OldIrql;

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        CcScheduleLazyWriteScan(TRUE);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }

    /* Initialize our wait event */
    KeInitializeEvent(&WaitEvent, NotificationEvent, FALSE);

    /* And prepare a dummy context */
    Context.NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
    Context.NodeByteSize = sizeof(DEFERRED_WRITE);
    Context.FileObject = FileObject;
    Context.BytesToWrite = BytesToWrite;
    Context.LimitModifiedPages = BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);
    Context.Event = &WaitEvent;

    /* And queue it */
    if (Retrying)
    {
        /* To the top, if that's a retry */
        ExInterlockedInsertHeadList(&CcDeferredWrites,
                                    &Context.DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }
    else
    {
        /* To the bottom, if that's a first attempt */
        ExInterlockedInsertTailList(&CcDeferredWrites,
                                    &Context.DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }

#if DBG
    DPRINT1("Actively deferring write for: %p\n", FileObject);
    DPRINT1("Because:\n");
    if (CcTotalDirtyPages + Pages >= CcDirtyPageThreshold)
        DPRINT1("    There are too many dirty cache pages: %x + %x >= %x\n", CcTotalDirtyPages, Pages, CcDirtyPageThreshold);
    if (MmAvailablePages <= MmThrottleTop)
        DPRINT1("    Available pages are below the throttle top: %lx <= %lx\n", MmAvailablePages, MmThrottleTop);
    if (MmModifiedPageListHead.Total >= 1000)
        DPRINT1("    There are too many modified pages: %lu >= 1000\n", MmModifiedPageListHead.Total);
    if (MmAvailablePages <= MmThrottleBottom)
        DPRINT1("    Available pages are below the throttle bottom: %lx <= %lx\n", MmAvailablePages, MmThrottleBottom);
#endif
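    /* Note that Context lives on this stack frame and carries a wait event:
     * CcPostDeferredWrites() will signal the event rather than call a post
     * routine and free the entry, so it never releases stack memory.
     */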
    /* Now, we'll loop until our event is set. When it is set, the caller
     * can, and must, perform the write immediately.
     */
    do
    {
        CcPostDeferredWrites();
    } while (KeWaitForSingleObject(&WaitEvent, Executive, KernelMode, FALSE, &CcIdleDelay) != STATUS_SUCCESS);

    return TRUE;
}

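/* Exception filter for the user-buffer copies below: only access violations
 * that fall within the caller-supplied buffer are handled (the callers then
 * raise STATUS_INVALID_USER_BUFFER); any other fault keeps propagating.
 */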
static
int
CcpCheckInvalidUserBuffer(PEXCEPTION_POINTERS Except, PVOID Buffer, ULONG Length)
{
    ULONG_PTR ExceptionAddress;
    ULONG_PTR BeginAddress = (ULONG_PTR)Buffer;
    ULONG_PTR EndAddress = (ULONG_PTR)Buffer + Length;

    if (Except->ExceptionRecord->ExceptionCode != STATUS_ACCESS_VIOLATION)
        return EXCEPTION_CONTINUE_SEARCH;
    if (Except->ExceptionRecord->NumberParameters < 2)
        return EXCEPTION_CONTINUE_SEARCH;

    ExceptionAddress = Except->ExceptionRecord->ExceptionInformation[1];
    if ((ExceptionAddress >= BeginAddress) && (ExceptionAddress < EndAddress))
        return EXCEPTION_EXECUTE_HANDLER;

    return EXCEPTION_CONTINUE_SEARCH;
}

/*
 * @implemented
 */
BOOLEAN
NTAPI
CcCopyRead (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_VACB Vacb;
    PROS_SHARED_CACHE_MAP SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    LONGLONG ReadEnd = FileOffset->QuadPart + Length;
    ULONG ReadLength = 0;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d\n",
        FileObject, FileOffset->QuadPart, Length, Wait);

    DPRINT("CcCopyRead(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p, IoStatus 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait,
           Buffer, IoStatus);

    if (!SharedCacheMap)
        return FALSE;

    /* Documented to ASSERT, but KMTests exercise this case... */
    // ASSERT((FileOffset->QuadPart + Length) <= SharedCacheMap->FileSize.QuadPart);

    CurrentOffset = FileOffset->QuadPart;
    while (CurrentOffset < ReadEnd)
    {
        Status = CcRosGetVacb(SharedCacheMap, CurrentOffset, &Vacb);
        if (!NT_SUCCESS(Status))
        {
            ExRaiseStatus(Status);
            return FALSE;
        }

        _SEH2_TRY
        {
            ULONG VacbOffset = CurrentOffset % VACB_MAPPING_GRANULARITY;
            ULONG VacbLength = min(Length, VACB_MAPPING_GRANULARITY - VacbOffset);
            SIZE_T CopyLength = VacbLength;

            if (!CcRosEnsureVacbResident(Vacb, Wait, FALSE, VacbOffset, VacbLength))
                return FALSE;

            _SEH2_TRY
            {
                RtlCopyMemory(Buffer, (PUCHAR)Vacb->BaseAddress + VacbOffset, CopyLength);
            }
            _SEH2_EXCEPT(CcpCheckInvalidUserBuffer(_SEH2_GetExceptionInformation(), Buffer, VacbLength))
            {
                ExRaiseStatus(STATUS_INVALID_USER_BUFFER);
            }
            _SEH2_END;

            ReadLength += VacbLength;

            Buffer = (PVOID)((ULONG_PTR)Buffer + VacbLength);
            CurrentOffset += VacbLength;
            Length -= VacbLength;
        }
        _SEH2_FINALLY
        {
            CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);
        }
        _SEH2_END;
    }

    IoStatus->Status = STATUS_SUCCESS;
    IoStatus->Information = ReadLength;

#if 0
    /* If that was a successful synchronous read, handle read-ahead */
    if (Length == 0 && Wait)
    {
        PPRIVATE_CACHE_MAP PrivateCacheMap = FileObject->PrivateCacheMap;

        /* If the file isn't random access and the next read may take us across
         * a VACB boundary, schedule the next read
         */
        if (!BooleanFlagOn(FileObject->Flags, FO_RANDOM_ACCESS) &&
            (CurrentOffset - 1) / VACB_MAPPING_GRANULARITY != (CurrentOffset + ReadLength - 1) / VACB_MAPPING_GRANULARITY)
        {
            CcScheduleReadAhead(FileObject, FileOffset, ReadLength);
        }

        /* And update the read history in the private cache map */
        PrivateCacheMap->FileOffset1.QuadPart = PrivateCacheMap->FileOffset2.QuadPart;
        PrivateCacheMap->BeyondLastByte1.QuadPart = PrivateCacheMap->BeyondLastByte2.QuadPart;
        PrivateCacheMap->FileOffset2.QuadPart = FileOffset->QuadPart;
        PrivateCacheMap->BeyondLastByte2.QuadPart = FileOffset->QuadPart + ReadLength;
    }
#endif

    return TRUE;
}

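/* Both copy routines walk the requested range one VACB at a time. For
 * example, assuming a 256 KB VACB_MAPPING_GRANULARITY, a 100 KB transfer
 * starting at file offset 200 KB touches two VACBs: the first iteration
 * handles 56 KB (VacbOffset is 200 KB, so only the tail of the mapping is
 * left), the second handles the remaining 44 KB from the start of the next.
 */
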
/*
 * @implemented
 */
BOOLEAN
NTAPI
CcCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    IN PVOID Buffer)
{
    PROS_VACB Vacb;
    PROS_SHARED_CACHE_MAP SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    LONGLONG WriteEnd;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d Buffer=%p\n",
        FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    DPRINT("CcCopyWrite(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    if (!SharedCacheMap)
        return FALSE;

    Status = RtlLongLongAdd(FileOffset->QuadPart, Length, &WriteEnd);
    if (!NT_SUCCESS(Status))
        ExRaiseStatus(Status);

    ASSERT(WriteEnd <= SharedCacheMap->SectionSize.QuadPart);

    CurrentOffset = FileOffset->QuadPart;
    while (CurrentOffset < WriteEnd)
    {
        ULONG VacbOffset = CurrentOffset % VACB_MAPPING_GRANULARITY;
        ULONG VacbLength = min(WriteEnd - CurrentOffset, VACB_MAPPING_GRANULARITY - VacbOffset);

        Status = CcRosGetVacb(SharedCacheMap, CurrentOffset, &Vacb);
        if (!NT_SUCCESS(Status))
        {
            ExRaiseStatus(Status);
            return FALSE;
        }

        _SEH2_TRY
        {
            if (!CcRosEnsureVacbResident(Vacb, Wait, FALSE, VacbOffset, VacbLength))
            {
                return FALSE;
            }

            _SEH2_TRY
            {
                RtlCopyMemory((PVOID)((ULONG_PTR)Vacb->BaseAddress + VacbOffset), Buffer, VacbLength);
            }
            _SEH2_EXCEPT(CcpCheckInvalidUserBuffer(_SEH2_GetExceptionInformation(), Buffer, VacbLength))
            {
                ExRaiseStatus(STATUS_INVALID_USER_BUFFER);
            }
            _SEH2_END;

            Buffer = (PVOID)((ULONG_PTR)Buffer + VacbLength);
            CurrentOffset += VacbLength;

            /* Tell Mm */
            Status = MmMakePagesDirty(NULL, Add2Ptr(Vacb->BaseAddress, VacbOffset), VacbLength);
            if (!NT_SUCCESS(Status))
                ExRaiseStatus(Status);
        }
        _SEH2_FINALLY
        {
            /* Do not mark the VACB as dirty if an exception was raised */
            CcRosReleaseVacb(SharedCacheMap, Vacb, !_SEH2_AbnormalTermination(), FALSE);
        }
        _SEH2_END;
    }

    /* Flush if needed */
    if (FileObject->Flags & FO_WRITE_THROUGH)
        CcFlushCache(FileObject->SectionObjectPointer, FileOffset, Length, NULL);

    return TRUE;
}

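/* Unlike the synchronous wait path in CcCanIWrite(), which queues a
 * stack-based context with an event, the routine below queues a
 * pool-allocated context carrying a post routine; CcPostDeferredWrites()
 * frees it (tag 'CcDw') after calling that routine.
 */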
/*
 * @implemented
 */
VOID
NTAPI
CcDeferWrite (
    IN PFILE_OBJECT FileObject,
    IN PCC_POST_DEFERRED_WRITE PostRoutine,
    IN PVOID Context1,
    IN PVOID Context2,
    IN ULONG BytesToWrite,
    IN BOOLEAN Retrying)
{
    KIRQL OldIrql;
    PDEFERRED_WRITE Context;
    PFSRTL_COMMON_FCB_HEADER Fcb;

    CCTRACE(CC_API_DEBUG, "FileObject=%p PostRoutine=%p Context1=%p Context2=%p BytesToWrite=%lu Retrying=%d\n",
        FileObject, PostRoutine, Context1, Context2, BytesToWrite, Retrying);

    /* Try to allocate a context for queueing the write operation */
    Context = ExAllocatePoolWithTag(NonPagedPool, sizeof(DEFERRED_WRITE), 'CcDw');
    /* If that failed, execute the operation immediately! */
    if (Context == NULL)
    {
        PostRoutine(Context1, Context2);
        return;
    }

    Fcb = FileObject->FsContext;

    /* Otherwise, initialize the context */
    RtlZeroMemory(Context, sizeof(DEFERRED_WRITE));
    Context->NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
    Context->NodeByteSize = sizeof(DEFERRED_WRITE);
    Context->FileObject = FileObject;
    Context->PostRoutine = PostRoutine;
    Context->Context1 = Context1;
    Context->Context2 = Context2;
    Context->BytesToWrite = BytesToWrite;
    Context->LimitModifiedPages = BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);

    /* And queue it */
    if (Retrying)
    {
        /* To the top, if that's a retry */
        ExInterlockedInsertHeadList(&CcDeferredWrites,
                                    &Context->DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }
    else
    {
        /* To the bottom, if that's a first attempt */
        ExInterlockedInsertTailList(&CcDeferredWrites,
                                    &Context->DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }

    /* Try to execute the posted writes */
    CcPostDeferredWrites();

    /* Schedule a lazy writer run to handle deferred writes */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
}

/*
 * @unimplemented
 */
VOID
NTAPI
CcFastCopyRead (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN ULONG PageCount,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    LARGE_INTEGER LargeFileOffset;
    BOOLEAN Success;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu PageCount=%lu Buffer=%p\n",
        FileObject, FileOffset, Length, PageCount, Buffer);

    DBG_UNREFERENCED_PARAMETER(PageCount);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyRead(FileObject,
                         &LargeFileOffset,
                         Length,
                         TRUE,
                         Buffer,
                         IoStatus);
    ASSERT(Success == TRUE);
}

/*
 * @unimplemented
 */
VOID
NTAPI
CcFastCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN PVOID Buffer)
{
    LARGE_INTEGER LargeFileOffset;
    BOOLEAN Success;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu Buffer=%p\n",
        FileObject, FileOffset, Length, Buffer);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyWrite(FileObject,
                          &LargeFileOffset,
                          Length,
                          TRUE,
                          Buffer);
    ASSERT(Success == TRUE);
}

/*
 * @implemented
 */
BOOLEAN
NTAPI
CcZeroData (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER StartOffset,
    IN PLARGE_INTEGER EndOffset,
    IN BOOLEAN Wait)
{
    NTSTATUS Status;
    LARGE_INTEGER WriteOffset;
    LONGLONG Length;
    PROS_VACB Vacb;
    PROS_SHARED_CACHE_MAP SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p StartOffset=%I64u EndOffset=%I64u Wait=%d\n",
        FileObject, StartOffset->QuadPart, EndOffset->QuadPart, Wait);

    DPRINT("CcZeroData(FileObject 0x%p, StartOffset %I64x, EndOffset %I64x, "
           "Wait %u)\n", FileObject, StartOffset->QuadPart, EndOffset->QuadPart,
           Wait);

    Length = EndOffset->QuadPart - StartOffset->QuadPart;
    WriteOffset.QuadPart = StartOffset->QuadPart;

    if (!SharedCacheMap)
    {
        /* Make this a non-cached write */
        IO_STATUS_BLOCK Iosb;
        KEVENT Event;
        PMDL Mdl;
        ULONG i;
        ULONG CurrentLength;
        PPFN_NUMBER PfnArray;

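        /* The MDL trick below: every PFN entry of the MDL is pointed at the
         * single shared CcZeroPage, so the storage stack writes out zeros
         * without a real zero-filled buffer ever being allocated. Only
         * ByteCount is adjusted on each iteration of the write loop.
         */
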
        /* Set up our MDL */
        Mdl = IoAllocateMdl(NULL, min(Length, MAX_ZERO_LENGTH), FALSE, FALSE, NULL);
        if (!Mdl)
            ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);

        PfnArray = MmGetMdlPfnArray(Mdl);
        for (i = 0; i < BYTES_TO_PAGES(Mdl->ByteCount); i++)
            PfnArray[i] = CcZeroPage;
        Mdl->MdlFlags |= MDL_PAGES_LOCKED;

        /* Perform the write sequentially */
        while (Length > 0)
        {
            CurrentLength = min(Length, MAX_ZERO_LENGTH);

            Mdl->ByteCount = CurrentLength;

            KeInitializeEvent(&Event, NotificationEvent, FALSE);
            Status = IoSynchronousPageWrite(FileObject, Mdl, &WriteOffset, &Event, &Iosb);
            if (Status == STATUS_PENDING)
            {
                KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
                Status = Iosb.Status;
            }
            if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
            {
                MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
            }
            if (!NT_SUCCESS(Status))
            {
                IoFreeMdl(Mdl);
                ExRaiseStatus(Status);
            }
            WriteOffset.QuadPart += CurrentLength;
            Length -= CurrentLength;
        }

        IoFreeMdl(Mdl);

        return TRUE;
    }

    /* See if we can simply truncate the valid data length instead */
    if ((StartOffset->QuadPart < SharedCacheMap->ValidDataLength.QuadPart) &&
        (EndOffset->QuadPart >= SharedCacheMap->ValidDataLength.QuadPart))
    {
        DPRINT1("Truncating VDL.\n");
        SharedCacheMap->ValidDataLength = *StartOffset;
        return TRUE;
    }

    ASSERT(EndOffset->QuadPart <= SharedCacheMap->SectionSize.QuadPart);

    while (WriteOffset.QuadPart < EndOffset->QuadPart)
    {
        ULONG VacbOffset = WriteOffset.QuadPart % VACB_MAPPING_GRANULARITY;
        ULONG VacbLength = min(Length, VACB_MAPPING_GRANULARITY - VacbOffset);

        Status = CcRosGetVacb(SharedCacheMap, WriteOffset.QuadPart, &Vacb);
        if (!NT_SUCCESS(Status))
        {
            ExRaiseStatus(Status);
            return FALSE;
        }

        _SEH2_TRY
        {
            if (!CcRosEnsureVacbResident(Vacb, Wait, FALSE, VacbOffset, VacbLength))
            {
                return FALSE;
            }

            RtlZeroMemory((PVOID)((ULONG_PTR)Vacb->BaseAddress + VacbOffset), VacbLength);

            WriteOffset.QuadPart += VacbLength;
            Length -= VacbLength;

            /* Tell Mm */
            Status = MmMakePagesDirty(NULL, Add2Ptr(Vacb->BaseAddress, VacbOffset), VacbLength);
            if (!NT_SUCCESS(Status))
                ExRaiseStatus(Status);
        }
        _SEH2_FINALLY
        {
            /* Do not mark the VACB as dirty if an exception was raised */
            CcRosReleaseVacb(SharedCacheMap, Vacb, !_SEH2_AbnormalTermination(), FALSE);
        }
        _SEH2_END;
    }

    /* Flush if needed */
    if (FileObject->Flags & FO_WRITE_THROUGH)
        CcFlushCache(FileObject->SectionObjectPointer, StartOffset, EndOffset->QuadPart - StartOffset->QuadPart, NULL);

    return TRUE;
}